focal_method (stringlengths 13–60.9k) | test_case (stringlengths 25–109k)
---|---
@Override
public synchronized void patchConnectorConfig(String connName, Map<String, String> configPatch, Callback<Created<ConnectorInfo>> callback) {
try {
ConnectorInfo connectorInfo = connectorInfo(connName);
if (connectorInfo == null) {
callback.onCompletion(new NotFoundException("Connector " + connName + " not found", null), null);
return;
}
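// Apply the patch on top of the current config, then validate it before submitting the update.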
Map<String, String> patchedConfig = ConnectUtils.patchConfig(connectorInfo.config(), configPatch);
validateConnectorConfig(patchedConfig, (error, configInfos) -> {
if (error != null) {
callback.onCompletion(error, null);
return;
}
requestExecutorService.submit(
() -> putConnectorConfig(connName, patchedConfig, null, true, callback, configInfos)
);
});
} catch (Throwable e) {
callback.onCompletion(e, null);
}
}
|
@Test
public void testPatchConnectorConfigNotFound() {
initialize(false);
Map<String, String> connConfigPatch = new HashMap<>();
connConfigPatch.put("foo1", "baz1");
Callback<Herder.Created<ConnectorInfo>> patchCallback = mock(Callback.class);
herder.patchConnectorConfig(CONNECTOR_NAME, connConfigPatch, patchCallback);
ArgumentCaptor<NotFoundException> exceptionCaptor = ArgumentCaptor.forClass(NotFoundException.class);
verify(patchCallback).onCompletion(exceptionCaptor.capture(), isNull());
assertEquals("Connector " + CONNECTOR_NAME + " not found", exceptionCaptor.getValue().getMessage());
}
|
public static LocalDateTime parse(CharSequence text) {
return parse(text, (DateTimeFormatter) null);
}
|
@Test
public void parseTest4() {
final LocalDateTime localDateTime = LocalDateTimeUtil.parse("2020-01-23T12:23:56");
assertEquals("2020-01-23T12:23:56", localDateTime.toString());
}
|
public String generateRedirectUrl(String artifact, String transactionId, String sessionId, BvdStatus status) throws SamlSessionException, UnsupportedEncodingException {
final var samlSession = findSamlSessionByArtifactOrTransactionId(artifact, transactionId);
if (CANCELLED.equals(status))
samlSession.setBvdStatus(AdAuthenticationStatus.STATUS_CANCELED.label);
if (ERROR.equals(status))
samlSession.setBvdStatus(AdAuthenticationStatus.STATUS_FAILED.label);
if (artifact == null)
artifact = samlSession.getArtifact();
if (sessionId == null || !sessionId.equals(samlSession.getHttpSessionId()))
throw new SamlSessionException("Saml session found with invalid sessionId for redirect_with_artifact");
var url = new StringBuilder(samlSession.getAssertionConsumerServiceURL() + "?SAMLart=" + URLEncoder.encode(artifact, "UTF-8"));
// append relay-state
if (samlSession.getRelayState() != null)
url.append("&RelayState=" + URLEncoder.encode(samlSession.getRelayState(), "UTF-8"));
samlSession.setResolveBeforeTime(System.currentTimeMillis() + 1000 * 60 * minutesToResolve);
samlSessionRepository.save(samlSession);
return url.toString();
}
|
@Test
void redirectWithWrongArtifact() {
when(samlSessionRepositoryMock.findByArtifact(anyString())).thenReturn(Optional.empty());
Exception exception = assertThrows(SamlSessionException.class,
() -> assertionConsumerServiceUrlService.generateRedirectUrl("IncorrectArtifact", null, "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", null));
assertEquals("Saml session not found by artifact/transactionid for redirect_with_artifact", exception.getMessage());
}
|
@Override
public void flush() throws IOException {
if(log.isWarnEnabled()) {
log.warn(String.format("Flush stream %s", proxy));
}
this.flush(true);
}
|
@Test
public void testFlush() throws Exception {
final ByteArrayOutputStream proxy = new ByteArrayOutputStream(20);
final MemorySegementingOutputStream out = new MemorySegementingOutputStream(proxy, 32768);
final byte[] content = RandomUtils.nextBytes(40500);
out.write(content, 0, 32800);
assertEquals(32768, proxy.toByteArray().length);
out.flush();
assertEquals(32800, proxy.toByteArray().length);
out.write(content, 32800, 7700);
out.close();
assertArrayEquals(content, proxy.toByteArray());
}
|
@Override
public CompletableFuture<SchemaVersion> putSchemaIfAbsent(String schemaId,
SchemaData schema,
SchemaCompatibilityStrategy strategy) {
try {
SchemaDataValidator.validateSchemaData(schema);
} catch (InvalidSchemaDataException e) {
return FutureUtil.failedFuture(e);
}
return service.putSchemaIfAbsent(schemaId, schema, strategy);
}
|
@Test
public void testPutSchemaIfAbsentWithBadSchemaData() {
String schemaId = "test-schema-id";
SchemaCompatibilityStrategy strategy = SchemaCompatibilityStrategy.FULL;
CompletableFuture<SchemaVersion> future = new CompletableFuture<>();
when(underlyingService.putSchemaIfAbsent(eq(schemaId), any(SchemaData.class), eq(strategy)))
.thenReturn(future);
SchemaData schemaData = SchemaData.builder()
.type(SchemaType.BOOLEAN)
.data(new byte[10])
.build();
try {
service.putSchemaIfAbsent(schemaId, schemaData, strategy).get();
fail("Should fail putSchemaIfAbsent");
} catch (Exception e) {
assertTrue(e.getCause() instanceof InvalidSchemaDataException);
}
verify(underlyingService, times(0))
.putSchemaIfAbsent(eq(schemaId), same(schemaData), eq(strategy));
}
|
@Override
public Timestamp getValue() {
return value;
}
|
@Test
public void getValue() {
RubyTimeStampGauge gauge = new RubyTimeStampGauge("bar", RUBY_TIMESTAMP);
assertThat(gauge.getValue()).isEqualTo(RUBY_TIMESTAMP.getTimestamp());
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_RUBYTIMESTAMP);
//Null initialize
gauge = new RubyTimeStampGauge("bar");
assertThat(gauge.getValue()).isNull();
assertThat(gauge.getType()).isEqualTo(MetricType.GAUGE_RUBYTIMESTAMP);
}
|
@SuppressWarnings("rawtypes")
@Override
@SneakyThrows(ReflectiveOperationException.class)
public Map<String, Class<?>> getYamlShortcuts() {
Collection<YamlRuleConfigurationSwapper> swappers = ShardingSphereServiceLoader.getServiceInstances(YamlRuleConfigurationSwapper.class);
Map<String, Class<?>> result = new HashMap<>(swappers.size(), 1F);
for (YamlRuleConfigurationSwapper each : swappers) {
Class<?> yamlRuleConfigurationClass = Class.forName(((ParameterizedType) each.getClass().getGenericInterfaces()[0]).getActualTypeArguments()[0].getTypeName());
result.put(String.format("!%s", each.getRuleTagName()), yamlRuleConfigurationClass);
}
return result;
}
|
@Test
void assertGetYamlShortcuts() {
Map<String, Class<?>> actual = new YamlRuleConfigurationShortcuts().getYamlShortcuts();
assertThat(actual.size(), is(1));
assertTrue(actual.containsKey("!FIXTURE"));
}
|
public static ParamType getSchemaFromType(final Type type) {
return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
|
@Test
public void shouldGetGenericSchemaFromType() throws NoSuchMethodException {
// Given:
final Type genericType = getClass().getMethod("genericType").getGenericReturnType();
// When:
final ParamType returnType = UdfUtil.getSchemaFromType(genericType);
// Then:
MatcherAssert.assertThat(returnType, CoreMatchers.is(GenericType.of("T")));
}
|
@Override
public RedisClusterNode clusterGetNodeForSlot(int slot) {
Iterable<RedisClusterNode> res = clusterGetNodes();
for (RedisClusterNode redisClusterNode : res) {
if (redisClusterNode.isMaster() && redisClusterNode.getSlotRange().contains(slot)) {
return redisClusterNode;
}
}
return null;
}
|
@Test
public void testClusterGetNodeForSlot() {
RedisClusterNode node1 = connection.clusterGetNodeForSlot(1);
RedisClusterNode node2 = connection.clusterGetNodeForSlot(16000);
assertThat(node1.getId()).isNotEqualTo(node2.getId());
}
|
static void closeStateManager(final Logger log,
final String logPrefix,
final boolean closeClean,
final boolean eosEnabled,
final ProcessorStateManager stateMgr,
final StateDirectory stateDirectory,
final TaskType taskType) {
// if EOS is enabled, wipe out the whole state store for unclean close since it is now invalid
final boolean wipeStateStore = !closeClean && eosEnabled;
final TaskId id = stateMgr.taskId();
log.trace("Closing state manager for {} task {}", taskType, id);
final AtomicReference<ProcessorStateException> firstException = new AtomicReference<>(null);
try {
if (stateDirectory.lock(id)) {
try {
stateMgr.close();
} catch (final ProcessorStateException e) {
firstException.compareAndSet(null, e);
} finally {
try {
if (wipeStateStore) {
log.debug("Wiping state stores for {} task {}", taskType, id);
// we can just delete the whole dir of the task, including the state store images and the checkpoint files,
// and then we write an empty checkpoint file indicating that the previous close is graceful and we just
// need to re-bootstrap the restoration from the beginning
Utils.delete(stateMgr.baseDir());
}
} finally {
stateDirectory.unlock(id);
}
}
} else {
log.error("Failed to acquire lock while closing the state store for {} task {}", taskType, id);
}
} catch (final IOException e) {
final ProcessorStateException exception = new ProcessorStateException(
String.format("%sFatal error while trying to close the state manager for task %s", logPrefix, id), e
);
firstException.compareAndSet(null, exception);
}
final ProcessorStateException exception = firstException.get();
if (exception != null) {
throw exception;
}
}
|
@Test
public void testCloseStateManagerWithStateStoreWipeOutRethrowWrappedIOException() {
final File unknownFile = new File("/unknown/path");
final InOrder inOrder = inOrder(stateManager, stateDirectory);
when(stateManager.taskId()).thenReturn(taskId);
when(stateDirectory.lock(taskId)).thenReturn(true);
when(stateManager.baseDir()).thenReturn(unknownFile);
try (MockedStatic<Utils> utils = mockStatic(Utils.class)) {
utils.when(() -> Utils.delete(unknownFile)).thenThrow(new IOException("Deletion failed"));
final ProcessorStateException thrown = assertThrows(
ProcessorStateException.class, () -> StateManagerUtil.closeStateManager(logger,
"logPrefix:", false, true, stateManager, stateDirectory, TaskType.ACTIVE));
assertEquals(IOException.class, thrown.getCause().getClass());
}
inOrder.verify(stateManager).close();
inOrder.verify(stateDirectory).unlock(taskId);
verifyNoMoreInteractions(stateManager, stateDirectory);
}
|
@Override
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
if (metric == null) {
throw new NullPointerException("metric == null");
}
return metric;
}
|
@Test
public void registerNullMetric() {
MetricRegistry registry = new NoopMetricRegistry();
assertThatNullPointerException()
.isThrownBy(() -> registry.register("any_name", null))
.withMessage("metric == null");
}
|
@Nullable static String channelName(@Nullable Destination destination) {
if (destination == null) return null;
boolean isQueue = isQueue(destination);
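// Resolve the name via the Queue or Topic API; non-fatal failures are logged and yield null.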
try {
if (isQueue) {
return ((Queue) destination).getQueueName();
} else {
return ((Topic) destination).getTopicName();
}
} catch (Throwable t) {
propagateIfFatal(t);
log(t, "error getting destination name from {0}", destination, null);
}
return null;
}
|
@Test void channelName_queueAndTopic_null() {
assertThat(MessageParser.channelName(null)).isNull();
}
|
@Override
public void validateJoinRequest(JoinMessage joinMessage) {
// check that the joining member's major.minor version is the same as the current cluster version's major.minor
MemberVersion memberVersion = joinMessage.getMemberVersion();
Version clusterVersion = node.getClusterService().getClusterVersion();
if (!memberVersion.asVersion().equals(clusterVersion)) {
String msg = "Joining node's version " + memberVersion + " is not compatible with cluster version " + clusterVersion;
if (clusterVersion.getMajor() != memberVersion.getMajor()) {
msg += " (Rolling Member Upgrades are only supported for the same major version)";
}
if (clusterVersion.getMinor() > memberVersion.getMinor()) {
msg += " (Rolling Member Upgrades are only supported for the next minor version)";
}
if (!BuildInfoProvider.getBuildInfo().isEnterprise()) {
msg += " (Rolling Member Upgrades are only supported in Hazelcast Enterprise)";
}
throw new VersionMismatchException(msg);
}
}
|
@Test
public void test_joinRequestFails_whenPreviousMinorVersion() {
assumeTrue("Minor version is 0", nodeVersion.getMinor() > 0);
MemberVersion previousMinorVersion = MemberVersion.of(nodeVersion.getMajor(), nodeVersion.getMinor() - 1,
nodeVersion.getPatch());
JoinRequest joinRequest = new JoinRequest(Packet.VERSION, buildNumber, previousMinorVersion, joinAddress,
newUnsecureUUID(), false, null, null, null, null, null);
assertThatThrownBy(() -> nodeExtension.validateJoinRequest(joinRequest))
.isInstanceOf(VersionMismatchException.class)
.hasMessageContaining("Rolling Member Upgrades are only supported for the next minor version")
.hasMessageContaining("Rolling Member Upgrades are only supported in Hazelcast Enterprise");
}
|
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
return context.getSecretSourceResolver().resolve(toInterpolate);
}
|
@Test
public void resolve_JsonWithSpaceInKey() {
String input = "{ \"abc def\": 1, \"b\": 2 }";
environment.set("FOO", input);
String output = resolve("${json:abc def:${FOO}}");
assertThat(output, equalTo("1"));
}
|
public static boolean notEqualWithinTolerance(double left, double right, double tolerance) {
if (Doubles.isFinite(left) && Doubles.isFinite(right)) {
return Math.abs(left - right) > Math.abs(tolerance);
} else {
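// NaN and infinite operands are never reported as unequal within tolerance.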
return false;
}
}
|
@Test
public void floatNotEquals() {
assertThat(notEqualWithinTolerance(1.3f, 1.3f, 0.00000000000001f)).isFalse();
assertThat(notEqualWithinTolerance(1.3f, 1.3f, 0.0f)).isFalse();
assertThat(
notEqualWithinTolerance(0.0f, 1.0f + 2.0f - 3.0f, 0.00000000000000000000000000000001f))
.isFalse();
assertThat(notEqualWithinTolerance(1.3f, 1.303f, 0.004f)).isFalse();
assertThat(notEqualWithinTolerance(1.3f, 1.303f, 0.002f)).isTrue();
assertThat(notEqualWithinTolerance(Float.POSITIVE_INFINITY, Float.POSITIVE_INFINITY, 0.01f))
.isFalse();
assertThat(notEqualWithinTolerance(Float.POSITIVE_INFINITY, Float.NEGATIVE_INFINITY, 0.01f))
.isFalse();
assertThat(notEqualWithinTolerance(Float.NEGATIVE_INFINITY, Float.NEGATIVE_INFINITY, 0.01f))
.isFalse();
assertThat(notEqualWithinTolerance(Float.NaN, Float.NaN, 0.01f)).isFalse();
}
|
public static List<ParameterMarkerExpressionSegment> getParameterMarkerExpressions(final Collection<ExpressionSegment> expressions) {
List<ParameterMarkerExpressionSegment> result = new ArrayList<>();
extractParameterMarkerExpressions(result, expressions);
return result;
}
|
@Test
void assertGetParameterMarkerExpressionsFromTypeCastExpression() {
ParameterMarkerExpressionSegment expected = new ParameterMarkerExpressionSegment(0, 0, 1, ParameterMarkerType.DOLLAR);
Collection<ExpressionSegment> input = Collections.singleton(new TypeCastExpression(0, 0, "$2::varchar", expected, "varchar"));
List<ParameterMarkerExpressionSegment> actual = ExpressionExtractUtils.getParameterMarkerExpressions(input);
assertThat(actual.size(), is(1));
assertThat(actual.get(0), is(expected));
}
|
@Override
public List<PartitionInfo> getPartitions(Table table, List<String> partitionNames) {
Map<String, Partition> partitionMap = Maps.newHashMap();
IcebergTable icebergTable = (IcebergTable) table;
PartitionsTable partitionsTable = (PartitionsTable) MetadataTableUtils.
createMetadataTableInstance(icebergTable.getNativeTable(), org.apache.iceberg.MetadataTableType.PARTITIONS);
if (icebergTable.isUnPartitioned()) {
try (CloseableIterable<FileScanTask> tasks = partitionsTable.newScan().planFiles()) {
for (FileScanTask task : tasks) {
// PartitionsTable schema:
// record_count,
// file_count,
// total_data_file_size_in_bytes,
// position_delete_record_count,
// position_delete_file_count,
// equality_delete_record_count,
// equality_delete_file_count,
// last_updated_at,
// last_updated_snapshot_id
CloseableIterable<StructLike> rows = task.asDataTask().rows();
for (StructLike row : rows) {
// Get the last updated time of the table according to the table schema
long lastUpdated = -1;
try {
lastUpdated = row.get(7, Long.class);
} catch (NullPointerException e) {
LOG.error("The table [{}] snapshot [{}] has been expired",
icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), e);
}
Partition partition = new Partition(lastUpdated);
return ImmutableList.of(partition);
}
}
// for empty table, use -1 as last updated time
return ImmutableList.of(new Partition(-1));
} catch (IOException e) {
throw new StarRocksConnectorException("Failed to get partitions for table: " + table.getName(), e);
}
} else {
// For partition table, we need to get all partitions from PartitionsTable.
try (CloseableIterable<FileScanTask> tasks = partitionsTable.newScan().planFiles()) {
for (FileScanTask task : tasks) {
// PartitionsTable schema:
// partition,
// spec_id,
// record_count,
// file_count,
// total_data_file_size_in_bytes,
// position_delete_record_count,
// position_delete_file_count,
// equality_delete_record_count,
// equality_delete_file_count,
// last_updated_at,
// last_updated_snapshot_id
CloseableIterable<StructLike> rows = task.asDataTask().rows();
for (StructLike row : rows) {
// Get the partition data/spec id/last updated time according to the table schema
StructProjection partitionData = row.get(0, StructProjection.class);
int specId = row.get(1, Integer.class);
PartitionSpec spec = icebergTable.getNativeTable().specs().get(specId);
String partitionName =
PartitionUtil.convertIcebergPartitionToPartitionName(spec, partitionData);
long lastUpdated = -1;
try {
lastUpdated = row.get(9, Long.class);
} catch (NullPointerException e) {
LOG.error("The table [{}.{}] snapshot [{}] has been expired",
icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), partitionName, e);
}
Partition partition = new Partition(lastUpdated);
partitionMap.put(partitionName, partition);
}
}
} catch (IOException e) {
throw new StarRocksConnectorException("Failed to get partitions for table: " + table.getName(), e);
}
}
ImmutableList.Builder<PartitionInfo> partitions = ImmutableList.builder();
partitionNames.forEach(partitionName -> partitions.add(partitionMap.get(partitionName)));
return partitions.build();
}
|
@Test
public void testGetPartitions2() {
mockedNativeTableG.newAppend().appendFile(FILE_B_5).commit();
IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG);
CachingIcebergCatalog cachingIcebergCatalog = new CachingIcebergCatalog(CATALOG_NAME, icebergHiveCatalog,
DEFAULT_CATALOG_PROPERTIES, Executors.newSingleThreadExecutor());
IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, cachingIcebergCatalog,
Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null);
IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME,
"resource_name", "db",
"table", "", Lists.newArrayList(), mockedNativeTableG, Maps.newHashMap());
List<PartitionInfo> partitions = metadata.getPartitions(icebergTable, Lists.newArrayList());
Assert.assertEquals(1, partitions.size());
}
|
@Override
public DdlCommand create(
final String sqlExpression,
final DdlStatement ddlStatement,
final SessionConfig config
) {
return FACTORIES
.getOrDefault(ddlStatement.getClass(), (statement, cf, ci) -> {
throw new KsqlException(
"Unable to find ddl command factory for statement:"
+ statement.getClass()
+ " valid statements:"
+ FACTORIES.keySet()
);
})
.handle(
this,
new CallInfo(sqlExpression, config),
ddlStatement);
}
|
@Test
public void shouldCreateStreamCommandWithSingleValueWrappingFromOverridesNotConfig() {
// Given:
givenCommandFactories();
ksqlConfig = new KsqlConfig(ImmutableMap.of(
KsqlConfig.KSQL_WRAP_SINGLE_VALUES, true
));
final ImmutableMap<String, Object> overrides = ImmutableMap.of(
KsqlConfig.KSQL_WRAP_SINGLE_VALUES, false
);
final DdlStatement statement =
new CreateStream(SOME_NAME, SOME_ELEMENTS, false, true, withProperties, false);
// When:
final DdlCommand cmd = commandFactories
.create(sqlExpression, statement, SessionConfig.of(ksqlConfig, overrides));
// Then:
assertThat(cmd, is(instanceOf(CreateStreamCommand.class)));
assertThat(((CreateStreamCommand) cmd).getFormats().getValueFeatures().all(),
contains(SerdeFeature.UNWRAP_SINGLES));
}
|
@Override
public void resolve(ConcurrentJobModificationException e) {
final List<Job> concurrentUpdatedJobs = e.getConcurrentUpdatedJobs();
final List<ConcurrentJobModificationResolveResult> failedToResolve = concurrentUpdatedJobs
.stream()
.map(this::resolve)
.filter(ConcurrentJobModificationResolveResult::failed)
.collect(toList());
if (!failedToResolve.isEmpty()) {
throw new UnresolvableConcurrentJobModificationException(failedToResolve, e);
}
}
|
@Test
void concurrentStateChangeFromProcessingToDeletedIsAllowedAndInterruptsThread() {
final Job job1 = aJobInProgress().build();
final Job job2 = aJobInProgress().build();
final Thread job1Thread = mock(Thread.class);
final Thread job2Thread = mock(Thread.class);
when(storageProvider.getJobById(job1.getId())).thenReturn(aCopyOf(job1).withDeletedState().build());
when(storageProvider.getJobById(job2.getId())).thenReturn(aCopyOf(job2).withDeletedState().build());
when(jobSteward.getThreadProcessingJob(job1)).thenReturn(job1Thread);
when(jobSteward.getThreadProcessingJob(job2)).thenReturn(job2Thread);
concurrentJobModificationResolver.resolve(new ConcurrentJobModificationException(asList(job1, job2)));
verify(job1Thread).interrupt();
verify(job2Thread).interrupt();
assertThat(job1).hasState(DELETED);
assertThat(job2).hasState(DELETED);
}
|
@Override
public Result run(GoPluginDescriptor pluginDescriptor, Map<String, List<String>> extensionsInfoOfPlugin) {
final ValidationResult validationResult = validate(pluginDescriptor, extensionsInfoOfPlugin);
return new Result(validationResult.hasError(), validationResult.toErrorMessage());
}
|
@Test
void shouldAddErrorAndReturnValidationResultWhenPluginRequiredExtensionIsNotSupportedByGoCD() {
final PluginPostLoadHook.Result validationResult = pluginExtensionsAndVersionValidator.run(descriptor, Map.of("some-invalid-extension", List.of("2.0")));
assertThat(validationResult.isAFailure()).isTrue();
assertThat(validationResult.getMessage()).isEqualTo("Extension incompatibility detected between plugin(Some-Plugin-Id) and GoCD:\n" +
" Extension(s) [some-invalid-extension] used by the plugin is not supported. GoCD Supported extensions are [authorization, elastic-agent].");
}
|
@Override
public URL getLocalArtifactUrl(DependencyJar dependency) {
String depShortName = dependency.getShortName();
String pathStr = properties.getProperty(depShortName);
if (pathStr != null) {
if (pathStr.indexOf(File.pathSeparatorChar) != -1) {
throw new IllegalArgumentException(
"didn't expect multiple files for " + dependency + ": " + pathStr);
}
Path path = baseDir.resolve(Paths.get(pathStr));
try {
return path.toUri().toURL();
} catch (MalformedURLException e) {
throw new RuntimeException(e);
}
} else {
if (delegate != null) {
return delegate.getLocalArtifactUrl(dependency);
}
}
throw new RuntimeException("no artifacts found for " + dependency);
}
|
@Test
public void whenMissingFromProperties_shouldDelegate() throws Exception {
DependencyResolver resolver =
new PropertiesDependencyResolver(propsFile("nothing", new File("interesting")), mock);
when(mock.getLocalArtifactUrl(exampleDep)).thenReturn(new URL("file:///path/3"));
URL url = resolver.getLocalArtifactUrl(exampleDep);
assertThat(url).isEqualTo(new URL("file:///path/3"));
}
|
@ApiOperation(value = "Delete a user’s info", tags = { "Users" }, code = 204)
@ApiResponses(value = {
@ApiResponse(code = 204, message = "Indicates the user was found and the info for the given key has been deleted. Response body is left empty intentionally."),
@ApiResponse(code = 404, message = "Indicates the requested user was not found or the user does not have info for the given key. Status description contains additional information about the error.")
})
@DeleteMapping("/identity/users/{userId}/info/{key}")
@ResponseStatus(HttpStatus.NO_CONTENT)
public void deleteUserInfo(@ApiParam(name = "userId") @PathVariable("userId") String userId, @ApiParam(name = "key") @PathVariable("key") String key) {
User user = getUserFromRequest(userId);
if (restApiInterceptor != null) {
restApiInterceptor.deleteUser(user);
}
String validKey = getValidKeyFromRequest(user, key);
identityService.setUserInfo(user.getId(), validKey, null);
}
|
@Test
public void testDeleteUserInfo() throws Exception {
User savedUser = null;
try {
User newUser = identityService.newUser("testuser");
newUser.setFirstName("Fred");
newUser.setLastName("McDonald");
newUser.setEmail("[email protected]");
identityService.saveUser(newUser);
savedUser = newUser;
identityService.setUserInfo(newUser.getId(), "key1", "Value 1");
closeResponse(executeRequest(new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_USER_INFO, newUser.getId(), "key1")), HttpStatus.SC_NO_CONTENT));
// Check if info is actually deleted
assertThat(identityService.getUserInfo(newUser.getId(), "key1")).isNull();
} finally {
// Delete user after test passes or fails
if (savedUser != null) {
identityService.deleteUser(savedUser.getId());
}
}
}
|
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
final List<Path> deleted = new ArrayList<Path>();
for(Map.Entry<Path, TransferStatus> entry : files.entrySet()) {
boolean skip = false;
final Path file = entry.getKey();
for(Path d : deleted) {
if(file.isChild(d)) {
skip = true;
break;
}
}
if(skip) {
continue;
}
deleted.add(file);
callback.delete(file);
try {
final TransferStatus status = entry.getValue();
session.getClient().execute(this.toRequest(file, status), new VoidResponseHandler());
}
catch(SardineException e) {
throw new DAVExceptionMappingService().map("Cannot delete {0}", e, file);
}
catch(IOException e) {
throw new HttpExceptionMappingService().map(e, file);
}
}
}
|
@Test
public void testDeleteDirectory() throws Exception {
final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
new DAVDirectoryFeature(session).mkdir(test, new TransferStatus());
assertTrue(new DAVFindFeature(session).find(test));
new DAVTouchFeature(session).touch(new Path(test, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
new DAVDeleteFeature(session).delete(Collections.singletonMap(test, new TransferStatus()), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new DAVFindFeature(session).find(test));
}
|
public static Interval of(String interval, TimeRange timeRange) {
switch (timeRange.type()) {
case TimeRange.KEYWORD: return timestampInterval(interval);
case TimeRange.ABSOLUTE:
return ofAbsoluteRange(interval, (AbsoluteRange)timeRange);
case TimeRange.RELATIVE:
return ofRelativeRange(interval, (RelativeRange)timeRange);
}
throw new RuntimeException("Unable to parse time range type: " + timeRange.type());
}
|
@Test
public void approximatesAutoIntervalWithScalingIfAbsoluteRangeAndBeyondLimits() {
final AbsoluteRange absoluteRange = AbsoluteRange.create(
DateTime.parse("2019-12-02T12:50:23Z"),
DateTime.parse("2019-12-02T14:50:23Z")
);
final Interval interval = ApproximatedAutoIntervalFactory.of("minute", absoluteRange);
assertThat(interval).isEqualTo(AutoInterval.create(2.0));
}
|
public static String toString(InputStream input, String encoding) throws IOException {
return (null == encoding) ? toString(new InputStreamReader(input, Constants.ENCODE))
: toString(new InputStreamReader(input, encoding));
}
|
@Test
void testToStringV2() {
try {
Reader reader = new CharArrayReader("test".toCharArray());
String actualValue = MD5Util.toString(reader);
assertEquals("test", actualValue);
} catch (IOException e) {
System.out.println(e.toString());
}
}
|
@Override
public final void restore() throws Exception {
restoreInternal();
}
|
@Test
void testSubTaskInitializationMetrics() throws Exception {
StreamTaskMailboxTestHarnessBuilder<Integer> builder =
new StreamTaskMailboxTestHarnessBuilder<>(
OneInputStreamTask::new, BasicTypeInfo.INT_TYPE_INFO)
.addInput(BasicTypeInfo.INT_TYPE_INFO)
.setupOutputForSingletonOperatorChain(
new TestBoundedOneInputStreamOperator());
try (StreamTaskMailboxTestHarness<Integer> harness = builder.buildUnrestored()) {
harness.streamTask.restore();
assertThat(harness.getTaskStateManager().getReportedInitializationMetrics())
.isPresent();
}
}
|
@SuppressWarnings("ReturnValueIgnored")
void startStreams() {
getWorkStream.get();
getDataStream.get();
commitWorkStream.get();
workCommitter.get().start();
// The *stream.get() calls are all memoized in a thread-safe manner.
started.set(true);
}
|
@Test
public void testStartStream_onlyStartsStreamsOnceConcurrent() throws InterruptedException {
long itemBudget = 1L;
long byteBudget = 1L;
WindmillStreamSender windmillStreamSender =
newWindmillStreamSender(
GetWorkBudget.builder().setBytes(byteBudget).setItems(itemBudget).build());
Thread startStreamThread = new Thread(windmillStreamSender::startStreams);
startStreamThread.start();
windmillStreamSender.startStreams();
startStreamThread.join();
verify(streamFactory, times(1))
.createDirectGetWorkStream(
eq(connection),
eq(
GET_WORK_REQUEST
.toBuilder()
.setMaxItems(itemBudget)
.setMaxBytes(byteBudget)
.build()),
any(ThrottleTimer.class),
any(),
any(),
any(),
eq(workItemScheduler));
verify(streamFactory, times(1))
.createGetDataStream(eq(connection.stub()), any(ThrottleTimer.class));
verify(streamFactory, times(1))
.createCommitWorkStream(eq(connection.stub()), any(ThrottleTimer.class));
}
|
@Override
public void serialize(Asn1OutputStream out, byte[] obj) {
out.write(0);
out.write(obj);
}
|
@Test
public void shouldSerialize() {
assertArrayEquals(
new byte[] { 0, 1, 2, 3 },
serialize(new BitStringToByteArrayConverter(), byte[].class, new byte[] { 1, 2, 3 })
);
}
|
@Override
public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) {
return false;
}
if (this == o) {
return true;
}
NamingListenerInvoker that = (NamingListenerInvoker) o;
return Objects.equals(listener, that.listener);
}
|
@Test
public void testEquals() {
EventListener listener1 = mock(EventListener.class);
EventListener listener2 = mock(EventListener.class);
NamingListenerInvoker invoker1 = new NamingListenerInvoker(listener1);
NamingListenerInvoker invoker2 = new NamingListenerInvoker(listener1);
NamingListenerInvoker invoker3 = new NamingListenerInvoker(listener2);
assertEquals(invoker1.hashCode(), invoker2.hashCode());
assertEquals(invoker1, invoker2);
assertNotEquals(invoker1.hashCode(), invoker3.hashCode());
assertNotEquals(invoker1, invoker3);
}
|
public static ExpressionTree parseFilterTree(String filter) throws MetaException {
return PartFilterParser.parseFilter(filter);
}
|
@Test
public void testParseFilterWithInvalidTimestampWithType() {
MetaException exception = assertThrows(MetaException.class,
() -> PartFilterExprUtil.parseFilterTree("(j = TIMESTAMP'2023-06-02 99:35:00')"));
assertTrue(exception.getMessage().contains("Error parsing partition filter"));
}
|
public String normalizeNamespace(String appId, String namespaceName) {
AppNamespace appNamespace = appNamespaceServiceWithCache.findByAppIdAndNamespace(appId, namespaceName);
if (appNamespace != null) {
return appNamespace.getName();
}
appNamespace = appNamespaceServiceWithCache.findPublicNamespaceByName(namespaceName);
if (appNamespace != null) {
return appNamespace.getName();
}
return namespaceName;
}
|
@Test
public void testNormalizeNamespaceWithPrivateNamespace() throws Exception {
String someAppId = "someAppId";
String someNamespaceName = "someNamespaceName";
String someNormalizedNamespaceName = "someNormalizedNamespaceName";
AppNamespace someAppNamespace = mock(AppNamespace.class);
when(someAppNamespace.getName()).thenReturn(someNormalizedNamespaceName);
when(appNamespaceServiceWithCache.findByAppIdAndNamespace(someAppId, someNamespaceName)).thenReturn
(someAppNamespace);
assertEquals(someNormalizedNamespaceName, namespaceUtil.normalizeNamespace(someAppId, someNamespaceName));
verify(appNamespaceServiceWithCache, times(1)).findByAppIdAndNamespace(someAppId, someNamespaceName);
verify(appNamespaceServiceWithCache, never()).findPublicNamespaceByName(someNamespaceName);
}
|
@Override
public void execute(Exchange exchange) throws SmppException {
DataSm dataSm = createDataSm(exchange);
if (log.isDebugEnabled()) {
log.debug("Sending a data short message for exchange id '{}'...", exchange.getExchangeId());
}
DataSmResult result;
try {
result = session.dataShortMessage(
dataSm.getServiceType(),
TypeOfNumber.valueOf(dataSm.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(dataSm.getSourceAddrNpi()),
dataSm.getSourceAddr(),
TypeOfNumber.valueOf(dataSm.getDestAddrTon()),
NumberingPlanIndicator.valueOf(dataSm.getDestAddrNpi()),
dataSm.getDestAddress(),
new ESMClass(dataSm.getEsmClass()),
new RegisteredDelivery(dataSm.getRegisteredDelivery()),
DataCodings.newInstance(dataSm.getDataCoding()),
dataSm.getOptionalParameters());
} catch (Exception e) {
throw new SmppException(e);
}
if (log.isDebugEnabled()) {
log.debug("Sent a data short message for exchange id '{}' and message id '{}'",
exchange.getExchangeId(), result.getMessageId());
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, result.getMessageId());
message.setHeader(SmppConstants.OPTIONAL_PARAMETERS, createOptionalParameterByName(result.getOptionalParameters()));
message.setHeader(SmppConstants.OPTIONAL_PARAMETER, createOptionalParameterByCode(result.getOptionalParameters()));
}
|
@Test
public void execute() throws Exception {
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "DataSm");
exchange.getIn().setHeader(SmppConstants.SERVICE_TYPE, "XXX");
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_TON, TypeOfNumber.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR_NPI, NumberingPlanIndicator.NATIONAL.value());
exchange.getIn().setHeader(SmppConstants.SOURCE_ADDR, "1818");
exchange.getIn().setHeader(SmppConstants.DEST_ADDR_TON, TypeOfNumber.INTERNATIONAL.value());
exchange.getIn().setHeader(SmppConstants.DEST_ADDR_NPI, NumberingPlanIndicator.INTERNET.value());
exchange.getIn().setHeader(SmppConstants.DEST_ADDR, "1919");
exchange.getIn().setHeader(SmppConstants.REGISTERED_DELIVERY,
new RegisteredDelivery(SMSCDeliveryReceipt.FAILURE).value());
when(session.dataShortMessage(eq("XXX"), eq(TypeOfNumber.NATIONAL), eq(NumberingPlanIndicator.NATIONAL), eq("1818"),
eq(TypeOfNumber.INTERNATIONAL), eq(NumberingPlanIndicator.INTERNET), eq("1919"), eq(new ESMClass()),
eq(new RegisteredDelivery((byte) 2)), eq(DataCodings.newInstance((byte) 0))))
.thenReturn(new DataSmResult(new MessageId("1"), null));
command.execute(exchange);
assertEquals("1", exchange.getMessage().getHeader(SmppConstants.ID));
assertNull(exchange.getMessage().getHeader(SmppConstants.OPTIONAL_PARAMETERS));
}
|
public SmppCommand createSmppCommand(SMPPSession session, Exchange exchange) {
SmppCommandType commandType = SmppCommandType.fromExchange(exchange);
return commandType.createCommand(session, configuration);
}
|
@Test
public void createSmppQuerySmCommand() {
SMPPSession session = new SMPPSession();
Exchange exchange = new DefaultExchange(new DefaultCamelContext());
exchange.getIn().setHeader(SmppConstants.COMMAND, "QuerySm");
SmppCommand command = binding.createSmppCommand(session, exchange);
assertTrue(command instanceof SmppQuerySmCommand);
}
|
public static <T> void invokeAll(List<Callable<T>> callables, long timeoutMs)
throws TimeoutException, ExecutionException {
ExecutorService service = Executors.newCachedThreadPool();
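// Use a dedicated pool so the finally block can cancel still-running tasks via shutdownNow().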
try {
invokeAll(service, callables, timeoutMs);
} finally {
service.shutdownNow();
}
}
|
@Test
public void invokeAllPropagatesExceptionWithTimeout() throws Exception {
int numTasks = 5;
final AtomicInteger id = new AtomicInteger();
List<Callable<Void>> tasks = new ArrayList<>();
final Exception testException = new Exception("test message");
for (int i = 0; i < numTasks; i++) {
tasks.add(() -> {
int myId = id.incrementAndGet();
// The 3rd task throws an exception, other tasks sleep.
if (myId == 3) {
throw testException;
} else {
Thread.sleep(10 * Constants.SECOND_MS);
}
return null;
});
}
try {
CommonUtils.invokeAll(tasks, 500);
fail("Expected an exception to be thrown");
} catch (ExecutionException e) {
assertSame(testException, e.getCause());
}
}
|
@Override
public void init(File dataFile, @Nullable Set<String> fieldsToRead, @Nullable RecordReaderConfig recordReaderConfig)
throws IOException {
File parquetFile = RecordReaderUtils.unpackIfRequired(dataFile, EXTENSION);
if (recordReaderConfig != null && ((ParquetRecordReaderConfig) recordReaderConfig).useParquetAvroRecordReader()) {
_internalParquetRecordReader = new ParquetAvroRecordReader();
} else if (recordReaderConfig != null
&& ((ParquetRecordReaderConfig) recordReaderConfig).useParquetNativeRecordReader()) {
_useAvroParquetRecordReader = false;
_internalParquetRecordReader = new ParquetNativeRecordReader();
} else {
// No reader type specified. Determine using file metadata
if (ParquetUtils.hasAvroSchemaInFileMetadata(new Path(parquetFile.getAbsolutePath()))) {
_internalParquetRecordReader = new ParquetAvroRecordReader();
} else {
_useAvroParquetRecordReader = false;
_internalParquetRecordReader = new ParquetNativeRecordReader();
}
}
_internalParquetRecordReader.init(parquetFile, fieldsToRead, recordReaderConfig);
}
|
@Test
public void testParquetAvroRecordReader()
throws IOException {
ParquetAvroRecordReader avroRecordReader = new ParquetAvroRecordReader();
avroRecordReader.init(_dataFile, null, new ParquetRecordReaderConfig());
testReadParquetFile(avroRecordReader, SAMPLE_RECORDS_SIZE);
}
|
@Override
public Optional<Language> find(String languageKey) {
return Optional.ofNullable(languagesByKey.get(languageKey));
}
|
@Test
public void find_by_other_key_returns_absent() {
LanguageRepositoryImpl languageRepository = new LanguageRepositoryImpl(SOME_LANGUAGE);
Optional<Language> language = languageRepository.find(ANY_KEY);
assertThat(language).isEmpty();
}
|
@Override
public HttpResponseOutputStream<File> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final String location = new StoregateWriteFeature(session, fileid).start(file, status);
final MultipartOutputStream proxy = new MultipartOutputStream(location, file, status);
return new HttpResponseOutputStream<File>(new MemorySegementingOutputStream(proxy,
new HostPreferences(session.getHost()).getInteger("storegate.upload.multipart.chunksize")),
new StoregateAttributesFinderFeature(session, fileid), status) {
@Override
public File getStatus() {
return proxy.getResult();
}
};
}
|
@Test
public void testWriteZeroLength() throws Exception {
final StoregateIdProvider nodeid = new StoregateIdProvider(session);
final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final TransferStatus status = new TransferStatus();
final Path test = new Path(room, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
final StoregateMultipartWriteFeature writer = new StoregateMultipartWriteFeature(session, nodeid);
final HttpResponseOutputStream<File> out = writer.write(test, status, new DisabledConnectionCallback());
assertNotNull(out);
new StreamCopier(status, status).transfer(new NullInputStream(0L), out);
assertEquals(0L, out.getStatus().getSize(), 0L);
final String version = out.getStatus().getId();
assertNotNull(version);
assertTrue(new DefaultFindFeature(session).find(test));
new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
@Override
public boolean contains(Object o) {
for (M member : members) {
if (selector.select(member) && o.equals(member)) {
return true;
}
}
return false;
}
|
@Test
public void testDoesNotContainNonMatchingMemberWhenLiteMembersSelected() {
Collection<MemberImpl> collection = new MemberSelectingCollection<>(members, LITE_MEMBER_SELECTOR);
assertFalse(collection.contains(dataMember));
}
|
@Override
public S3ClientBuilder createBuilder(S3Options s3Options) {
return createBuilder(S3Client.builder(), s3Options);
}
|
@Test
public void testEmptyOptions() {
DefaultS3ClientBuilderFactory.createBuilder(builder, s3Options);
verifyNoInteractions(builder);
}
|
@VisibleForTesting
RowExpression getScopedCanonical(RowExpression expression, Predicate<VariableReferenceExpression> variableScope)
{
RowExpression canonicalIndex = canonicalMap.get(expression);
if (canonicalIndex == null) {
return null;
}
return getCanonical(filter(equalitySets.get(canonicalIndex), variableToExpressionPredicate(variableScope)));
}
|
@Test
public void testEqualityGeneration()
{
EqualityInference.Builder builder = new EqualityInference.Builder(METADATA);
builder.addEquality(variable("a1"), add("b", "c")); // a1 = b + c
builder.addEquality(variable("e1"), add("b", "d")); // e1 = b + d
addEquality("c", "d", builder);
EqualityInference inference = builder.build();
RowExpression scopedCanonical = inference.getScopedCanonical(variable("e1"), variableBeginsWith("a"));
assertEquals(scopedCanonical, variable("a1"));
}
|
@Override
public int getMaxParallelism() {
return parallelismInfo.getMaxParallelism();
}
|
@Test
void testConfiguredMaxParallelismIsRespected() throws Exception {
final int configuredMaxParallelism = 12;
final int defaultMaxParallelism = 13;
final ExecutionJobVertex ejv =
createDynamicExecutionJobVertex(
-1, configuredMaxParallelism, defaultMaxParallelism);
assertThat(ejv.getMaxParallelism()).isEqualTo(configuredMaxParallelism);
}
|
@Override
public boolean assign(final Map<ProcessId, ClientState> clients,
final Set<TaskId> allTaskIds,
final Set<TaskId> statefulTaskIds,
final AssignmentConfigs configs) {
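// Prefer standby placement on clients whose rack-aware tags differ from the active task's client; leftovers go to the least-loaded clients.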
final int numStandbyReplicas = configs.numStandbyReplicas();
final Set<String> rackAwareAssignmentTags = new HashSet<>(tagsFunction.apply(configs));
final Map<TaskId, Integer> tasksToRemainingStandbys = computeTasksToRemainingStandbys(
numStandbyReplicas,
statefulTaskIds
);
final Map<String, Set<String>> tagKeyToValues = new HashMap<>();
final Map<TagEntry, Set<ProcessId>> tagEntryToClients = new HashMap<>();
fillClientsTagStatistics(clients, tagEntryToClients, tagKeyToValues);
final ConstrainedPrioritySet standbyTaskClientsByTaskLoad = createLeastLoadedPrioritySetConstrainedByAssignedTask(clients);
final Map<TaskId, ProcessId> pendingStandbyTasksToClientId = new HashMap<>();
for (final TaskId statefulTaskId : statefulTaskIds) {
for (final Map.Entry<ProcessId, ClientState> entry : clients.entrySet()) {
final ProcessId clientId = entry.getKey();
final ClientState clientState = entry.getValue();
if (clientState.activeTasks().contains(statefulTaskId)) {
assignStandbyTasksToClientsWithDifferentTags(
numStandbyReplicas,
standbyTaskClientsByTaskLoad,
statefulTaskId,
clientId,
rackAwareAssignmentTags,
clients,
tasksToRemainingStandbys,
tagKeyToValues,
tagEntryToClients,
pendingStandbyTasksToClientId
);
}
}
}
if (!tasksToRemainingStandbys.isEmpty()) {
assignPendingStandbyTasksToLeastLoadedClients(clients,
numStandbyReplicas,
standbyTaskClientsByTaskLoad,
tasksToRemainingStandbys);
}
// returning false, because standby task assignment will never require a follow-up probing rebalance.
return false;
}
|
@Test
public void shouldNotAssignStandbyTasksIfThereAreNotEnoughClients() {
final Map<ProcessId, ClientState> clientStates = mkMap(
mkEntry(PID_1, createClientStateWithCapacity(PID_1, 3, mkMap(mkEntry(CLUSTER_TAG, CLUSTER_1), mkEntry(ZONE_TAG, ZONE_1)), TASK_0_0))
);
final Set<TaskId> allActiveTasks = findAllActiveTasks(clientStates);
final AssignmentConfigs assignmentConfigs = newAssignmentConfigs(1, CLUSTER_TAG, ZONE_TAG);
standbyTaskAssignor.assign(clientStates, allActiveTasks, allActiveTasks, assignmentConfigs);
assertTotalNumberOfStandbyTasksEqualsTo(clientStates, 0);
assertEquals(0, clientStates.get(PID_1).standbyTaskCount());
}
|
public IdChain get(IdChain key) {
return mapping.get(key);
}
|
@Test
public void test() {
IdChain key = new IdChain(10005L, 10006L, 10005L, 10007L, 10008L);
Assert.assertTrue(key.equals(src));
Assert.assertEquals(src, key);
IdChain val = fileMapping.get(key);
Assert.assertNotNull(val);
Assert.assertEquals(dest, val);
Long l1 = new Long(10005L);
Long l2 = new Long(10005L);
Assert.assertFalse(l1 == l2);
Assert.assertTrue(l1.equals(l2));
Long l3 = new Long(1L);
Long l4 = new Long(1L);
Assert.assertFalse(l3 == l4);
Assert.assertTrue(l3.equals(l4));
}
|
@SuppressWarnings("unchecked")
public String uploadFile(
String parentPath,
String name,
InputStream inputStream,
String mediaType,
Date modified,
String description)
throws IOException, InvalidTokenException, DestinationMemoryFullException {
String url;
try {
URIBuilder builder =
getUriBuilder()
.setPath(CONTENT_API_PATH_PREFIX + "/mounts/primary/files/put")
.setParameter("path", parentPath)
.setParameter("filename", name)
.setParameter("autorename", "true")
.setParameter("info", "true");
if (description != null && description.length() > 0) {
builder.setParameter("tags", "description=" + description);
}
if (modified != null) {
builder.setParameter("modified", Long.toString(modified.getTime()));
}
url = builder.build().toString();
} catch (URISyntaxException e) {
throw new IllegalStateException("Could not produce url.", e);
}
Request.Builder requestBuilder = getRequestBuilder(url);
RequestBody uploadBody = new InputStreamRequestBody(MediaType.parse(mediaType), inputStream);
requestBuilder.post(uploadBody);
// We need to reset the input stream because the request could already read some data
try (Response response =
getResponse(fileUploadClient, requestBuilder, inputStream::reset)) {
int code = response.code();
ResponseBody body = response.body();
if (code == 413) {
throw new DestinationMemoryFullException(
"Koofr quota exceeded", new Exception("Koofr file upload response code " + code));
}
if (code < 200 || code > 299) {
throw new KoofrClientIOException(response);
}
Map<String, Object> responseData = objectMapper.readValue(body.bytes(), Map.class);
String newName = (String) responseData.get("name");
Preconditions.checkState(
!Strings.isNullOrEmpty(newName), "Expected name value to be present in %s", responseData);
return parentPath + "/" + newName;
}
}
|
@Test
public void testUploadFileNotFound() {
server.enqueue(
new MockResponse()
.setResponseCode(404)
.setHeader("Content-Type", "application/json")
.setBody(
"{\"error\":{\"code\":\"NotFound\",\"message\":\"File not found\"},\"requestId\":\"bad2465e-300e-4079-57ad-46b256e74d21\"}"));
final InputStream inputStream = new ByteArrayInputStream(new byte[] {0, 1, 2, 3, 4});
KoofrClientIOException caughtException =
assertThrows(
KoofrClientIOException.class,
() ->
client.uploadFile(
"/path/to/folder", "image.jpg", inputStream, "image/jpeg", null, null));
Assertions.assertNotNull(caughtException);
Assertions.assertEquals(404, caughtException.getCode());
Assertions.assertEquals(
"Got error code: 404 message: Client Error body: {\"error\":{\"code\":\"NotFound\",\"message\":\"File not found\"},\"requestId\":\"bad2465e-300e-4079-57ad-46b256e74d21\"}",
caughtException.getMessage());
Assertions.assertEquals(1, server.getRequestCount());
}
|
@Override
public boolean equals(Object other) {
return super.equals(other);
}
|
@Test
public void equalsTest(){
RollbackRule rollbackRuleByClass = new NoRollbackRule(Exception.class);
RollbackRule otherRollbackRuleByClass = new NoRollbackRule(Exception.class);
Assertions.assertEquals(rollbackRuleByClass, otherRollbackRuleByClass);
RollbackRule rollbackRuleByName = new NoRollbackRule(Exception.class.getName());
RollbackRule otherRollbackRuleByName = new NoRollbackRule(Exception.class.getName());
Assertions.assertEquals(rollbackRuleByName, otherRollbackRuleByName);
NoRollbackRule otherRollbackRuleByName3 = new NoRollbackRule(Exception.class.getName());
Assertions.assertEquals("NoRollbackRule with pattern [" + Exception.class.getName() + "]", otherRollbackRuleByName3.toString());
}
|
@Override
public String toString() {
return String.format("Repeatedly.forever(%s)", subTriggers.get(REPEATED));
}
|
@Test
public void testToString() {
TriggerStateMachine trigger =
RepeatedlyStateMachine.forever(StubTriggerStateMachine.named("innerTrigger"));
assertEquals("Repeatedly.forever(innerTrigger)", trigger.toString());
}
|
@Override
public String toString() {
if (str == null)
str = attributeSelector.toStringBuilder()
.append("/Value[@number=\"")
.append(valueIndex + 1)
.append("\"]")
.toString();
return str;
}
|
@Test
public void testToString() {
ItemPointer ip = new ItemPointer(Tag.RequestAttributesSequence, 0);
ValueSelector vs = new ValueSelector(Tag.StudyInstanceUID, null, 0, ip);
assertEquals(XPATH, vs.toString());
}
|
public RetryableException() {
super();
}
|
@Test
public void testRetryableException() {
Assertions.assertThrowsExactly(RetryableException.class, () -> {
throw new RetryableException();
});
Assertions.assertThrowsExactly(RetryableException.class, () -> {
throw new RetryableException("error");
});
Assertions.assertThrowsExactly(RetryableException.class, () -> {
throw new RetryableException(new Throwable("error"));
});
Assertions.assertThrowsExactly(RetryableException.class, () -> {
throw new RetryableException("error", new Throwable("error"));
});
}
|
@Override
public @Nullable String getFilename() {
if (!isDirectory()) {
return key.substring(key.lastIndexOf('/') + 1);
}
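// Directory keys carry a trailing '/'; the root key "/" has no filename.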
if ("/".equals(key)) {
return null;
}
String keyWithoutTrailingSlash = key.substring(0, key.length() - 1);
return keyWithoutTrailingSlash.substring(keyWithoutTrailingSlash.lastIndexOf('/') + 1);
}
|
@Test
public void testGetFilename() {
assertNull(S3ResourceId.fromUri("s3://my_bucket/").getFilename());
assertEquals("abc", S3ResourceId.fromUri("s3://my_bucket/abc").getFilename());
assertEquals("abc", S3ResourceId.fromUri("s3://my_bucket/abc/").getFilename());
assertEquals("def", S3ResourceId.fromUri("s3://my_bucket/abc/def").getFilename());
assertEquals("def", S3ResourceId.fromUri("s3://my_bucket/abc/def/").getFilename());
assertEquals("xyz.txt", S3ResourceId.fromUri("s3://my_bucket/abc/xyz.txt").getFilename());
}
|
@Override
public V fetch(final K key, final long time) {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
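// Probe each underlying store and return the first non-null result.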
for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
try {
final V result = windowStore.fetch(key, time);
if (result != null) {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException(
"State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.");
}
}
return null;
}
|
@Test
public void shouldReturnEmptyIteratorIfNoData() {
try (final WindowStoreIterator<String> iterator =
windowStore.fetch("my-key", ofEpochMilli(0L), ofEpochMilli(25L))) {
assertFalse(iterator.hasNext());
}
}
|
@Override
public Processor<KIn, VIn, KOut, VOut> get() {
return new KStreamFlatTransformProcessor<>(transformerSupplier.get());
}
|
@Test
public void shouldGetFlatTransformProcessor() {
@SuppressWarnings("unchecked")
final org.apache.kafka.streams.kstream.TransformerSupplier<Number, Number, Iterable<KeyValue<Integer, Integer>>> transformerSupplier =
mock(org.apache.kafka.streams.kstream.TransformerSupplier.class);
final KStreamFlatTransform<Number, Number, Integer, Integer> processorSupplier =
new KStreamFlatTransform<>(transformerSupplier);
when(transformerSupplier.get()).thenReturn(transformer);
final Processor<Number, Number, Integer, Integer> processor = processorSupplier.get();
assertInstanceOf(KStreamFlatTransformProcessor.class, processor);
}
|
public void stop() {
try {
sharedHealthState.clearMine();
} catch (HazelcastInstanceNotActiveException | RetryableHazelcastException e) {
LOG.debug("Hazelcast is not active anymore", e);
}
}
|
@Test
void stop_whenCalled_hasNoEffect() {
underTest.stop();
verify(sharedHealthState).clearMine();
verifyNoInteractions(executorService, nodeHealthProvider);
}
|
@Override
public Map<String, String> requestParameters() {
return unmodifiableMap(requestParameters);
}
|
@Test
public void shouldReturnUnmodifiableRequestParams() throws Exception {
DefaultGoPluginApiRequest request = new DefaultGoPluginApiRequest("extension", "1.0", "request-name");
Map<String, String> requestParameters = request.requestParameters();
try {
requestParameters.put("new-key", "new-value");
fail("Should not allow modification of request params");
} catch (UnsupportedOperationException e) {
}
try {
requestParameters.remove("key");
fail("Should not allow modification of request params");
} catch (UnsupportedOperationException e) {
}
}
|
public String createULID(Message message) {
checkTimestamp(message.getTimestamp().getMillis());
try {
return createULID(message.getTimestamp().getMillis(), message.getSequenceNr());
} catch (Exception e) {
LOG.error("Exception while creating ULID.", e);
return ulid.nextULID(message.getTimestamp().getMillis());
}
}
|
@Test
public void simpleGenerate() {
final MessageULIDGenerator generator = new MessageULIDGenerator(new ULID());
final long ts = Tools.nowUTC().getMillis();
ULID.Value parsedULID = ULID.parseULID(generator.createULID(ts, 123));
assertThat(extractSequenceNr(parsedULID)).isEqualTo(123);
}
|
public boolean registerConsumer(final String group, final ClientChannelInfo clientChannelInfo,
ConsumeType consumeType, MessageModel messageModel, ConsumeFromWhere consumeFromWhere,
final Set<SubscriptionData> subList, boolean isNotifyConsumerIdsChangedEnable) {
return registerConsumer(group, clientChannelInfo, consumeType, messageModel, consumeFromWhere, subList,
isNotifyConsumerIdsChangedEnable, true);
}
|
@Test
public void registerConsumerTest() {
register();
final Set<SubscriptionData> subList = new HashSet<>();
SubscriptionData subscriptionData = new SubscriptionData(TOPIC, "*");
subList.add(subscriptionData);
consumerManager.registerConsumer(GROUP, clientChannelInfo, ConsumeType.CONSUME_PASSIVELY,
MessageModel.BROADCASTING, ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET, subList, true);
Assertions.assertThat(consumerManager.getConsumerTable().get(GROUP)).isNotNull();
}
|
public int doWork()
{
final long nowNs = nanoClock.nanoTime();
trackTime(nowNs);
int workCount = 0;
workCount += processTimers(nowNs);
if (!asyncClientCommandInFlight)
{
workCount += clientCommandAdapter.receive();
}
workCount += drainCommandQueue();
workCount += trackStreamPositions(workCount, nowNs);
workCount += nameResolver.doWork(cachedEpochClock.time());
workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
return workCount;
}
|
@Test
void shouldRemoveSingleCounter()
{
final long registrationId = driverProxy.addCounter(
COUNTER_TYPE_ID,
counterKeyAndLabel,
COUNTER_KEY_OFFSET,
COUNTER_KEY_LENGTH,
counterKeyAndLabel,
COUNTER_LABEL_OFFSET,
COUNTER_LABEL_LENGTH);
driverConductor.doWork();
final long removeCorrelationId = driverProxy.removeCounter(registrationId);
driverConductor.doWork();
final ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
final InOrder inOrder = inOrder(mockClientProxy);
inOrder.verify(mockClientProxy).onCounterReady(eq(registrationId), captor.capture());
inOrder.verify(mockClientProxy).operationSucceeded(removeCorrelationId);
verify(spyCountersManager).free(captor.getValue());
}
|
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"})
void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas,
MigrationDecisionCallback callback) {
assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: "
+ Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas);
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas));
logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas));
}
initState(oldReplicas);
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
// fix cyclic partition replica movements
if (fixCycle(oldReplicas, newReplicas)) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId,
Arrays.toString(newReplicas));
}
}
int currentIndex = 0;
while (currentIndex < oldReplicas.length) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex,
Arrays.toString(state));
}
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
if (newReplicas[currentIndex] == null) {
if (state[currentIndex] != null) {
// replica owner is removed and no one will own this replica
logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1);
state[currentIndex] = null;
}
currentIndex++;
continue;
}
if (state[currentIndex] == null) {
int i = getReplicaIndex(state, newReplicas[currentIndex]);
if (i == -1) {
// fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner
logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (i > currentIndex) {
// SHIFT UP replica from i to currentIndex, copy data from partition owner
logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId,
state[i], i, currentIndex);
callback.migrate(null, -1, -1, state[i], i, currentIndex);
state[currentIndex] = state[i];
state[i] = null;
continue;
}
throw new AssertionError("partitionId=" + partitionId
+ "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas)
+ ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas));
}
if (newReplicas[currentIndex].equals(state[currentIndex])) {
// no change, no action needed
currentIndex++;
continue;
}
if (getReplicaIndex(newReplicas, state[currentIndex]) == -1
&& getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
// MOVE partition replica from its old owner to new owner
logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
int newIndex = getReplicaIndex(newReplicas, state[currentIndex]);
assert newIndex > currentIndex : "partitionId=" + partitionId
+ ", Migration decision algorithm failed during SHIFT DOWN! INITIAL: "
+ Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state)
+ ", FINAL: " + Arrays.toString(newReplicas);
if (state[newIndex] == null) {
// it is a SHIFT DOWN
logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId,
state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex);
state[newIndex] = state[currentIndex];
} else {
logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
}
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex);
}
assert Arrays.equals(state, newReplicas)
: "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas)
+ " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas);
}
|
@Test
public void test_SHIFT_DOWN_withNullNonNullKeepReplicaIndex() throws UnknownHostException {
final PartitionReplica[] oldReplicas = {
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
new PartitionReplica(new Address("localhost", 5702), uuids[1]),
new PartitionReplica(new Address("localhost", 5703), uuids[2]),
null,
null,
null,
null,
};
final PartitionReplica[] newReplicas = {
new PartitionReplica(new Address("localhost", 5704), uuids[3]),
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
new PartitionReplica(new Address("localhost", 5703), uuids[2]),
null,
null,
null,
null,
};
migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback);
verify(callback).migrate(new PartitionReplica(new Address("localhost", 5701), uuids[0]), 0, -1, new PartitionReplica(new Address("localhost", 5704), uuids[3]), -1, 0);
verify(callback).migrate(new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, -1, new PartitionReplica(new Address("localhost", 5701), uuids[0]), -1, 1);
}
|
@RequestMapping(value = "", method = RequestMethod.POST)
public ResponseEntity<String> postCluster(@RequestParam(required = false) String product,
@RequestParam(required = false) String cluster, @RequestParam(name = "ips") String ips) {
//1. prepare the storage name for product and cluster
String productName = addressServerGeneratorManager.generateProductName(product);
String clusterName = addressServerManager.getDefaultClusterNameIfEmpty(cluster);
//2. prepare the response name for product and cluster to client
String rawProductName = addressServerManager.getRawProductName(product);
String rawClusterName = addressServerManager.getRawClusterName(cluster);
Loggers.ADDRESS_LOGGER.info("put cluster node, the cluster name is " + cluster + "; the product name=" + product
+ "; the ip list=" + ips);
ResponseEntity<String> responseEntity;
try {
String serviceName = addressServerGeneratorManager.generateNacosServiceName(productName);
Result result = registerCluster(serviceName, rawProductName, clusterName, ips);
if (InternetAddressUtil.checkOK(result.getCheckResult())) {
responseEntity = ResponseEntity
.ok("product=" + rawProductName + ",cluster=" + rawClusterName + "; put success with size="
+ result.getSize());
} else {
responseEntity = ResponseEntity.status(HttpStatus.BAD_REQUEST).body(result.getCheckResult());
}
} catch (Exception e) {
responseEntity = ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(e.getMessage());
}
return responseEntity;
}
|
@Test
void testPostCluster() throws Exception {
mockMvc.perform(post("/nacos/v1/as/nodes").param("product", "default").param("cluster", "serverList")
.param("ips", "192.168.3.1,192.168.3.2")).andExpect(status().isOk());
}
|
public static void getSemanticPropsSingleFromString(
SingleInputSemanticProperties result,
String[] forwarded,
String[] nonForwarded,
String[] readSet,
TypeInformation<?> inType,
TypeInformation<?> outType) {
getSemanticPropsSingleFromString(
result, forwarded, nonForwarded, readSet, inType, outType, false);
}
|
@Test
void testForwardedInvalidTargetFieldType2() {
String[] forwardedFields = {"f2.*->*"};
SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
assertThatThrownBy(
() ->
SemanticPropUtil.getSemanticPropsSingleFromString(
sp,
forwardedFields,
null,
null,
pojoInTupleType,
pojo2Type))
.isInstanceOf(InvalidSemanticAnnotationException.class);
}
|
public void reportMeasurement(final long durationNs)
{
if (!maxCycleTime.isClosed())
{
maxCycleTime.proposeMaxOrdered(durationNs);
if (durationNs > cycleTimeThresholdNs)
{
cycleTimeThresholdExceededCount.incrementOrdered();
}
}
}
|
@Test
void reportMeasurementIsANoOpIfMaxCounterIsClosed()
{
final AtomicCounter maxCycleTime = mock(AtomicCounter.class);
when(maxCycleTime.isClosed()).thenReturn(true);
final AtomicCounter cycleTimeThresholdExceededCount = mock(AtomicCounter.class);
final DutyCycleStallTracker dutyCycleStallTracker =
new DutyCycleStallTracker(maxCycleTime, cycleTimeThresholdExceededCount, 1);
dutyCycleStallTracker.reportMeasurement(555);
verify(maxCycleTime).isClosed();
verifyNoMoreInteractions(maxCycleTime);
verifyNoInteractions(cycleTimeThresholdExceededCount);
}
|
@Override
public KeyValueIterator<K, V> reverseAll() {
final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
@Override
public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
try {
return store.reverseAll();
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
}
}
};
final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
return new DelegatingPeekingKeyValueIterator<>(
storeName,
new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
|
@Test
public void shouldThrowInvalidStoreExceptionOnReverseAllDuringRebalance() {
assertThrows(InvalidStateStoreException.class, () -> rebalancing().reverseAll());
}
|
@Override
public void executeUpdate(final UnregisterStorageUnitStatement sqlStatement, final ContextManager contextManager) {
if (!sqlStatement.isIfExists()) {
checkExisted(sqlStatement.getStorageUnitNames());
}
checkInUsed(sqlStatement);
try {
contextManager.getPersistServiceFacade().getMetaDataManagerPersistService().unregisterStorageUnits(database.getName(), sqlStatement.getStorageUnitNames());
} catch (final SQLException | ShardingSphereServerException ex) {
throw new StorageUnitsOperateException("unregister", sqlStatement.getStorageUnitNames(), ex);
}
}
|
@Test
void assertExecuteUpdateSuccess() throws SQLException {
when(database.getRuleMetaData().getInUsedStorageUnitNameAndRulesMap()).thenReturn(Collections.emptyMap());
UnregisterStorageUnitStatement sqlStatement = new UnregisterStorageUnitStatement(Collections.singleton("foo_ds"), false, false);
executor.executeUpdate(sqlStatement, contextManager);
verify(metaDataManagerPersistService).unregisterStorageUnits("foo_db", sqlStatement.getStorageUnitNames());
}
|
void appendMergeClause(StringBuilder sb) {
sb.append("MERGE INTO ");
dialect.quoteIdentifier(sb, jdbcTable.getExternalNameList());
sb.append(' ');
appendFieldNames(sb, jdbcTable.dbFieldNames());
}
|
@Test
void appendMergeClause() {
H2UpsertQueryBuilder builder = new H2UpsertQueryBuilder(table, dialect);
StringBuilder sb = new StringBuilder();
builder.appendMergeClause(sb);
String mergeClause = sb.toString();
assertThat(mergeClause).isEqualTo("MERGE INTO \"table1\" (\"field1\",\"field2\")");
}
|
@Override
public RelativeRange apply(final Period period) {
if (period != null) {
return RelativeRange.Builder.builder()
.from(period.withYears(0).withMonths(0).plusDays(period.getYears() * 365).plusDays(period.getMonths() * 30).toStandardSeconds().getSeconds())
.build();
} else {
return null;
}
}
|
@Test
void testMixedPeriodConversion() {
final RelativeRange result = converter.apply(Period.hours(1).plusMinutes(10).plusSeconds(7));
verifyResult(result, 4207);
}
|
@Udf
public <T> List<T> except(
@UdfParameter(description = "Array of values") final List<T> left,
@UdfParameter(description = "Array of exceptions") final List<T> right) {
if (left == null || right == null) {
return null;
}
final Set<T> distinctRightValues = new HashSet<>(right);
final Set<T> distinctLeftValues = new LinkedHashSet<>(left);
return distinctLeftValues
.stream()
.filter(e -> !distinctRightValues.contains(e))
.collect(Collectors.toList());
}
|
@Test
public void shouldDistinctValuesForEmptyExceptionArray() {
final List<String> input1 = Arrays.asList("foo", "foo", "bar", "foo");
final List<String> input2 = Arrays.asList();
final List<String> result = udf.except(input1, input2);
assertThat(result, contains("foo", "bar"));
}
|
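// A minimal usage sketch for the except UDF above. The enclosing class name is not
// shown in this pair, so `ArrayExcept` is a hypothetical name used only for
// illustration. It shows the two behaviours encoded in the method: the result is
// de-duplicated and keeps first-seen order from the left array, and a null on
// either side propagates as null.
class ArrayExceptSketch {
    public static void main(String[] args) {
        final ArrayExcept udf = new ArrayExcept(); // hypothetical class name
        final java.util.List<String> left = java.util.Arrays.asList("a", "b", "a", "c");
        final java.util.List<String> right = java.util.Arrays.asList("b");
        System.out.println(udf.except(left, right)); // [a, c] - duplicates removed, order kept
        System.out.println(udf.except(left, null));  // null - null input propagates
    }
}
|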
@Udf
public boolean check(@UdfParameter(description = "The input JSON string") final String input) {
if (input == null) {
return false;
}
try {
return !UdfJsonMapper.parseJson(input).isMissingNode();
} catch (KsqlFunctionException e) {
return false;
}
}
|
@Test
public void shouldNotInterpretNull() {
assertFalse(udf.check(null));
}
|
@Override
public ObjectNode encode(Criterion criterion, CodecContext context) {
EncodeCriterionCodecHelper encoder = new EncodeCriterionCodecHelper(criterion, context);
return encoder.encode();
}
|
@Test
public void matchIPSrcTest() {
Criterion criterion = Criteria.matchIPSrc(ipPrefix4);
ObjectNode result = criterionCodec.encode(criterion, context);
assertThat(result, matchesCriterion(criterion));
}
|
@Override
protected void registerMetadata(final MetaDataRegisterDTO metaDataDTO) {
MetaDataService metaDataService = getMetaDataService();
MetaDataDO exist = metaDataService.findByPath(metaDataDTO.getPath());
metaDataService.saveOrUpdateMetaData(exist, metaDataDTO);
}
|
@Test
public void testRegisterMetadata() {
MetaDataDO metaDataDO = MetaDataDO.builder().build();
when(metaDataService.findByPath(any())).thenReturn(metaDataDO);
MetaDataRegisterDTO metaDataDTO = MetaDataRegisterDTO.builder().build();
shenyuClientRegisterMotanService.registerMetadata(metaDataDTO);
verify(metaDataService).saveOrUpdateMetaData(metaDataDO, metaDataDTO);
}
|
@Override
public Collection<SchemaMetaData> load(final MetaDataLoaderMaterial material) throws SQLException {
try (Connection connection = material.getDataSource().getConnection()) {
Collection<String> schemaNames = SchemaMetaDataLoader.loadSchemaNames(connection, TypedSPILoader.getService(DatabaseType.class, "PostgreSQL"));
Map<String, Multimap<String, IndexMetaData>> schemaIndexMetaDataMap = loadIndexMetaDataMap(connection, schemaNames);
Map<String, Multimap<String, ColumnMetaData>> schemaColumnMetaDataMap = loadColumnMetaDataMap(connection, material.getActualTableNames(), schemaNames);
Map<String, Multimap<String, ConstraintMetaData>> schemaConstraintMetaDataMap = loadConstraintMetaDataMap(connection, schemaNames);
Map<String, Collection<String>> schemaViewNames = loadViewNames(connection, schemaNames, material.getActualTableNames());
Collection<SchemaMetaData> result = new LinkedList<>();
for (String each : schemaNames) {
Multimap<String, IndexMetaData> tableIndexMetaDataMap = schemaIndexMetaDataMap.getOrDefault(each, LinkedHashMultimap.create());
Multimap<String, ColumnMetaData> tableColumnMetaDataMap = schemaColumnMetaDataMap.getOrDefault(each, LinkedHashMultimap.create());
Multimap<String, ConstraintMetaData> tableConstraintMetaDataMap = schemaConstraintMetaDataMap.getOrDefault(each, LinkedHashMultimap.create());
Collection<String> viewNames = schemaViewNames.getOrDefault(each, Collections.emptySet());
result.add(new SchemaMetaData(each, createTableMetaDataList(tableIndexMetaDataMap, tableColumnMetaDataMap, tableConstraintMetaDataMap, viewNames)));
}
return result;
}
}
|
@Test
void assertLoadWithoutTables() throws SQLException {
DataSource dataSource = mockDataSource();
ResultSet schemaResultSet = mockSchemaMetaDataResultSet();
when(dataSource.getConnection().getMetaData().getSchemas()).thenReturn(schemaResultSet);
ResultSet tableResultSet = mockTableMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(TABLE_META_DATA_SQL_WITHOUT_TABLES).executeQuery()).thenReturn(tableResultSet);
ResultSet primaryKeyResultSet = mockPrimaryKeyMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(PRIMARY_KEY_META_DATA_SQL).executeQuery()).thenReturn(primaryKeyResultSet);
ResultSet indexResultSet = mockIndexMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(BASIC_INDEX_META_DATA_SQL).executeQuery()).thenReturn(indexResultSet);
ResultSet advanceIndexResultSet = mockAdvanceIndexMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(ADVANCE_INDEX_META_DATA_SQL).executeQuery()).thenReturn(advanceIndexResultSet);
ResultSet constraintResultSet = mockConstraintMetaDataResultSet();
when(dataSource.getConnection().prepareStatement(BASIC_CONSTRAINT_META_DATA_SQL).executeQuery()).thenReturn(constraintResultSet);
ResultSet roleTableGrantsResultSet = mockRoleTableGrantsResultSet();
when(dataSource.getConnection().prepareStatement(startsWith(LOAD_ALL_ROLE_TABLE_GRANTS_SQL)).executeQuery()).thenReturn(roleTableGrantsResultSet);
assertTableMetaDataMap(getDialectTableMetaDataLoader().load(new MetaDataLoaderMaterial(Collections.emptyList(), dataSource, new PostgreSQLDatabaseType(), "sharding_db")));
}
|
public ConfigTransformerResult transform(Map<String, String> configs) {
Map<String, Map<String, Set<String>>> keysByProvider = new HashMap<>();
Map<String, Map<String, Map<String, String>>> lookupsByProvider = new HashMap<>();
// Collect the variables from the given configs that need transformation
for (Map.Entry<String, String> config : configs.entrySet()) {
if (config.getValue() != null) {
List<ConfigVariable> configVars = getVars(config.getValue(), DEFAULT_PATTERN);
for (ConfigVariable configVar : configVars) {
Map<String, Set<String>> keysByPath = keysByProvider.computeIfAbsent(configVar.providerName, k -> new HashMap<>());
Set<String> keys = keysByPath.computeIfAbsent(configVar.path, k -> new HashSet<>());
keys.add(configVar.variable);
}
}
}
// Retrieve requested variables from the ConfigProviders
Map<String, Long> ttls = new HashMap<>();
for (Map.Entry<String, Map<String, Set<String>>> entry : keysByProvider.entrySet()) {
String providerName = entry.getKey();
ConfigProvider provider = configProviders.get(providerName);
Map<String, Set<String>> keysByPath = entry.getValue();
if (provider != null && keysByPath != null) {
for (Map.Entry<String, Set<String>> pathWithKeys : keysByPath.entrySet()) {
String path = pathWithKeys.getKey();
Set<String> keys = new HashSet<>(pathWithKeys.getValue());
ConfigData configData = provider.get(path, keys);
Map<String, String> data = configData.data();
Long ttl = configData.ttl();
if (ttl != null && ttl >= 0) {
ttls.put(path, ttl);
}
Map<String, Map<String, String>> keyValuesByPath =
lookupsByProvider.computeIfAbsent(providerName, k -> new HashMap<>());
keyValuesByPath.put(path, data);
}
}
}
// Perform the transformations by performing variable replacements
Map<String, String> data = new HashMap<>(configs);
for (Map.Entry<String, String> config : configs.entrySet()) {
data.put(config.getKey(), replace(lookupsByProvider, config.getValue(), DEFAULT_PATTERN));
}
return new ConfigTransformerResult(data, ttls);
}
|
@Test
public void testReplaceMultipleVariablesWithoutPathInValue() {
ConfigTransformerResult result = configTransformer.transform(Collections.singletonMap(MY_KEY, "first ${test:testKey}; second ${test:testKey}"));
Map<String, String> data = result.data();
assertEquals("first testResultNoPath; second testResultNoPath", data.get(MY_KEY));
}
|
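// A minimal sketch of driving the transform(...) method above with an inline
// ConfigProvider (org.apache.kafka.common.config.provider.ConfigProvider).
// Variables use the ${provider:path:key} syntax; the provider and the fixed
// "secret" value here are illustrative, not part of the original pair.
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.config.ConfigData;
import org.apache.kafka.common.config.ConfigTransformer;
import org.apache.kafka.common.config.provider.ConfigProvider;

class ConfigTransformerSketch {
    public static void main(String[] args) {
        ConfigProvider provider = new ConfigProvider() {
            public void configure(Map<String, ?> configs) { }
            public ConfigData get(String path) {
                return new ConfigData(Collections.emptyMap());
            }
            public ConfigData get(String path, Set<String> keys) {
                // Resolve every requested key to a fixed value for the demo.
                return new ConfigData(Collections.singletonMap("password", "secret"));
            }
            public void close() { }
        };
        ConfigTransformer transformer = new ConfigTransformer(Collections.singletonMap("vault", provider));
        Map<String, String> configs = Collections.singletonMap("db.pass", "${vault:creds:password}");
        System.out.println(transformer.transform(configs).data()); // {db.pass=secret}
    }
}
|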
MethodSpec buildFunction(AbiDefinition functionDefinition) throws ClassNotFoundException {
return buildFunction(functionDefinition, true);
}
|
@Test
public void testBuildFunctionConstantMultiDynamicArrayRawListReturn() throws Exception {
AbiDefinition functionDefinition =
new AbiDefinition(
true,
Arrays.asList(new NamedType("param", "uint8[][]")),
"functionName",
Arrays.asList(new NamedType("result", "address[]")),
"type",
false);
MethodSpec methodSpec = solidityFunctionWrapper.buildFunction(functionDefinition);
String expected =
"public org.web3j.protocol.core.RemoteFunctionCall<java.util.List> functionName(\n"
+ " java.util.List<java.util.List<java.math.BigInteger>> param) {\n"
+ " final org.web3j.abi.datatypes.Function function = new org.web3j.abi.datatypes.Function(FUNC_FUNCTIONNAME, \n"
+ " java.util.Arrays.<org.web3j.abi.datatypes.Type>asList(new org.web3j.abi.datatypes.DynamicArray<org.web3j.abi.datatypes.DynamicArray>(\n"
+ " org.web3j.abi.datatypes.DynamicArray.class,\n"
+ " org.web3j.abi.Utils.typeMap(param, org.web3j.abi.datatypes.DynamicArray.class,\n"
+ " org.web3j.abi.datatypes.generated.Uint8.class))), \n"
+ " java.util.Arrays.<org.web3j.abi.TypeReference<?>>asList(new org.web3j.abi.TypeReference<org.web3j.abi.datatypes.DynamicArray<org.web3j.abi.datatypes.Address>>() {}));\n"
+ " return new org.web3j.protocol.core.RemoteFunctionCall<java.util.List>(function,\n"
+ " new java.util.concurrent.Callable<java.util.List>() {\n"
+ " @java.lang.Override\n"
+ " @java.lang.SuppressWarnings(\"unchecked\")\n"
+ " public java.util.List call() throws java.lang.Exception {\n"
+ " java.util.List<org.web3j.abi.datatypes.Type> result = (java.util.List<org.web3j.abi.datatypes.Type>) executeCallSingleValueReturn(function, java.util.List.class);\n"
+ " return convertToNative(result);\n"
+ " }\n"
+ " });\n"
+ "}\n";
assertEquals((expected), methodSpec.toString());
}
|
@Override
public void execute(Context context) {
try (StreamWriter<ProjectDump.Plugin> writer = dumpWriter.newStreamWriter(DumpElement.PLUGINS)) {
Collection<PluginInfo> plugins = pluginRepository.getPluginInfos();
for (PluginInfo plugin : plugins) {
ProjectDump.Plugin.Builder builder = ProjectDump.Plugin.newBuilder();
writer.write(convert(plugin, builder));
}
LoggerFactory.getLogger(getClass()).debug("{} plugins exported", plugins.size());
}
}
|
@Test
public void export_zero_plugins() {
when(pluginRepository.getPluginInfos()).thenReturn(Collections.emptyList());
underTest.execute(new TestComputationStepContext());
assertThat(dumpWriter.getWrittenMessagesOf(DumpElement.PLUGINS)).isEmpty();
assertThat(logTester.logs(Level.DEBUG)).contains("0 plugins exported");
}
|
@Bean
public ShenyuClientRegisterRepository shenyuClientRegisterRepository(final ShenyuRegisterCenterConfig config) {
return ShenyuClientRegisterRepositoryFactory.newInstance(config);
}
|
@Test
public void testShenyuClientRegisterRepository() {
MockedStatic<RegisterUtils> registerUtilsMockedStatic = mockStatic(RegisterUtils.class);
registerUtilsMockedStatic.when(() -> RegisterUtils.doLogin(any(), any(), any())).thenReturn(Optional.ofNullable("token"));
applicationContextRunner.run(context -> {
ShenyuClientRegisterRepository repository = context.getBean("shenyuClientRegisterRepository", ShenyuClientRegisterRepository.class);
assertNotNull(repository);
});
registerUtilsMockedStatic.close();
}
|
private List<Configserver> getConfigServers(DeployState deployState, TreeConfigProducer<AnyConfigProducer> parent, Element adminE) {
Element configserversE = XML.getChild(adminE, "configservers");
if (configserversE == null) {
Element adminserver = XML.getChild(adminE, "adminserver");
if (adminserver == null) {
return createSingleConfigServer(deployState, parent);
} else {
SimpleConfigProducer<AnyConfigProducer> configServers = new SimpleConfigProducer<>(parent, "configservers");
return List.of(new ConfigserverBuilder(0, configServerSpecs).build(deployState, configServers, adminserver));
}
}
else {
SimpleConfigProducer<AnyConfigProducer> configServers = new SimpleConfigProducer<>(parent, "configservers");
List<Configserver> configservers = new ArrayList<>();
int i = 0;
for (Element configserverE : XML.getChildren(configserversE, "configserver"))
configservers.add(new ConfigserverBuilder(i++, configServerSpecs).build(deployState, configServers, configserverE));
return configservers;
}
}
|
@Test
void adminWithConfigserversElement() {
Admin admin = buildAdmin(servicesConfigservers());
assertEquals(1, admin.getConfigservers().size());
}
|
public void setContract(@Nullable Produce contract)
{
this.contract = contract;
setStoredContract(contract);
handleContractState();
}
|
@Test
public void cabbageContractOnionHarvestableAndPotatoHarvestable()
{
final long unixNow = Instant.now().getEpochSecond();
// Get the two allotment patches
final FarmingPatch patch1 = farmingGuildPatches.get(Varbits.FARMING_4773);
final FarmingPatch patch2 = farmingGuildPatches.get(Varbits.FARMING_4774);
assertNotNull(patch1);
assertNotNull(patch2);
// Specify the two allotment patches
when(farmingTracker.predictPatch(patch1))
.thenReturn(new PatchPrediction(Produce.ONION, CropState.HARVESTABLE, unixNow, 3, 3));
when(farmingTracker.predictPatch(patch2))
.thenReturn(new PatchPrediction(Produce.POTATO, CropState.HARVESTABLE, unixNow, 3, 3));
farmingContractManager.setContract(Produce.CABBAGE);
assertEquals(SummaryState.OCCUPIED, farmingContractManager.getSummary());
}
|
public void addObjectSink(final ObjectSink objectSink) {
this.sink = this.sink.addObjectSink( objectSink, this.alphaNodeHashingThreshold, this.alphaNodeRangeIndexThreshold );
}
|
@Test
public void testAddObjectSink() throws Exception {
final MockObjectSource source = new MockObjectSource( 15 );
// We need to re-assign this var each time the sink changes references
final Field field = ObjectSource.class.getDeclaredField( "sink" );
field.setAccessible( true );
ObjectSinkPropagator sink = (ObjectSinkPropagator) field.get( source );
assertThat(sink).isSameAs(EmptyObjectSinkAdapter.getInstance());
final MockObjectSink sink1 = new MockObjectSink();
source.addObjectSink( sink1 );
sink = (ObjectSinkPropagator) field.get( source );
assertThat(sink).isInstanceOf(SingleObjectSinkAdapter.class);
assertThat(sink.getSinks()).hasSize(1);
final MockObjectSink sink2 = new MockObjectSink();
source.addObjectSink( sink2 );
sink = (ObjectSinkPropagator) field.get( source );
assertThat(sink).isInstanceOf(CompositeObjectSinkAdapter.class);
assertThat(sink.getSinks()).hasSize(2);
final MockObjectSink sink3 = new MockObjectSink();
source.addObjectSink( sink3 );
assertThat(sink).isInstanceOf(CompositeObjectSinkAdapter.class);
assertThat(sink.getSinks()).hasSize(3);
source.removeObjectSink( sink2 );
assertThat(sink).isInstanceOf(CompositeObjectSinkAdapter.class);
assertThat(sink.getSinks()).hasSize(2);
source.removeObjectSink( sink1 );
sink = (ObjectSinkPropagator) field.get( source );
assertThat(sink).isInstanceOf(SingleObjectSinkAdapter.class);
assertThat(sink.getSinks()).hasSize(1);
source.removeObjectSink( sink3 );
sink = (ObjectSinkPropagator) field.get( source );
assertThat(sink).isSameAs(EmptyObjectSinkAdapter.getInstance());
assertThat(sink.getSinks()).hasSize(0);
}
|
public static List<String> shellSplit(CharSequence string) {
List<String> tokens = new ArrayList<>();
if ( string == null ) {
return tokens;
}
boolean escaping = false;
char quoteChar = ' ';
boolean quoting = false;
StringBuilder current = new StringBuilder();
for (int i = 0; i < string.length(); i++) {
char c = string.charAt(i);
if (escaping) {
current.append(c);
escaping = false;
} else if (c == '\\' && !(quoting && quoteChar == '\'')) {
escaping = true;
} else if (quoting && c == quoteChar) {
quoting = false;
} else if (!quoting && (c == '\'' || c == '"')) {
quoting = true;
quoteChar = c;
} else if (!quoting && Character.isWhitespace(c)) {
if (current.length() > 0) {
tokens.add(current.toString());
current = new StringBuilder();
}
} else {
current.append(c);
}
}
if (current.length() > 0) {
tokens.add(current.toString());
}
return tokens;
}
|
@Test
public void whitespacesOnlyYieldsEmptyArgs() {
assertTrue(StringUtils.shellSplit(" \t \n").isEmpty());
}
|
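// A quick illustration of the quoting and escaping rules implemented above:
// single quotes disable backslash escaping, double quotes allow it, and unquoted
// whitespace separates tokens. Assumes the enclosing StringUtils class from this pair.
class ShellSplitSketch {
    public static void main(String[] args) {
        System.out.println(StringUtils.shellSplit("ls -l 'my file'"));    // [ls, -l, my file]
        System.out.println(StringUtils.shellSplit("echo \"a b\" c\\ d")); // [echo, a b, c d]
        System.out.println(StringUtils.shellSplit("'a\\b'"));             // [a\b] - no escaping inside single quotes
    }
}
|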
public static ConfigurableResource parseResourceConfigValue(String value)
throws AllocationConfigurationException {
return parseResourceConfigValue(value, Long.MAX_VALUE);
}
|
@Test
public void testMemoryPercentageCpuAbsolute() throws Exception {
expectMissingResource("cpu");
parseResourceConfigValue("50% memory, 2 vcores");
}
|
public void logOnNewLeadershipTerm(
final int memberId,
final long logLeadershipTermId,
final long nextLeadershipTermId,
final long nextTermBaseLogPosition,
final long nextLogPosition,
final long leadershipTermId,
final long termBaseLogPosition,
final long logPosition,
final long leaderRecordingId,
final long timestamp,
final int leaderId,
final int logSessionId,
final int appVersion,
final boolean isStartup)
{
final int length = newLeaderShipTermLength();
final int captureLength = captureLength(length);
final int encodedLength = encodedLength(captureLength);
final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
final int index = ringBuffer.tryClaim(NEW_LEADERSHIP_TERM.toEventCodeId(), encodedLength);
if (index > 0)
{
try
{
encodeOnNewLeadershipTerm(
(UnsafeBuffer)ringBuffer.buffer(),
index,
captureLength,
length,
memberId,
logLeadershipTermId,
nextLeadershipTermId,
nextTermBaseLogPosition,
nextLogPosition,
leadershipTermId,
termBaseLogPosition,
logPosition,
leaderRecordingId,
timestamp,
leaderId,
logSessionId,
appVersion,
isStartup);
}
finally
{
ringBuffer.commit(index);
}
}
}
|
@Test
void logOnNewLeadershipTerm()
{
final int offset = align(22, ALIGNMENT);
logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);
final long logLeadershipTermId = 434;
final long nextLeadershipTermId = 2561;
final long nextTermBaseLogPosition = 2562;
final long nextLogPosition = 2563;
final long leadershipTermId = -500;
final long logPosition = 43;
final long timestamp = 2;
final int memberId = 19;
final int leaderId = -1;
final int logSessionId = 3;
final int appVersion = SemanticVersion.compose(0, 3, 9);
final int captureLength = newLeaderShipTermLength();
final boolean isStartup = true;
final long termBaseLogPosition = 982734;
final long leaderRecordingId = 76434;
logger.logOnNewLeadershipTerm(
memberId,
logLeadershipTermId,
nextLeadershipTermId,
nextTermBaseLogPosition,
nextLogPosition,
leadershipTermId,
termBaseLogPosition,
logPosition,
leaderRecordingId,
timestamp,
leaderId,
logSessionId,
appVersion,
isStartup);
verifyLogHeader(logBuffer, offset, NEW_LEADERSHIP_TERM.toEventCodeId(), captureLength, captureLength);
int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
assertEquals(logLeadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(nextLeadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(nextTermBaseLogPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(nextLogPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(leadershipTermId, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(termBaseLogPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(logPosition, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(leaderRecordingId, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(timestamp, logBuffer.getLong(index, LITTLE_ENDIAN));
index += SIZE_OF_LONG;
assertEquals(memberId, logBuffer.getInt(index, LITTLE_ENDIAN));
index += SIZE_OF_INT;
assertEquals(leaderId, logBuffer.getInt(index, LITTLE_ENDIAN));
index += SIZE_OF_INT;
assertEquals(logSessionId, logBuffer.getInt(index, LITTLE_ENDIAN));
index += SIZE_OF_INT;
assertEquals(appVersion, logBuffer.getInt(index, LITTLE_ENDIAN));
index += SIZE_OF_INT;
assertEquals(isStartup, 1 == logBuffer.getByte(index));
final StringBuilder sb = new StringBuilder();
ClusterEventDissector.dissectNewLeadershipTerm(logBuffer, encodedMsgOffset(offset), sb);
final String expectedMessagePattern = "\\[[0-9]+\\.[0-9]+] CLUSTER: NEW_LEADERSHIP_TERM " +
"\\[89/89]: memberId=19 logLeadershipTermId=434 nextLeadershipTermId=2561 " +
"nextTermBaseLogPosition=2562 nextLogPosition=2563 leadershipTermId=-500 termBaseLogPosition=982734 " +
"logPosition=43 leaderRecordingId=76434 timestamp=2 leaderId=-1 logSessionId=3 appVersion=0.3.9 " +
"isStartup=true";
assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
}
|
public List<String> build() {
return switch (dialect.getId()) {
case Oracle.ID -> forOracle(tableName);
case H2.ID, PostgreSql.ID -> singletonList("drop table if exists " + tableName);
case MsSql.ID ->
// "if exists" is supported only since MSSQL 2016.
singletonList("drop table " + tableName);
default -> throw new IllegalStateException("Unsupported DB: " + dialect.getId());
};
}
|
@Test
public void drop_tables_on_h2() {
assertThat(new DropTableBuilder(new H2(), "issues")
.build()).containsOnly("drop table if exists issues");
}
|
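// For contrast with the H2 case above, a sketch of the other branches of build():
// the "if exists" guard is kept for PostgreSQL and dropped for MSSQL. The Oracle
// branch goes through forOracle(...), whose body is not shown in this pair, so it
// is omitted here; the PostgreSql and MsSql constructors are assumed to be no-arg
// like the H2 one in the test.
class DropTableSketch {
    public static void main(String[] args) {
        System.out.println(new DropTableBuilder(new PostgreSql(), "issues").build()); // [drop table if exists issues]
        System.out.println(new DropTableBuilder(new MsSql(), "issues").build());      // [drop table issues]
    }
}
|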
public static Method getGenericAccessor(Class<?> clazz, String field) {
LOG.trace( "getGenericAccessor({}, {})", clazz, field );
AccessorCacheKey accessorCacheKey =
new AccessorCacheKey( clazz.getClassLoader(), clazz.getCanonicalName(), field );
return accessorCache.computeIfAbsent(accessorCacheKey, key ->
Stream.of( clazz.getMethods() )
.filter( m -> Optional.ofNullable( m.getAnnotation( FEELProperty.class ) )
.map( ann -> ann.value().equals( field ) )
.orElse( false )
)
.findFirst()
.orElse( getAccessor( clazz, field ) ));
}
|
@Test
void getGenericAccessor() throws NoSuchMethodException {
Method expectedAccessor = TestPojo.class.getMethod("getAProperty");
assertThat(EvalHelper.getGenericAccessor(TestPojo.class, "aProperty")).as("getGenericAccessor should work on Java bean accessors.").isEqualTo(expectedAccessor);
assertThat(EvalHelper.getGenericAccessor(TestPojo.class, "feelPropertyIdentifier")).as("getGenericAccessor should work for methods annotated with '@FEELProperty'.").isEqualTo(expectedAccessor);
}
|
@Override
public AttributeResource getAttrValue(ResName resName) {
AttributeResource attributeResource = items.get(resName);
// This hack allows us to look up attributes from downstream dependencies, see comment in
// org.robolectric.shadows.ShadowThemeTest.obtainTypedArrayFromDependencyLibrary()
// for an explanation. TODO(jongerrish): Make Robolectric use a more realistic resource merging
// scheme.
if (attributeResource == null
&& !"android".equals(resName.packageName)
&& !"android".equals(packageName)) {
attributeResource = items.get(resName.withPackageName(packageName));
if (attributeResource != null && !"android".equals(attributeResource.contextPackageName)) {
attributeResource =
new AttributeResource(resName, attributeResource.value, resName.packageName);
}
}
return attributeResource;
}
|
@Test
public void getAttrValue_willReturnTrimmedAttributeValues() throws Exception {
StyleData styleData =
new StyleData(
"library.resource",
"Theme_MyApp",
"Theme_Material",
asList(
new AttributeResource(myLibSearchViewStyle, "\n lib_value ", "library.resource")));
assertThat(styleData.getAttrValue(myAppSearchViewStyle).value).isEqualTo("\n lib_value ");
assertThat(styleData.getAttrValue(myLibSearchViewStyle).trimmedValue).isEqualTo("lib_value");
}
|
@Override
public void onStateElection(Job job, JobState newState) {
if (isNotFailed(newState) || isJobNotFoundException(newState) || isProblematicExceptionAndMustNotRetry(newState) || maxAmountOfRetriesReached(job))
return;
job.scheduleAt(now().plusSeconds(getSecondsToAdd(job)), String.format("Retry %d of %d", getFailureCount(job), getMaxNumberOfRetries(job)));
}
|
@Test
void retryFilterDoesNotScheduleJobAgainIfTheExceptionIsProblematic() {
final Job job = aFailedJob().withState(new FailedState("a message", problematicConfigurationException("big problem"))).build();
applyDefaultJobFilter(job);
int beforeVersion = job.getJobStates().size();
retryFilter.onStateElection(job, job.getJobState());
int afterVersion = job.getJobStates().size();
assertThat(afterVersion).isEqualTo(beforeVersion);
assertThat(job.getState()).isEqualTo(FAILED);
}
|
public static String SHA256(String data) {
return SHA256(data.getBytes());
}
|
@Test
public void testSHA256() throws Exception {
String biezhiSHA256 = "8fcbefd5c7a6c81165f587e46bffd821214a6fc1bc3842309f3aef6938e627a7";
Assert.assertEquals(
biezhiSHA256,
EncryptKit.SHA256("biezhi")
);
Assert.assertEquals(
biezhiSHA256,
EncryptKit.SHA256("biezhi".getBytes())
);
TestCase.assertTrue(
Arrays.equals(
ConvertKit.hexString2Bytes(biezhiSHA256),
EncryptKit.SHA256ToByte("biezhi".getBytes())
)
);
}
|
public void serialize()
throws KettleException {
String xml = MetaXmlSerializer.serialize(
from( stepMetaInterface ) ); //.encryptedFields( encryptedFields ) );
rep.saveStepAttribute( idTrans, idStep, REPO_TAG, xml );
}
|
@Test
public void testSerialize() throws KettleException {
String serialized = serialize( from( stepMeta ) );
RepoSerializer
.builder()
.repo( repo )
.stepId( stepId )
.transId( transId )
.stepMeta( stepMeta )
.serialize();
verify( repo, times( 1 ) )
.saveStepAttribute( transId, stepId, "step-xml", serialized );
}
|
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
final String json = gelfMessage.getJSON(decompressSizeLimit, charset);
final JsonNode node;
try {
node = objectMapper.readTree(json);
if (node == null) {
throw new IOException("null result");
}
} catch (final Exception e) {
log.error("Could not parse JSON, first 400 characters: " +
StringUtils.abbreviate(json, 403), e);
throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
}
try {
validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
} catch (IllegalArgumentException e) {
log.trace("Invalid GELF message <{}>", node);
throw e;
}
// Timestamp.
final double messageTimestamp = timestampValue(node);
final DateTime timestamp;
if (messageTimestamp <= 0) {
timestamp = rawMessage.getTimestamp();
} else {
// we treat this as a unix timestamp
timestamp = Tools.dateTimeFromDouble(messageTimestamp);
}
final Message message = messageFactory.createMessage(
stringValue(node, "short_message"),
stringValue(node, "host"),
timestamp
);
message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));
final String file = stringValue(node, "file");
if (file != null && !file.isEmpty()) {
message.addField("file", file);
}
final long line = longValue(node, "line");
if (line > -1) {
message.addField("line", line);
}
// Level is set by server if not specified by client.
final int level = intValue(node, "level");
if (level > -1) {
message.addField("level", level);
}
// Facility is set by server if not specified by client.
final String facility = stringValue(node, "facility");
if (facility != null && !facility.isEmpty()) {
message.addField("facility", facility);
}
// Add additional data if there is some.
final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();
while (fields.hasNext()) {
final Map.Entry<String, JsonNode> entry = fields.next();
String key = entry.getKey();
// Do not index useless GELF "version" field.
if ("version".equals(key)) {
continue;
}
// Don't include GELF syntax underscore in message field key.
if (key.startsWith("_") && key.length() > 1) {
key = key.substring(1);
}
// We already set short_message and host as message and source. Do not add as fields again.
if ("short_message".equals(key) || "host".equals(key)) {
continue;
}
// Skip standard or already set fields.
if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) {
continue;
}
// Convert JSON containers to Strings, and pick a suitable number representation.
final JsonNode value = entry.getValue();
final Object fieldValue;
if (value.isContainerNode()) {
fieldValue = value.toString();
} else if (value.isFloatingPointNumber()) {
fieldValue = value.asDouble();
} else if (value.isIntegralNumber()) {
fieldValue = value.asLong();
} else if (value.isNull()) {
log.debug("Field [{}] is NULL. Skipping.", key);
continue;
} else if (value.isTextual()) {
fieldValue = value.asText();
} else {
log.debug("Field [{}] has unknown value type. Skipping.", key);
continue;
}
message.addField(key, fieldValue);
}
return message;
}
|
@Test
public void decodeFailsWithBlankShortMessage() throws Exception {
final String json = "{"
+ "\"version\": \"1.1\","
+ "\"host\": \"example.org\","
+ "\"short_message\": \" \""
+ "}";
final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8));
assertThatIllegalArgumentException().isThrownBy(() -> codec.decode(rawMessage))
.withNoCause()
.withMessageMatching("GELF message <[0-9a-f-]+> has empty mandatory \"short_message\" field.");
}
|
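// A happy-path sketch for the decoder above, assuming the same test fixture
// (`codec`) as the blank-short_message test: "_"-prefixed keys become message
// fields with the underscore stripped, and the GELF "version" field is dropped.
@Test
public void decodeAddsAdditionalFieldWithoutUnderscore() throws Exception {
    final String json = "{"
            + "\"version\": \"1.1\","
            + "\"host\": \"example.org\","
            + "\"short_message\": \"A short message\","
            + "\"_app\": \"web\""
            + "}";
    final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8));
    final Message message = codec.decode(rawMessage);
    assertThat(message.getField("app")).isEqualTo("web"); // underscore stripped
    assertThat(message.getField("version")).isNull(); // "version" is skipped
}
|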
@Override
public V setValue(V value) {
throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void givenNewEntry_whenSetValue_thenThrowUnsupportedOperationException() {
CachedQueryEntry<Object, Object> entry = createEntry("key");
entry.setValue(new Object());
}
|
public static long getInitialSeedUniquifier() {
// Use the value set via the setter.
long initialSeedUniquifier = ThreadLocalRandom.initialSeedUniquifier;
if (initialSeedUniquifier != 0) {
return initialSeedUniquifier;
}
synchronized (ThreadLocalRandom.class) {
initialSeedUniquifier = ThreadLocalRandom.initialSeedUniquifier;
if (initialSeedUniquifier != 0) {
return initialSeedUniquifier;
}
// Get the random seed from the generator thread with timeout.
final long timeoutSeconds = 3;
final long deadLine = seedGeneratorStartTime + TimeUnit.SECONDS.toNanos(timeoutSeconds);
boolean interrupted = false;
for (;;) {
final long waitTime = deadLine - System.nanoTime();
try {
final Long seed;
if (waitTime <= 0) {
seed = seedQueue.poll();
} else {
seed = seedQueue.poll(waitTime, TimeUnit.NANOSECONDS);
}
if (seed != null) {
initialSeedUniquifier = seed;
break;
}
} catch (InterruptedException e) {
interrupted = true;
logger.warn("Failed to generate a seed from SecureRandom due to an InterruptedException.");
break;
}
if (waitTime <= 0) {
seedGeneratorThread.interrupt();
logger.warn(
"Failed to generate a seed from SecureRandom within {} seconds. " +
"Not enough entropy?", timeoutSeconds
);
break;
}
}
// Just in case the initialSeedUniquifier is zero or some other constant
initialSeedUniquifier ^= 0x3255ecdc33bae119L; // just a meaningless random number
initialSeedUniquifier ^= Long.reverse(System.nanoTime());
ThreadLocalRandom.initialSeedUniquifier = initialSeedUniquifier;
if (interrupted) {
// Restore the interrupt status because we don't know how to/don't need to handle it here.
Thread.currentThread().interrupt();
// Interrupt the generator thread if it's still running,
// in the hope that the SecureRandom provider raises an exception on interruption.
seedGeneratorThread.interrupt();
}
if (seedGeneratorEndTime == 0) {
seedGeneratorEndTime = System.nanoTime();
}
return initialSeedUniquifier;
}
}
|
@Test
public void getInitialSeedUniquifierPreservesInterrupt() {
try {
Thread.currentThread().interrupt();
assertTrue(Thread.currentThread().isInterrupted(),
"Assert that thread is interrupted before invocation of getInitialSeedUniquifier()");
ThreadLocalRandom.getInitialSeedUniquifier();
assertTrue(Thread.currentThread().isInterrupted(),
"Assert that thread is interrupted after invocation of getInitialSeedUniquifier()");
} finally {
Thread.interrupted(); // clear interrupted status in order to not affect other tests
}
}
|
@Override
public Object getObject(final int columnIndex) throws SQLException {
return mergeResultSet.getValue(columnIndex, Object.class);
}
|
@Test
void assertGetObjectWithBlob() throws SQLException {
Blob result = mock(Blob.class);
when(mergeResultSet.getValue(1, Blob.class)).thenReturn(result);
assertThat(shardingSphereResultSet.getObject(1, Blob.class), is(result));
}
|
@Override
public void removeDevice(DeviceId deviceId) {
checkNotNull(deviceId, DEVICE_ID_NULL);
DeviceEvent event = store.removeDevice(deviceId);
if (event != null) {
log.info("Device {} administratively removed", deviceId);
}
}
|
@Test
public void removeDevice() {
connectDevice(DID1, SW1);
connectDevice(DID2, SW2);
assertEquals("incorrect device count", 2, service.getDeviceCount());
assertEquals("incorrect available device count", 2, service.getAvailableDeviceCount());
admin.removeDevice(DID1);
assertNull("device should not be found", service.getDevice(DID1));
assertNotNull("device should be found", service.getDevice(DID2));
assertEquals("incorrect device count", 1, service.getDeviceCount());
assertEquals("incorrect available device count", 1, service.getAvailableDeviceCount());
}
|
@Override
protected boolean notExist() {
return Stream.of(DefaultPathConstants.PLUGIN_PARENT,
DefaultPathConstants.APP_AUTH_PARENT,
DefaultPathConstants.META_DATA,
DefaultPathConstants.PROXY_SELECTOR,
DefaultPathConstants.DISCOVERY_UPSTREAM).noneMatch(etcdClient::exists);
}
|
@Test
public void testNotExist() throws Exception {
EtcdDataChangedInit etcdDataChangedInit = new EtcdDataChangedInit(etcdClient);
assertNotNull(etcdDataChangedInit);
when(etcdClient.exists(DefaultPathConstants.PLUGIN_PARENT)).thenReturn(true);
boolean pluginExist = etcdDataChangedInit.notExist();
assertFalse(pluginExist, "plugin exist.");
when(etcdClient.exists(DefaultPathConstants.PLUGIN_PARENT)).thenReturn(false);
when(etcdClient.exists(DefaultPathConstants.APP_AUTH_PARENT)).thenReturn(true);
boolean appAuthExist = etcdDataChangedInit.notExist();
assertFalse(appAuthExist, "app auth exist.");
when(etcdClient.exists(DefaultPathConstants.APP_AUTH_PARENT)).thenReturn(false);
when(etcdClient.exists(DefaultPathConstants.META_DATA)).thenReturn(true);
boolean metaDataExist = etcdDataChangedInit.notExist();
assertFalse(metaDataExist, "metadata exist.");
when(etcdClient.exists(DefaultPathConstants.META_DATA)).thenReturn(false);
boolean metaDataNotExist = etcdDataChangedInit.notExist();
assertTrue(metaDataNotExist, "metadata not exist.");
}
|
@VisibleForTesting
static void writeFileConservatively(Path file, String content) throws IOException {
if (Files.exists(file)) {
String oldContent = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
if (oldContent.equals(content)) {
return;
}
}
Files.createDirectories(file.getParent());
Files.write(file, content.getBytes(StandardCharsets.UTF_8));
}
|
@Test
public void testWriteFileConservatively_noWriteIfUnchanged() throws IOException {
Path file = temporaryFolder.newFile().toPath();
Files.write(file, "some content".getBytes(StandardCharsets.UTF_8));
FileTime fileTime = Files.getLastModifiedTime(file);
PluginConfigurationProcessor.writeFileConservatively(file, "some content");
String content = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
assertThat(content).isEqualTo("some content");
assertThat(Files.getLastModifiedTime(file)).isEqualTo(fileTime);
}
|
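// A small sketch of the behaviour verified above: the first call creates parent
// directories and writes, the second call with identical content returns early, so
// the modification time is preserved. The target path is illustrative, and the
// sketch assumes it lives in the same package as PluginConfigurationProcessor,
// since the method is package-private.
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

class ConservativeWriteSketch {
    public static void main(String[] args) throws Exception {
        Path file = Paths.get("build", "generated", "skaffold.yaml"); // hypothetical target
        PluginConfigurationProcessor.writeFileConservatively(file, "apiVersion: v1\n"); // writes
        PluginConfigurationProcessor.writeFileConservatively(file, "apiVersion: v1\n"); // no-op, same content
        System.out.println(Files.getLastModifiedTime(file)); // unchanged by the second call
    }
}
|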
public static boolean isMatchWithPrefix(final byte[] candidate, final byte[] expected, final int prefixLength)
{
if (candidate.length != expected.length)
{
return false;
}
if (candidate.length == 4)
{
final int mask = prefixLengthToIpV4Mask(prefixLength);
return (toInt(candidate) & mask) == (toInt(expected) & mask);
}
else if (candidate.length == 16)
{
final long upperMask = prefixLengthToIpV6Mask(min(prefixLength, 64));
final long lowerMask = prefixLengthToIpV6Mask(max(prefixLength - 64, 0));
return
(upperMask & toLong(candidate, 0)) == (upperMask & toLong(expected, 0)) &&
(lowerMask & toLong(candidate, 8)) == (lowerMask & toLong(expected, 8));
}
throw new IllegalArgumentException("how many bytes does an IP address have again?");
}
|
@Test
void shouldMatchIfAllBytesMatch()
{
final byte[] a = { 'a', 'b', 'c', 'd' };
final byte[] b = { 'a', 'b', 'c', 'd' };
assertTrue(isMatchWithPrefix(a, b, 32));
}
|
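// A sketch of prefix matching on real addresses: with a /24 prefix the last octet
// is ignored, while /32 requires all four octets to match. Uses only the JDK plus
// a static import of isMatchWithPrefix, as in the test above.
import java.net.InetAddress;

class PrefixMatchSketch {
    public static void main(String[] args) throws Exception {
        final byte[] a = InetAddress.getByName("192.168.1.10").getAddress();
        final byte[] b = InetAddress.getByName("192.168.1.200").getAddress();
        System.out.println(isMatchWithPrefix(a, b, 24)); // true  - same /24 network
        System.out.println(isMatchWithPrefix(a, b, 32)); // false - full addresses differ
    }
}
|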
@Override
public String convert(IAccessEvent accessEvent) {
if (!isStarted()) {
return "INACTIVE_REQUEST_PARAM_CONV";
}
// This call should be safe, because the request map is cached beforehand
final String[] paramArray = accessEvent.getRequestParameterMap().get(key);
if (paramArray == null || paramArray.length == 0) {
return "-";
} else if (paramArray.length == 1) {
return paramArray[0];
} else {
return Arrays.toString(paramArray);
}
}
|
@Test
void testConvertOneParameter() throws Exception {
Mockito.when(httpServletRequest.getParameterValues("name")).thenReturn(new String[]{"Alice"});
Mockito.when(httpServletRequest.getParameterNames())
.thenReturn(Collections.enumeration(Collections.singleton("name")));
// Invoked by AccessEvent#prepareForDeferredProcessing
accessEvent.buildRequestParameterMap();
// Jetty recycled the request
Mockito.reset(httpServletRequest);
String value = safeRequestParameterConverter.convert(accessEvent);
assertThat(value).isEqualTo("Alice");
}
|
@GetMapping(value = "/node/self")
@Secured(action = ActionTypes.READ, resource = "nacos/admin", signType = SignType.CONSOLE)
public Result<Member> self() {
return Result.success(nacosClusterOperationService.self());
}
|
@Test
void testSelf() {
Member self = new Member();
when(nacosClusterOperationService.self()).thenReturn(self);
Result<Member> result = nacosClusterControllerV2.self();
assertEquals(ErrorCode.SUCCESS.getCode(), result.getCode());
assertEquals(self, result.getData());
}
|
public List<BookDto> getAllBooks() {
List<BookDto> books = Collections.emptyList();
try {
AuditDto audit = null;
Call<List<BookDto>> callBookResponse = libraryClient.getAllBooks("all");
Response<List<BookDto>> allBooksResponse = callBookResponse.execute();
if (allBooksResponse.isSuccessful()) {
books = allBooksResponse.body();
log.info("Get All Books : {}", books);
audit = auditMapper.populateAuditLogForGet(books);
} else {
log.error("Error calling library client: {}", allBooksResponse.errorBody());
if (Objects.nonNull(allBooksResponse.errorBody())) {
audit = auditMapper.populateAuditLogForException(
null, HttpMethod.GET, allBooksResponse.errorBody().string());
}
}
if (Objects.nonNull(audit)) {
AuditLog savedObj = auditRepository.save(libraryMapper.auditDtoToAuditLog(audit));
log.info("Saved into audit successfully: {}", savedObj);
}
return books;
} catch (Exception ex) {
log.error("Error handling retrofit call for getAllBooks", ex);
return books;
}
}
|
@Test
@DisplayName("Successful getAllBooks call")
public void getAllBooksTest() throws Exception {
String booksResponse = getBooksResponse("/response/getAllBooks.json");
List<BookDto> bookDtoList =
new ObjectMapper().readValue(booksResponse, new TypeReference<>() {
});
when(libraryClient.getAllBooks("all")).thenReturn(Calls.response(bookDtoList));
doReturn(null).when(auditRepository).save(any());
List<BookDto> allBooks = libraryAuditService.getAllBooks();
assertAll(
() -> assertNotNull(allBooks),
() -> assertTrue(allBooks.size() > 0)
);
}
|
public void generate() throws IOException
{
packageNameByTypes.clear();
generatePackageInfo();
generateTypeStubs();
generateMessageHeaderStub();
for (final List<Token> tokens : ir.messages())
{
final Token msgToken = tokens.get(0);
final List<Token> messageBody = getMessageBody(tokens);
final boolean hasVarData = -1 != findSignal(messageBody, Signal.BEGIN_VAR_DATA);
int i = 0;
final List<Token> fields = new ArrayList<>();
i = collectFields(messageBody, i, fields);
final List<Token> groups = new ArrayList<>();
i = collectGroups(messageBody, i, groups);
final List<Token> varData = new ArrayList<>();
collectVarData(messageBody, i, varData);
final String decoderClassName = formatClassName(decoderName(msgToken.name()));
final String decoderStateClassName = decoderClassName + "#CodecStates";
final FieldPrecedenceModel decoderPrecedenceModel = precedenceChecks.createDecoderModel(
decoderStateClassName, tokens);
generateDecoder(decoderClassName, msgToken, fields, groups, varData, hasVarData, decoderPrecedenceModel);
final String encoderClassName = formatClassName(encoderName(msgToken.name()));
final String encoderStateClassName = encoderClassName + "#CodecStates";
final FieldPrecedenceModel encoderPrecedenceModel = precedenceChecks.createEncoderModel(
encoderStateClassName, tokens);
generateEncoder(encoderClassName, msgToken, fields, groups, varData, hasVarData, encoderPrecedenceModel);
}
}
|
@Test
void shouldGenerateGetFixedLengthStringUsingAppendable() throws Exception
{
final UnsafeBuffer buffer = new UnsafeBuffer(new byte[4096]);
final StringBuilder result = new StringBuilder();
generator().generate();
final Object encoder = wrap(buffer, compileCarEncoder().getDeclaredConstructor().newInstance());
final Object decoder = getCarDecoder(buffer, encoder);
set(encoder, "vehicleCode", String.class, "R11");
get(decoder, "vehicleCode", result);
assertThat(result.toString(), is("R11"));
result.setLength(0);
set(encoder, "vehicleCode", String.class, "");
get(decoder, "vehicleCode", result);
assertThat(result.toString(), is(""));
result.setLength(0);
set(encoder, "vehicleCode", String.class, "R11R12");
get(decoder, "vehicleCode", result);
assertThat(result.toString(), is("R11R12"));
}
|
@Udf
public Integer abs(@UdfParameter final Integer val) {
return (val == null) ? null : Math.abs(val);
}
|
@Test
public void shouldHandleNull() {
assertThat(udf.abs((Integer) null), is(nullValue()));
assertThat(udf.abs((Long)null), is(nullValue()));
assertThat(udf.abs((Double)null), is(nullValue()));
assertThat(udf.abs((BigDecimal) null), is(nullValue()));
}
|
@Override
public boolean mayHaveMergesPending(String bucketSpace, int contentNodeIndex) {
if (!stats.hasUpdatesFromAllDistributors()) {
return true;
}
ContentNodeStats nodeStats = stats.getStats().getNodeStats(contentNodeIndex);
if (nodeStats != null) {
ContentNodeStats.BucketSpaceStats bucketSpaceStats = nodeStats.getBucketSpace(bucketSpace);
return (bucketSpaceStats != null &&
bucketSpaceStats.mayHaveBucketsPending(minMergeCompletionRatio));
}
return true;
}
|
@Test
void valid_bucket_space_stats_can_have_no_merges_pending() {
Fixture f = Fixture.fromBucketsPending(0);
assertFalse(f.mayHaveMergesPending("default", 1));
}
|
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
// Must split otherwise 413 Request Entity Too Large is returned
for(List<Path> partition : new Partition<>(new ArrayList<>(files.keySet()),
new HostPreferences(session.getHost()).getInteger("googledrive.delete.multiple.partition"))) {
final BatchRequest batch = session.getClient().batch();
final List<BackgroundException> failures = new CopyOnWriteArrayList<>();
for(Path file : partition) {
try {
this.queue(file, batch, callback, failures);
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map("Cannot delete {0}", e, file);
}
}
if(!partition.isEmpty()) {
try {
batch.execute();
}
catch(IOException e) {
throw new DriveExceptionMappingService(fileid).map(e);
}
for(BackgroundException e : failures) {
throw e;
}
}
}
}
|
@Test
public void testDeleteFolder() throws Exception {
final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
final Path directory = new DriveDirectoryFeature(session, fileid).mkdir(new Path(DriveHomeFinderService.MYDRIVE_FOLDER,
new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
assertTrue(new DriveFindFeature(session, fileid).find(directory, new DisabledListProgressListener()));
final Path file = new DriveTouchFeature(session, fileid).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
assertTrue(new DriveFindFeature(session, fileid).find(file, new DisabledListProgressListener()));
new DriveBatchDeleteFeature(session, fileid).delete(Collections.singletonList(directory), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new DriveFindFeature(session, fileid).find(directory, new DisabledListProgressListener()));
assertFalse(new DriveFindFeature(session, fileid).find(file, new DisabledListProgressListener()));
}
|