focal_method | test_case
---|---
static CompletableFuture<DLInputStream> openReaderAsync(DistributedLogManager distributedLogManager) {
return distributedLogManager.openAsyncLogReader(DLSN.InitialDLSN)
.thenApply(r -> new DLInputStream(distributedLogManager, r));
}
|
@Test
public void openAsyncLogReaderFailed() {
when(dlm.openAsyncLogReader(any(DLSN.class))).thenReturn(failedFuture(new Exception("Open reader was failed")));
try {
DLInputStream.openReaderAsync(dlm).get();
fail("expected the open to fail"); // guard against the test passing vacuously
} catch (Exception e) {
assertEquals(e.getCause().getMessage(), "Open reader was failed");
}
}
|
@Override
public T add(K name, V value) {
throw new UnsupportedOperationException("read only");
}
|
@Test
public void testAddStringValue() {
assertThrows(UnsupportedOperationException.class, () -> HEADERS.add("name", "value"));
}
|
public final boolean schedule(Runnable task, long delay, TimeUnit unit) {
checkNotNull(task);
checkNotNegative(delay, "delay");
checkNotNull(unit);
ScheduledTask scheduledTask = new ScheduledTask(this);
scheduledTask.task = task;
long deadlineNanos = nanoClock.nanoTime() + unit.toNanos(delay);
if (deadlineNanos < 0) {
// protection against overflow
deadlineNanos = Long.MAX_VALUE;
}
scheduledTask.deadlineNanos = deadlineNanos;
return scheduledTaskQueue.offer(scheduledTask);
}
|
@Test
public void test_schedule() {
Task task = new Task();
reactor.offer(() -> reactor.eventloop.schedule(task, 1, SECONDS));
assertTrueEventually(() -> assertEquals(1, task.count.get()));
}
|
static Result coerceUserList(
final Collection<Expression> expressions,
final ExpressionTypeManager typeManager
) {
return coerceUserList(expressions, typeManager, Collections.emptyMap());
}
|
@Test
public void shouldNotCoerceMapOfIncompatibleKeyLiterals() {
// Given:
final ImmutableList<Expression> expressions = ImmutableList.of(
new CreateMapExpression(
ImmutableMap.of(
new IntegerLiteral(10),
new IntegerLiteral(289476)
)
),
new CreateMapExpression(
ImmutableMap.of(
new BooleanLiteral(false),
new StringLiteral("123456789000")
)
)
);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> CoercionUtil.coerceUserList(expressions, typeManager)
);
// Then:
assertThat(e.getMessage(),
startsWith("operator does not exist: MAP<INTEGER, INTEGER> = MAP<BOOLEAN, STRING> (MAP(false:='123456789000'))"));
}
|
@Override
public boolean unregister(final Application application) {
return SMAppService.loginItemServiceWithIdentifier(application.getIdentifier()).unregisterAndReturnError(null);
}
|
@Test
public void testUnregister() {
assumeFalse(Factory.Platform.osversion.matches("(10|11|12)\\..*"));
assertFalse(new SMAppServiceApplicationLoginRegistry().unregister(
new Application("bundle.helper")));
}
|
public static void doRegister(final String json, final String url, final String type, final String accessToken) throws IOException {
if (StringUtils.isBlank(accessToken)) {
LOGGER.error("{} client register error accessToken is null, please check the config : {} ", type, json);
return;
}
Headers headers = new Headers.Builder().add(Constants.X_ACCESS_TOKEN, accessToken).build();
String result = OkHttpTools.getInstance().post(url, json, headers);
if (Objects.equals(SUCCESS, result)) {
LOGGER.info("{} client register success: {} ", type, json);
} else {
LOGGER.error("{} client register error: {} ", type, json);
}
}
|
@Test
public void testDoRegisterWhenSuccess() throws IOException {
when(okHttpTools.post(url, json)).thenReturn("success");
Headers headers = new Headers.Builder().add(Constants.X_ACCESS_TOKEN, accessToken).build();
when(okHttpTools.post(url, json, headers)).thenReturn("success");
try (MockedStatic<OkHttpTools> okHttpToolsMockedStatic = mockStatic(OkHttpTools.class)) {
okHttpToolsMockedStatic.when(OkHttpTools::getInstance).thenReturn(okHttpTools);
RegisterUtils.doRegister(json, url, RegisterTypeEnum.DUBBO.getName());
verify(okHttpTools, times(1)).post(eq(url), eq(json));
RegisterUtils.doRegister(json, url, RegisterTypeEnum.DUBBO.getName(), accessToken);
verify(okHttpTools, times(1)).post(eq(url), eq(json));
}
}
|
@Override public boolean processRow( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {
Preconditions.checkArgument( first,
BaseMessages.getString( PKG, "BaseStreamStep.ProcessRowsError" ) );
Preconditions.checkNotNull( source );
Preconditions.checkNotNull( window );
try {
source.open();
bufferStream().forEach( result -> {
if ( result.isSafeStop() ) {
getTrans().safeStop();
}
putRows( result.getRows() );
} );
super.setOutputDone();
} finally {
// Needed for when an Abort Step is used.
source.close();
}
return false;
}
|
@Test
public void testAlwaysCloses() throws KettleException {
when( streamWindow.buffer( any() ) ).thenThrow( new IllegalStateException( "run for your life!!!" ) );
try {
baseStreamStep.processRow( meta, stepData );
} catch ( IllegalStateException ignored ) {
}
verify( streamSource ).close();
}
|
public FallbackPath getFallback() {
return fallback;
}
|
@Test
public void testFallbackPath() {
ShenyuConfig.FallbackPath fallback = config.getFallback();
fallback.setEnabled(true);
fallback.setPaths(Collections.emptyList());
List<String> paths = fallback.getPaths();
Boolean enabled = fallback.getEnabled();
notEmptyElements(paths, enabled);
}
|
public List<Partition> getPartitions(Connection connection, Table table) {
JDBCTable jdbcTable = (JDBCTable) table;
String query = getPartitionQuery(table);
try (PreparedStatement ps = connection.prepareStatement(query)) {
ps.setString(1, jdbcTable.getDbName());
ps.setString(2, jdbcTable.getJdbcTable());
ResultSet rs = ps.executeQuery();
ImmutableList.Builder<Partition> list = ImmutableList.builder();
if (null != rs) {
while (rs.next()) {
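// The NAME column is assumed to hold a quoted, comma-separated list of
// partition names (e.g. '20230810','20230811'), so the quotes are
// stripped before splitting.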
String[] partitionNames = rs.getString("NAME").replace("'", "").split(",");
long createTime = rs.getTimestamp("MODIFIED_TIME").getTime();
for (String partitionName : partitionNames) {
list.add(new Partition(partitionName, createTime));
}
}
return list.build();
} else {
return Lists.newArrayList();
}
} catch (SQLException | NullPointerException e) {
throw new StarRocksConnectorException(e.getMessage(), e);
}
}
|
@Test
public void testGetPartitions_NonPartitioned() throws DdlException {
JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
List<Column> columns = Arrays.asList(new Column("d", Type.VARCHAR));
JDBCTable jdbcTable = new JDBCTable(100000, "tbl1", columns, Lists.newArrayList(),
"test", "catalog", properties);
int size = jdbcMetadata.getPartitions(jdbcTable, Arrays.asList("20230810")).size();
Assert.assertEquals(1, size);
List<String> partitionNames = PartitionUtil.getPartitionNames(jdbcTable);
Assert.assertEquals(Arrays.asList("tbl1"), partitionNames);
}
|
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/flows/{namespace}/{flowId}")
@Operation(tags = {"Executions"}, summary = "Get flow information for an execution")
public FlowForExecution getFlowForExecution(
@Parameter(description = "The namespace of the flow") @PathVariable String namespace,
@Parameter(description = "The flow id") @PathVariable String flowId,
@Parameter(description = "The flow revision") @Nullable Integer revision
) {
return FlowForExecution.of(flowRepository.findByIdWithoutAcl(tenantService.resolveTenant(), namespace, flowId, Optional.ofNullable(revision)).orElseThrow());
}
|
@SuppressWarnings("DataFlowIssue")
@Test
void getFlowForExecution() {
FlowForExecution result = client.toBlocking().retrieve(
GET("/api/v1/executions/flows/io.kestra.tests/full"),
FlowForExecution.class
);
assertThat(result, notNullValue());
assertThat(result.getTasks(), hasSize(5));
assertThat((result.getTasks().getFirst() instanceof TaskForExecution), is(true));
}
|
public void setSimpleLoadBalancerState(SimpleLoadBalancerState state)
{
_watcherManager.updateWatcher(state, this::doRegisterLoadBalancerState);
doRegisterLoadBalancerState(state, null);
state.register(new SimpleLoadBalancerStateListener()
{
@Override
public void onStrategyAdded(String serviceName, String scheme, LoadBalancerStrategy strategy)
{
_watcherManager.updateWatcher(serviceName, scheme, strategy,
(item, mode) -> doRegisterLoadBalancerStrategy(serviceName, scheme, item, mode));
doRegisterLoadBalancerStrategy(serviceName, scheme, strategy, null);
}
@Override
public void onStrategyRemoved(String serviceName, String scheme, LoadBalancerStrategy strategy)
{
_watcherManager.removeWatcherForLoadBalancerStrategy(serviceName, scheme);
_jmxManager.unregister(getLoadBalancerStrategyJmxName(serviceName, scheme, null));
}
@Override
public void onClientAdded(String clusterName, TrackerClient client)
{
// We currently think we can make this no-op as the info provided is not helpful
// _jmxManager.checkReg(new DegraderControl((DegraderImpl) client.getDegrader(DefaultPartitionAccessor.DEFAULT_PARTITION_ID)),
// _prefix + "-" + clusterName + "-" + client.getUri().toString().replace("://", "-") + "-TrackerClient-Degrader");
}
@Override
public void onClientRemoved(String clusterName, TrackerClient client)
{
// We currently think we can make this no-op as the info provided is not helpful
// _jmxManager.unregister(_prefix + "-" + clusterName + "-" + client.getUri().toString().replace("://", "-") + "-TrackerClient-Degrader");
}
@Override
public void onClusterInfoUpdate(ClusterInfoItem clusterInfoItem)
{
if (clusterInfoItem != null && clusterInfoItem.getClusterPropertiesItem() != null
&& clusterInfoItem.getClusterPropertiesItem().getProperty() != null)
{
String clusterName = clusterInfoItem.getClusterPropertiesItem().getProperty().getClusterName();
_watcherManager.updateWatcher(clusterName, clusterInfoItem,
(item, mode) -> doRegisterClusterInfo(clusterName, item, mode));
doRegisterClusterInfo(clusterName, clusterInfoItem, null);
}
}
@Override
public void onClusterInfoRemoval(ClusterInfoItem clusterInfoItem)
{
if (clusterInfoItem != null && clusterInfoItem.getClusterPropertiesItem() != null
&& clusterInfoItem.getClusterPropertiesItem().getProperty() != null)
{
String clusterName = clusterInfoItem.getClusterPropertiesItem().getProperty().getClusterName();
_watcherManager.removeWatcherForClusterInfoItem(clusterName);
_jmxManager.unregister(getClusterInfoJmxName(clusterName, null));
}
}
@Override
public void onServicePropertiesUpdate(LoadBalancerStateItem<ServiceProperties> serviceProperties)
{
if (serviceProperties != null && serviceProperties.getProperty() != null)
{
String serviceName = serviceProperties.getProperty().getServiceName();
_watcherManager.updateWatcher(serviceName, serviceProperties,
(item, mode) -> doRegisterServiceProperties(serviceName, item, mode));
doRegisterServiceProperties(serviceName, serviceProperties, null);
}
}
@Override
public void onServicePropertiesRemoval(LoadBalancerStateItem<ServiceProperties> serviceProperties)
{
if (serviceProperties != null && serviceProperties.getProperty() != null)
{
String serviceName = serviceProperties.getProperty().getServiceName();
_watcherManager.removeWatcherForServiceProperties(serviceName);
_jmxManager.unregister(getServicePropertiesJmxName(serviceName, null));
}
}
private void doRegisterLoadBalancerStrategy(String serviceName, String scheme, LoadBalancerStrategy strategy,
@Nullable DualReadModeProvider.DualReadMode mode)
{
String jmxName = getLoadBalancerStrategyJmxName(serviceName, scheme, mode);
_jmxManager.registerLoadBalancerStrategy(jmxName, strategy);
}
private void doRegisterClusterInfo(String clusterName, ClusterInfoItem clusterInfoItem,
@Nullable DualReadModeProvider.DualReadMode mode)
{
String jmxName = getClusterInfoJmxName(clusterName, mode);
_jmxManager.registerClusterInfo(jmxName, clusterInfoItem);
}
private void doRegisterServiceProperties(String serviceName, LoadBalancerStateItem<ServiceProperties> serviceProperties,
@Nullable DualReadModeProvider.DualReadMode mode)
{
_jmxManager.registerServiceProperties(getServicePropertiesJmxName(serviceName, mode), serviceProperties);
}
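// The getClusterPrefixForLBPropertyJmxNames / getServicePrefixForLBPropertyJmxNames
// helpers (defined elsewhere) presumably derive the prefix from the dual-read mode,
// so beans registered by the old and new load balancers get distinct JMX names.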
private String getClusterInfoJmxName(String clusterName, @Nullable DualReadModeProvider.DualReadMode mode)
{
return String.format("%s%s-ClusterInfo", getClusterPrefixForLBPropertyJmxNames(clusterName, mode), clusterName);
}
private String getServicePropertiesJmxName(String serviceName, @Nullable DualReadModeProvider.DualReadMode mode)
{
return String.format("%s%s-ServiceProperties", getServicePrefixForLBPropertyJmxNames(serviceName, mode), serviceName);
}
private String getLoadBalancerStrategyJmxName(String serviceName, String scheme, @Nullable DualReadModeProvider.DualReadMode mode)
{
return String.format("%s%s-%s-LoadBalancerStrategy", getServicePrefixForLBPropertyJmxNames(serviceName, mode), serviceName, scheme);
}
});
}
|
@Test(dataProvider = "nonDualReadD2ClientJmxManagers")
public void testSetSimpleLBStateListenerUpdateClusterInfo(String prefix, D2ClientJmxManager.DiscoverySourceType sourceType,
Boolean isDualReadLB)
{
D2ClientJmxManagerFixture fixture = new D2ClientJmxManagerFixture();
D2ClientJmxManager d2ClientJmxManager = fixture.getD2ClientJmxManager(prefix, sourceType, isDualReadLB);
d2ClientJmxManager.setSimpleLoadBalancerState(fixture._simpleLoadBalancerState);
fixture._simpleLoadBalancerStateListenerCaptor.getValue().onClusterInfoUpdate(null);
Mockito.verify(fixture._jmxManager, never()).registerClusterInfo(any(), any());
fixture._simpleLoadBalancerStateListenerCaptor.getValue().onClusterInfoUpdate(fixture._noPropertyClusterInfoItem);
Mockito.verify(fixture._jmxManager, never()).registerClusterInfo(any(), any());
fixture._simpleLoadBalancerStateListenerCaptor.getValue().onClusterInfoUpdate(fixture._clusterInfoItem);
Assert.assertEquals(
fixture._registerObjectNameCaptor.getValue(),
"C_Foo-ClusterInfo"
);
Assert.assertEquals(
fixture._clusterInfoArgumentCaptor.getValue(),
fixture._clusterInfoItem
);
}
|
protected String getDumpPath() {
final String dumpPath = url.getParameter(DUMP_DIRECTORY);
if (StringUtils.isEmpty(dumpPath)) {
return USER_HOME;
}
final File dumpDirectory = new File(dumpPath);
if (!dumpDirectory.exists()) {
if (dumpDirectory.mkdirs()) {
logger.info(format("Dubbo dump directory[%s] created", dumpDirectory.getAbsolutePath()));
} else {
logger.warn(
COMMON_UNEXPECTED_CREATE_DUMP,
"",
"",
format(
"Dubbo dump directory[%s] can't be created, use the 'user.home'[%s]",
dumpDirectory.getAbsolutePath(), USER_HOME));
return USER_HOME;
}
}
return dumpPath;
}
|
@Test
void jStackDumpTest_dumpDirectoryNotExists_canBeCreated() {
final String dumpDirectory = UUID.randomUUID().toString();
URL url = URL.valueOf("dubbo://admin:[email protected]:20880/context/path?dump.directory="
+ dumpDirectory
+ "&version=1.0.0&application=morgan&noValue=true");
AbortPolicyWithReport abortPolicyWithReport = new AbortPolicyWithReport("Test", url);
Assertions.assertNotEquals(System.getProperty("user.home"), abortPolicyWithReport.getDumpPath());
}
|
@Override
public AclConfig getAllAclConfig() {
return aclPlugEngine.getAllAclConfig();
}
|
@Test
public void getAllAclConfigTest() {
PlainAccessValidator plainAccessValidator = new PlainAccessValidator();
AclConfig aclConfig = plainAccessValidator.getAllAclConfig();
Assert.assertEquals(aclConfig.getGlobalWhiteAddrs().size(), 4);
Assert.assertEquals(aclConfig.getPlainAccessConfigs().size(), 2);
}
|
public static Set<Result> anaylze(String log) {
Set<Result> results = new HashSet<>();
for (Rule rule : Rule.values()) {
Matcher matcher = rule.pattern.matcher(log);
if (matcher.find()) {
results.add(new Result(rule, log, matcher));
}
}
return results;
}
|
@Test
public void tooOldJava2() throws IOException {
CrashReportAnalyzer.Result result = findResultByRule(
CrashReportAnalyzer.anaylze(loadLog("/crash-report/too_old_java2.txt")),
CrashReportAnalyzer.Rule.TOO_OLD_JAVA);
assertEquals("52", result.getMatcher().group("expected"));
}
|
@Override
public void run() {
try {
PushDataWrapper wrapper = generatePushData();
ClientManager clientManager = delayTaskEngine.getClientManager();
for (String each : getTargetClientIds()) {
Client client = clientManager.getClient(each);
if (null == client) {
// means this client has disconnected
continue;
}
Subscriber subscriber = client.getSubscriber(service);
// skip if null
if (subscriber == null) {
continue;
}
delayTaskEngine.getPushExecutor().doPushWithCallback(each, subscriber, wrapper,
new ServicePushCallback(each, subscriber, wrapper.getOriginalData(), delayTask.isPushToAll()));
}
} catch (Exception e) {
Loggers.PUSH.error("Push task for service " + service.getGroupedServiceName() + " execute failed ", e);
delayTaskEngine.addTask(service, new PushDelayTask(service, 1000L));
}
}
|
@Test
void testRunFailedWithHandleException() {
PushDelayTask delayTask = new PushDelayTask(service, 0L);
PushExecuteTask executeTask = new PushExecuteTask(service, delayTaskExecuteEngine, delayTask);
when(delayTaskExecuteEngine.getServiceStorage()).thenThrow(new RuntimeException());
executeTask.run();
assertEquals(0, MetricsMonitor.getFailedPushMonitor().get());
verify(delayTaskExecuteEngine).addTask(eq(service), any(PushDelayTask.class));
}
|
@VisibleForTesting
static void checkSamePrefixedProviders(
Map<String, DelegationTokenProvider> providers, Set<String> warnings) {
Set<String> providerPrefixes = new HashSet<>();
for (String name : providers.keySet()) {
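// Provider names are assumed to follow a "<prefix>-<rest>" naming convention
// (e.g. "s3-hadoop"), so the token before the first '-' is treated as the prefix.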
String[] split = name.split("-");
if (!providerPrefixes.add(split[0])) {
String msg =
String.format(
"Multiple providers loaded with the same prefix: %s. This might lead to unintended consequences, please consider using only one of them.",
split[0]);
warnings.add(msg);
}
}
}
|
@Test
public void checkSamePrefixedProvidersShouldNotGiveErrorsWhenNoSamePrefix() {
Map<String, DelegationTokenProvider> providers = new HashMap<>();
providers.put("s3-hadoop", new TestDelegationTokenProvider());
Set<String> warnings = new HashSet<>();
DefaultDelegationTokenManager.checkSamePrefixedProviders(providers, warnings);
assertTrue(warnings.isEmpty());
}
|
public void start() {
commandTopic.start();
}
|
@Test
public void shouldStartCommandTopicOnStart() {
// When:
commandStore.start();
// Then:
verify(commandTopic).start();
}
|
public String generateQualityGate(Metric.Level level) {
return qualityGateTemplates.get(level);
}
|
@Test
public void generate_quality_gate() {
initSvgGenerator();
String result = underTest.generateQualityGate(ERROR);
checkQualityGate(result, ERROR);
}
|
public abstract Duration parse(String text);
|
@Test
public void testLongSeconds() {
Assert.assertEquals(Duration.ofSeconds(1), DurationStyle.LONG.parse("1 seconds"));
Assert.assertEquals(Duration.ofSeconds(10), DurationStyle.LONG.parse("10 seconds"));
Assert.assertThrows(DateTimeException.class, () -> DurationStyle.LONG.parse("a seconds"));
Assert.assertThrows(DateTimeException.class, () -> DurationStyle.LONG.parse("3 scond"));
Assert.assertThrows(DateTimeException.class, () -> DurationStyle.LONG.parse("3s"));
Assert.assertThrows(DateTimeException.class, () -> DurationStyle.LONG.parse("3S"));
}
|
public static Builder custom() {
return new Builder();
}
|
@Test
public void builderTimeoutIsNull() throws Exception {
exception.expect(NullPointerException.class);
exception.expectMessage(TIMEOUT_DURATION_MUST_NOT_BE_NULL);
RateLimiterConfig.custom()
.timeoutDuration(null);
}
|
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
return doSharding(availableTargetNames, Range.singleton(shardingValue.getValue())).stream().findFirst().orElse(null);
}
|
@Test
void assertRangeDoShardingByQuarter() {
Collection<String> actual = shardingAlgorithmByQuarter.doSharding(availableTablesForQuarterDataSources,
createShardingValue("2019-10-15 10:59:08", "2020-04-08 10:59:08"));
assertThat(actual.size(), is(3));
}
|
public static String lowercaseFirstLetter(String string) {
if (string == null || string.length() == 0) {
return string;
} else {
return string.substring(0, 1).toLowerCase() + string.substring(1);
}
}
|
@Test
public void testLowercaseFirstLetter() {
assertEquals(lowercaseFirstLetter(""), (""));
assertEquals(lowercaseFirstLetter("A"), ("a"));
assertEquals(lowercaseFirstLetter("AA"), ("aA"));
assertEquals(lowercaseFirstLetter("a"), ("a"));
assertEquals(lowercaseFirstLetter("aB"), ("aB"));
}
|
@SuppressWarnings("unchecked")
public static <T> Map<String, T> getEnumConstants(Class<T> type) {
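// ENUM_CONSTANT_MASK is assumed to combine Modifier.PUBLIC | Modifier.STATIC
// | Modifier.FINAL, so the filter keeps only public static final fields whose
// type is the class itself, i.e. the enum-like constants it declares.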
return Arrays.stream(type.getDeclaredFields())
.filter(field -> field.getType() == type
&& (field.getModifiers() & ENUM_CONSTANT_MASK) == ENUM_CONSTANT_MASK)
.collect(toMap(Field::getName, field -> {
try {
return (T) field.get(null);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}));
}
|
@Test
public void test_getEnumConstants() {
assertThat(FieldUtils.getEnumConstants(EnumLikeClass.class))
.containsExactlyInAnyOrderEntriesOf(Map.of(
"VALUE1", EnumLikeClass.VALUE1,
"VALUE2", EnumLikeClass.VALUE2,
"VALUE3", EnumLikeClass.VALUE3
));
}
|
public RestOpenApiProcessor(Map<String, Object> parameters,
RestConfiguration configuration) {
this.configuration = configuration;
this.support = new RestOpenApiSupport();
this.openApiConfig = new BeanConfig();
if (parameters == null) {
parameters = Collections.emptyMap();
}
support.initOpenApi(openApiConfig, parameters);
}
|
@Test
public void testRestOpenApiProcessor() throws Exception {
CamelContext context = new DefaultCamelContext();
context.addRoutes(new RouteBuilder() {
@Override
public void configure() {
rest().get("/foo").description("Foo endpoint").to("mock:foo")
.post("/bar").description("Bar endpoint").to("mock:foo");
}
});
RestOpenApiProcessor processor = new RestOpenApiProcessor(null, context.getRestConfiguration());
processor.setCamelContext(context);
processor.start();
Exchange exchange = new DefaultExchange(context);
processor.process(exchange);
String json = exchange.getMessage().getBody(String.class);
assertNotNull(json);
assertTrue(json.contains("\"/foo\""));
assertTrue(json.contains("\"/bar\""));
assertTrue(json.contains("\"summary\" : \"Foo endpoint\""));
assertTrue(json.contains("\"summary\" : \"Bar endpoint\""));
}
|
@Override
public void upgrade() {
if (clusterConfigService.get(V20230531135500_MigrateRemoveObsoleteItemsFromGrantsCollection.MigrationCompleted.class) != null) {
return;
}
final Set<String> names = new HashSet<>();
mongoConnection.getMongoDatabase().listCollectionNames().forEach(names::add);
if (names.contains(DBGrantService.COLLECTION_NAME)) {
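// Remove grants whose target GRN points at the obsolete "favorite" and
// "last_opened" entity types, which are assumed to no longer be valid targets.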
var query = new BasicDBObject("target", Pattern.compile("^grn::::favorite:"));
mongoConnection.getMongoDatabase().getCollection(DBGrantService.COLLECTION_NAME).deleteMany(query);
query = new BasicDBObject("target", Pattern.compile("^grn::::last_opened:"));
mongoConnection.getMongoDatabase().getCollection(DBGrantService.COLLECTION_NAME).deleteMany(query);
}
clusterConfigService.write(new MigrationCompleted());
}
|
@Test
@MongoDBFixtures("V20230531135500_MigrateRemoveObsoleteItemsFromGrantsCollectionTest_noElements.json")
void notMigratingAnythingIfNoElementsArePresent() {
assertThat(this.collection.countDocuments()).isEqualTo(9);
this.migration.upgrade();
assertThat(migrationCompleted()).isNotNull();
assertThat(this.collection.countDocuments()).isEqualTo(9);
}
|
protected static byte rho(long x, int k) {
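// Bit trick: shifting x left by k drops the k bucket-index bits, and OR-ing in
// (1 << (k - 1)) plants a sentinel bit so the leading-zero count (and hence the
// returned rank) stays bounded even when the remaining bits of x are all zero.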
return (byte) (Long.numberOfLeadingZeros((x << k) | (1 << (k - 1))) + 1);
}
|
@Test
public void testRho() {
assertEquals(17, LogLog.rho(0, 16));
assertEquals(16, LogLog.rho(1, 16));
assertEquals(15, LogLog.rho(2, 16));
assertEquals(1, LogLog.rho(0x00008000, 16));
assertEquals(23, LogLog.rho(0, 10));
assertEquals(22, LogLog.rho(1, 10));
assertEquals(21, LogLog.rho(2, 10));
assertEquals(1, LogLog.rho(0x00200000, 10));
}
|
public synchronized OutputStream open() {
try {
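// Close any previously opened stream first, so every call hands back a fresh
// appending stream over the same file (what requireThatFileIsReopened verifies).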
close();
fileOutputStream = new FileOutputStream(file, true);
} catch (FileNotFoundException e) {
throw new RuntimeException("Unable to open output stream", e);
}
return fileOutputStream;
}
|
@Test
public void requireThatFileIsReopened() throws IOException {
FileLogTarget logTarget = new FileLogTarget(File.createTempFile("logfile", ".log"));
OutputStream out1 = logTarget.open();
assertNotNull(out1);
OutputStream out2 = logTarget.open();
assertNotNull(out2);
assertNotEquals(out1, out2);
}
|
@Override
public String getConsumerMethodProperty(String service, String method, String key) {
return config.getProperty(DynamicConfigKeyHelper.buildConsumerMethodProKey(service, method, key),
DynamicHelper.DEFAULT_DYNAMIC_VALUE);
}
|
@Test
public void getConsumerMethodProperty() {
}
|
@Udf(description = "When reducing an array, "
+ "the reduce function must have two arguments. "
+ "The two arguments for the reduce function are in order: "
+ "the state and the array item. "
+ "The final state is returned."
)
public <T,S> S reduceArray(
@UdfParameter(description = "The array.") final List<T> list,
@UdfParameter(description = "The initial state.") final S initialState,
@UdfParameter(description = "The reduce function.") final BiFunction<S, T, S> biFunction
) {
if (initialState == null || biFunction == null) {
return null;
}
if (list == null) {
return initialState;
}
S state = initialState;
for (T listItem: list) {
state = biFunction.apply(state, listItem);
}
return state;
}
|
@Test
public void shouldReduceArray() {
assertThat(udf.reduceArray( ImmutableList.of(), "", biFunction1()), is(""));
assertThat(udf.reduceArray(ImmutableList.of(), "answer", biFunction1()), is("answer"));
assertThat(udf.reduceArray(ImmutableList.of(2, 3, 4, 4, 1000), "", biFunction1()), is("evenoddeveneveneven"));
assertThat(udf.reduceArray(ImmutableList.of(3, -1, -5), "This is: ", biFunction1()), is("This is: oddoddodd"));
assertThat(udf.reduceArray(ImmutableList.of(), 0, biFunction2()), is(0));
assertThat(udf.reduceArray(Arrays.asList(-1, -13), 14, biFunction2()), is(0));
assertThat(udf.reduceArray(ImmutableList.of(-5, 10), 1, biFunction2()), is(6));
assertThat(udf.reduceArray(ImmutableList.of(100, 1000, 42), -100, biFunction2()), is(1042));
}
|
@Override
public boolean shouldCareAbout(Object entity) {
return securityConfigClasses.stream().anyMatch(aClass -> aClass.isAssignableFrom(entity.getClass()));
}
|
@Test
public void shouldCareAboutRolesConfigChange() {
SecurityConfigChangeListener securityConfigChangeListener = new SecurityConfigChangeListener() {
@Override
public void onEntityConfigChange(Object entity) {
}
};
assertThat(securityConfigChangeListener.shouldCareAbout(new RolesConfig()), is(true));
}
|
public Collection<String> getUsedConversionClasses(Schema schema) {
Collection<String> result = new HashSet<>();
for (Conversion<?> conversion : getUsedConversions(schema)) {
result.add(conversion.getClass().getCanonicalName());
}
return result;
}
|
@Test
void getUsedConversionClassesForNullableLogicalTypesInNestedRecord() throws Exception {
SpecificCompiler compiler = createCompiler();
final Schema schema = new Schema.Parser().parse(
"{\"type\":\"record\",\"name\":\"NestedLogicalTypesRecord\",\"namespace\":\"org.apache.avro.codegentest.testdata\",\"doc\":\"Test nested types with logical types in generated Java classes\",\"fields\":[{\"name\":\"nestedRecord\",\"type\":{\"type\":\"record\",\"name\":\"NestedRecord\",\"fields\":[{\"name\":\"nullableDateField\",\"type\":[\"null\",{\"type\":\"int\",\"logicalType\":\"date\"}]}]}}]}");
final Collection<String> usedConversionClasses = compiler.getUsedConversionClasses(schema);
assertEquals(1, usedConversionClasses.size());
assertEquals("org.apache.avro.data.TimeConversions.DateConversion", usedConversionClasses.iterator().next());
}
|
@Override
public ImmutableList<Fact> facts() {
return facts;
}
|
@GwtIncompatible
@Test
public void testSerialization_ComparisonFailureWithFacts() {
ImmutableList<String> messages = ImmutableList.of("hello");
ImmutableList<Fact> facts = ImmutableList.of(fact("first", "value"), simpleFact("second"));
String expected = "expected";
String actual = "actual";
Throwable cause = new Throwable("cause");
ComparisonFailureWithFacts original =
new ComparisonFailureWithFacts(messages, facts, expected, actual, cause);
ComparisonFailureWithFacts reserialized = reserialize(original);
assertThat(reserialized).hasMessageThat().isEqualTo(original.getMessage());
assertThat(reserialized).hasCauseThat().hasMessageThat().isEqualTo(cause.getMessage());
assertThat(reserialized.facts().get(0).key).isEqualTo("first");
assertThat(reserialized.facts().get(0).value).isEqualTo("value");
assertThat(reserialized.facts().get(1).key).isEqualTo("second");
assertThat(reserialized.getExpected()).isEqualTo("expected");
assertThat(reserialized.getActual()).isEqualTo("actual");
}
|
public boolean offer(E item) {
checkNotNull(item, "item");
if (tail - head + 1 == capacity) {
return false;
} else {
long t = tail + 1;
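// capacity is assumed to be a power of two with mask == capacity - 1, so
// (t & mask) cheaply wraps the ever-growing tail counter into an array slot.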
int index = (int) (t & mask);
array[index] = item;
tail = t;
return true;
}
}
|
@Test(expected = NullPointerException.class)
public void test_offer_whenNull() {
CircularQueue<Integer> queue = new CircularQueue<>(128);
queue.offer(null);
}
|
public static NamespaceName get(String tenant, String namespace) {
validateNamespaceName(tenant, namespace);
return get(tenant + '/' + namespace);
}
|
@Test(expectedExceptions = IllegalArgumentException.class)
public void namespace_emptyNamespace() {
NamespaceName.get("pulsar", "cluster", "");
}
|
public static synchronized void d(final String tag, String text) {
if (msLogger.supportsD()) {
msLogger.d(tag, text);
addLog(LVL_D, tag, text);
}
}
|
@Test
public void testDNotSupported() throws Exception {
Mockito.when(mMockLog.supportsD()).thenReturn(false);
Logger.d("mTag", "Text with %d digits", 1);
Mockito.verify(mMockLog, Mockito.never()).d("mTag", "Text with 1 digits");
Logger.d("mTag", "Text with no digits");
Mockito.verify(mMockLog, Mockito.never()).d("mTag", "Text with no digits");
}
|
@Override
public Range<T> lastRange() {
return rangeSet.lastRange();
}
|
@Test
public void testLastRange() {
set = new RangeSetWrapper<>(consumer, reverseConvert, managedCursor);
assertNull(set.lastRange());
Range<LongPair> range = Range.openClosed(new LongPair(0, 97), new LongPair(0, 99));
set.addOpenClosed(0, 97, 0, 99);
assertEquals(set.lastRange(), range);
assertEquals(set.size(), 1);
set.addOpenClosed(0, 98, 0, 105);
assertEquals(set.lastRange(), Range.openClosed(new LongPair(0, 97), new LongPair(0, 105)));
assertEquals(set.size(), 1);
range = Range.openClosed(new LongPair(1, 5), new LongPair(1, 75));
set.addOpenClosed(1, 5, 1, 75);
assertEquals(set.lastRange(), range);
assertEquals(set.size(), 2);
range = Range.openClosed(new LongPair(1, 80), new LongPair(1, 120));
set.addOpenClosed(1, 80, 1, 120);
assertEquals(set.lastRange(), range);
assertEquals(set.size(), 3);
}
|
@Override
public Integer call() throws Exception {
super.call();
try (var files = Files.walk(directory)) {
List<Template> templates = files
.filter(Files::isRegularFile)
.filter(YamlFlowParser::isValidExtension)
.map(path -> yamlFlowParser.parse(path.toFile(), Template.class))
.toList();
if (templates.isEmpty()) {
stdOut("No template found on '{}'", directory.toFile().getAbsolutePath());
}
try (DefaultHttpClient client = client()) {
MutableHttpRequest<List<Template>> request = HttpRequest
.POST(apiUri("/templates/") + namespace + "?delete=" + delete, templates);
List<UpdateResult> updated = client.toBlocking().retrieve(
this.requestOptions(request),
Argument.listOf(UpdateResult.class)
);
stdOut(updated.size() + " template(s) for namespace '" + namespace + "' successfully updated !");
updated.forEach(template -> stdOut("- " + template.getNamespace() + "." + template.getId()));
} catch (HttpClientResponseException e) {
TemplateValidateCommand.handleHttpException(e, "template");
return 1;
}
} catch (ConstraintViolationException e) {
TemplateValidateCommand.handleException(e, "template");
return 1;
}
return 0;
}
|
@Test
void runNoDelete() {
URL directory = TemplateNamespaceUpdateCommandTest.class.getClassLoader().getResource("templates");
URL subDirectory = TemplateNamespaceUpdateCommandTest.class.getClassLoader().getResource("templates/templatesSubFolder");
ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
try (ApplicationContext ctx = ApplicationContext.run(Map.of("kestra.templates.enabled", "true"), Environment.CLI, Environment.TEST)) {
EmbeddedServer embeddedServer = ctx.getBean(EmbeddedServer.class);
embeddedServer.start();
String[] args = {
"--server",
embeddedServer.getURL().toString(),
"--user",
"myuser:pass:word",
"io.kestra.tests",
directory.getPath(),
};
PicocliRunner.call(TemplateNamespaceUpdateCommand.class, ctx, args);
assertThat(out.toString(), containsString("3 template(s)"));
String[] newArgs = {
"--server",
embeddedServer.getURL().toString(),
"--user",
"myuser:pass:word",
"io.kestra.tests",
subDirectory.getPath(),
"--no-delete"
};
PicocliRunner.call(TemplateNamespaceUpdateCommand.class, ctx, newArgs);
assertThat(out.toString(), containsString("1 template(s)"));
}
}
|
@Override
public void finished(boolean allStepsExecuted) {
if (postProjectAnalysisTasks.length == 0) {
return;
}
ProjectAnalysisImpl projectAnalysis = createProjectAnalysis(allStepsExecuted ? SUCCESS : FAILED);
for (PostProjectAnalysisTask postProjectAnalysisTask : postProjectAnalysisTasks) {
executeTask(projectAnalysis, postProjectAnalysisTask);
}
}
|
@Test
@UseDataProvider("booleanValues")
public void logStatistics_adds_statistics_to_end_of_task_log(boolean allStepsExecuted) {
Map<String, Object> stats = new HashMap<>();
for (int i = 0; i <= new Random().nextInt(10); i++) {
stats.put("statKey_" + i, "statVal_" + i);
}
PostProjectAnalysisTask logStatisticsTask = mock(PostProjectAnalysisTask.class);
when(logStatisticsTask.getDescription()).thenReturn("PT1");
doAnswer(i -> {
PostProjectAnalysisTask.Context context = i.getArgument(0);
stats.forEach((k, v) -> context.getLogStatistics().add(k, v));
return null;
}).when(logStatisticsTask)
.finished(any(PostProjectAnalysisTask.Context.class));
new PostProjectAnalysisTasksExecutor(
ceTask, analysisMetadataHolder, qualityGateHolder, qualityGateStatusHolder, reportReader, new PostProjectAnalysisTask[] {logStatisticsTask})
.finished(allStepsExecuted);
verify(logStatisticsTask).finished(taskContextCaptor.capture());
assertThat(logTester.logs()).hasSize(1);
List<String> logs = logTester.logs(Level.INFO);
assertThat(logs).hasSize(1);
StringBuilder expectedLog = new StringBuilder("^PT1 ");
stats.forEach((k, v) -> expectedLog.append("\\| " + k + "=" + v + " "));
expectedLog.append("\\| status=SUCCESS \\| time=\\d+ms$");
assertThat(logs.get(0)).matches(expectedLog.toString());
}
|
@JsonIgnore
public boolean canHaveProfile() {
return !this.indexTemplateType().map(TEMPLATE_TYPES_FOR_INDEX_SETS_WITH_IMMUTABLE_FIELD_TYPES::contains).orElse(false);
}
|
@Test
public void testFailureIndexWithProfileSetIsIllegal() {
assertFalse(testIndexSetConfig(IndexTemplateProvider.FAILURE_TEMPLATE_TYPE,
null,
"profile").canHaveProfile());
}
|
public static AppsInfo mergeAppsInfo(ArrayList<AppInfo> appsInfo,
boolean returnPartialResult) {
AppsInfo allApps = new AppsInfo();
Map<String, AppInfo> federationAM = new HashMap<>();
Map<String, AppInfo> federationUAMSum = new HashMap<>();
for (AppInfo a : appsInfo) {
// Check if this AppInfo is an AM
if (a.getAMHostHttpAddress() != null) {
// Insert in the list of AM
federationAM.put(a.getAppId(), a);
// Check if there are any UAM found before
if (federationUAMSum.containsKey(a.getAppId())) {
// Merge the current AM with the found UAM
mergeAMWithUAM(a, federationUAMSum.get(a.getAppId()));
// Remove the sum of the UAMs
federationUAMSum.remove(a.getAppId());
}
// This AppInfo is a UAM
} else {
if (federationAM.containsKey(a.getAppId())) {
// Merge the current UAM with its own AM
mergeAMWithUAM(federationAM.get(a.getAppId()), a);
} else if (federationUAMSum.containsKey(a.getAppId())) {
// Merge the current UAM into the existing UAM sum and update the list of UAMs
federationUAMSum.put(a.getAppId(),
mergeUAMWithUAM(federationUAMSum.get(a.getAppId()), a));
} else {
// Insert in the list of UAM
federationUAMSum.put(a.getAppId(), a);
}
}
}
// Check whether the remaining UAMs depend on federation or not
for (AppInfo a : federationUAMSum.values()) {
if (returnPartialResult || (a.getName() != null
&& !(a.getName().startsWith(UnmanagedApplicationManager.APP_NAME)
|| a.getName().startsWith(PARTIAL_REPORT)))) {
federationAM.put(a.getAppId(), a);
}
}
allApps.addAll(new ArrayList<>(federationAM.values()));
return allApps;
}
|
@Test
public void testMerge2UAM() {
AppsInfo apps = new AppsInfo();
AppInfo app1 = new AppInfo();
app1.setAppId(APPID1.toString());
app1.setName(UnmanagedApplicationManager.APP_NAME);
app1.setState(YarnApplicationState.RUNNING);
apps.add(app1);
AppInfo app2 = new AppInfo();
app2.setAppId(APPID1.toString());
app2.setName(UnmanagedApplicationManager.APP_NAME);
app2.setState(YarnApplicationState.RUNNING);
apps.add(app2);
AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
Assert.assertNotNull(result);
Assert.assertEquals(0, result.getApps().size());
// By enabling partial result, the expected result would be a partial report
// of the 2 UAMs
AppsInfo result2 = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), true);
Assert.assertNotNull(result2);
Assert.assertEquals(1, result2.getApps().size());
Assert.assertEquals(YarnApplicationState.RUNNING,
result2.getApps().get(0).getState());
}
|
@Override
public String toString() {
return "CamelContextRouteCoverage{" +
"id='" + id + '\'' +
", exchangesTotal=" + exchangesTotal +
", totalProcessingTime=" + totalProcessingTime +
", routes=" + routes +
'}';
}
|
@Test
public void testToString() {
String toString = getInstance().toString();
assertNotNull(toString);
assertTrue(toString.contains("CamelContextRouteCoverage"));
}
|
DecodedJWT verifyJWT(PublicKey publicKey,
String publicKeyAlg,
DecodedJWT jwt) throws AuthenticationException {
if (publicKeyAlg == null) {
incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
throw new AuthenticationException("PublicKey algorithm cannot be null");
}
Algorithm alg;
try {
switch (publicKeyAlg) {
case ALG_RS256:
alg = Algorithm.RSA256((RSAPublicKey) publicKey, null);
break;
case ALG_RS384:
alg = Algorithm.RSA384((RSAPublicKey) publicKey, null);
break;
case ALG_RS512:
alg = Algorithm.RSA512((RSAPublicKey) publicKey, null);
break;
case ALG_ES256:
alg = Algorithm.ECDSA256((ECPublicKey) publicKey, null);
break;
case ALG_ES384:
alg = Algorithm.ECDSA384((ECPublicKey) publicKey, null);
break;
case ALG_ES512:
alg = Algorithm.ECDSA512((ECPublicKey) publicKey, null);
break;
default:
incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
throw new AuthenticationException("Unsupported algorithm: " + publicKeyAlg);
}
} catch (ClassCastException e) {
incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
throw new AuthenticationException("Expected PublicKey alg [" + publicKeyAlg + "] does match actual alg.");
}
// We verify issuer when retrieving the PublicKey, so it is not verified here.
// The claim presence requirements are based on https://openid.net/specs/openid-connect-basic-1_0.html#IDToken
Verification verifierBuilder = JWT.require(alg)
.acceptLeeway(acceptedTimeLeewaySeconds)
.withAnyOfAudience(allowedAudiences)
.withClaimPresence(RegisteredClaims.ISSUED_AT)
.withClaimPresence(RegisteredClaims.EXPIRES_AT)
.withClaimPresence(RegisteredClaims.NOT_BEFORE)
.withClaimPresence(RegisteredClaims.SUBJECT);
if (isRoleClaimNotSubject) {
verifierBuilder = verifierBuilder.withClaimPresence(roleClaim);
}
JWTVerifier verifier = verifierBuilder.build();
try {
return verifier.verify(jwt);
} catch (TokenExpiredException e) {
incrementFailureMetric(AuthenticationExceptionCode.EXPIRED_JWT);
throw new AuthenticationException("JWT expired: " + e.getMessage());
} catch (SignatureVerificationException e) {
incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT_SIGNATURE);
throw new AuthenticationException("JWT signature verification exception: " + e.getMessage());
} catch (InvalidClaimException e) {
incrementFailureMetric(AuthenticationExceptionCode.INVALID_JWT_CLAIM);
throw new AuthenticationException("JWT contains invalid claim: " + e.getMessage());
} catch (AlgorithmMismatchException e) {
incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
throw new AuthenticationException("JWT algorithm does not match Public Key algorithm: " + e.getMessage());
} catch (JWTDecodeException e) {
incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
throw new AuthenticationException("Error while decoding JWT: " + e.getMessage());
} catch (JWTVerificationException | IllegalArgumentException e) {
incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT);
throw new AuthenticationException("JWT verification failed: " + e.getMessage());
}
}
|
@Test
public void testThatSupportedAlgWithMismatchedPublicKeyFromDifferentAlgFamilyFails() throws IOException {
KeyPair keyPair = Keys.keyPairFor(SignatureAlgorithm.RS256);
@Cleanup
AuthenticationProviderOpenID provider = new AuthenticationProviderOpenID();
DefaultJwtBuilder defaultJwtBuilder = new DefaultJwtBuilder();
addValidMandatoryClaims(defaultJwtBuilder, basicProviderAudience);
defaultJwtBuilder.signWith(keyPair.getPrivate());
DecodedJWT jwt = JWT.decode(defaultJwtBuilder.compact());
// Choose a different algorithm from a different alg family
Assert.assertThrows(AuthenticationException.class,
() -> provider.verifyJWT(keyPair.getPublic(), SignatureAlgorithm.ES512.getValue(), jwt));
}
|
@Override
public int configInfoTagCount() {
ConfigInfoTagMapper configInfoTagMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO_TAG);
String sql = configInfoTagMapper.count(null);
Integer result = databaseOperate.queryOne(sql, Integer.class);
if (result == null) {
throw new IllegalArgumentException("configInfoBetaCount error");
}
return result;
}
|
@Test
void testConfigInfoTagCount() {
Timestamp timestamp = new Timestamp(System.currentTimeMillis());
//mock count
Mockito.when(databaseOperate.queryOne(anyString(), eq(Integer.class))).thenReturn(308);
//execute & verify
int count = embeddedConfigInfoTagPersistService.configInfoTagCount();
assertEquals(308, count);
}
|
@Override
public Consumer createConsumer(Processor aProcessor) throws Exception {
// validate that all of the endpoint is configured properly
if (getMonitorType() != null) {
if (!isPlatformServer()) {
throw new IllegalArgumentException(ERR_PLATFORM_SERVER);
}
if (ObjectHelper.isEmpty(getObservedAttribute())) {
throw new IllegalArgumentException(ERR_OBSERVED_ATTRIBUTE);
}
if (getMonitorType().equals("string")) {
if (ObjectHelper.isEmpty(getStringToCompare())) {
throw new IllegalArgumentException(ERR_STRING_TO_COMPARE);
}
if (!isNotifyDiffer() && !isNotifyMatch()) {
throw new IllegalArgumentException(ERR_STRING_NOTIFY);
}
} else if (getMonitorType().equals("gauge")) {
if (!isNotifyHigh() && !isNotifyLow()) {
throw new IllegalArgumentException(ERR_GAUGE_NOTIFY);
}
if (getThresholdHigh() == null) {
throw new IllegalArgumentException(ERR_THRESHOLD_HIGH);
}
if (getThresholdLow() == null) {
throw new IllegalArgumentException(ERR_THRESHOLD_LOW);
}
}
JMXMonitorConsumer answer = new JMXMonitorConsumer(this, aProcessor);
configureConsumer(answer);
return answer;
} else {
// shouldn't need any other validation.
JMXConsumer answer = new JMXConsumer(this, aProcessor);
configureConsumer(answer);
return answer;
}
}
|
@Test
public void remoteServerWithMonitor() throws Exception {
JMXEndpoint ep = context.getEndpoint(
"jmx:service:jmx:rmi:///jndi/rmi://localhost:1099/jmxrmi?objectDomain=FooDomain&key.name=theObjectName&monitorType=gauge",
JMXEndpoint.class);
try {
ep.createConsumer(null);
fail("expected exception");
} catch (IllegalArgumentException e) {
assertEquals(JMXEndpoint.ERR_PLATFORM_SERVER, e.getMessage());
}
}
|
public Node deserializeObject(JsonReader reader) {
Log.info("Deserializing JSON to Node.");
JsonObject jsonObject = reader.readObject();
return deserializeObject(jsonObject);
}
|
@Test
void testNonMetaProperties() {
CompilationUnit cu = parse("public class X{} class Z{}");
String serialized = serialize(cu, false);
CompilationUnit deserialized =
(CompilationUnit) deserializer.deserializeObject(Json.createReader(new StringReader(serialized)));
assertTrue(deserialized.hasRange());
Range range = deserialized.getRange().get();
assertEquals(1, range.begin.line);
assertEquals(1, range.begin.column);
assertEquals(26, range.end.column);
assertTrue(deserialized.getTokenRange().isPresent());
TokenRange tokenRange = deserialized.getTokenRange().get();
assertEquals("public", tokenRange.getBegin().getText());
assertEquals("", tokenRange.getEnd().getText());
}
|
public static byte[] serialize(final Object body) throws IOException {
final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
final ObjectOutputStream outputStream = new ObjectOutputStream(byteArrayOutputStream);
try {
outputStream.writeObject(body);
return byteArrayOutputStream.toByteArray();
} catch (NotSerializableException exception) {
throw new RuntimeCamelException(exception);
} finally {
byteArrayOutputStream.close();
outputStream.close();
}
}
|
@Test
public void testSerialisationOnPrimitive() throws Exception {
byte[] expected = PulsarMessageUtils.serialize(10);
assertNotNull(expected);
}
|
@Override
public void pluginUnLoaded(GoPluginDescriptor pluginDescriptor) {
super.pluginUnLoaded(pluginDescriptor);
if (extension.canHandlePlugin(pluginDescriptor.id())) {
for (PluginMetadataChangeListener listener : listeners) {
listener.onPluginMetadataRemove(pluginDescriptor.id());
}
}
}
|
@Test
public void onPluginUnloaded_shouldRemoveTheCorrespondingPluginInfoFromStore() throws Exception {
GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
AnalyticsMetadataLoader metadataLoader = new AnalyticsMetadataLoader(pluginManager, metadataStore, infoBuilder, extension);
AnalyticsPluginInfo pluginInfo = new AnalyticsPluginInfo(descriptor, null, null, null);
metadataStore.setPluginInfo(pluginInfo);
metadataLoader.pluginUnLoaded(descriptor);
verify(metadataStore).remove(descriptor.id());
}
|
public List<Instance> generateInstancesByIps(String serviceName, String rawProductName, String clusterName,
String[] ipArray) {
if (StringUtils.isEmpty(serviceName) || StringUtils.isEmpty(clusterName) || ipArray == null
|| ipArray.length == 0) {
return Collections.emptyList();
}
List<Instance> instanceList = new ArrayList<>(ipArray.length);
for (String ip : ipArray) {
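// generateIpAndPort presumably splits "host:port" (e.g. "192.168.3.1:8848")
// into [host, port], supplying a default port when none is given.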
String[] ipAndPort = generateIpAndPort(ip);
Instance instance = new Instance();
instance.setIp(ipAndPort[0]);
instance.setPort(Integer.parseInt(ipAndPort[1]));
instance.setClusterName(clusterName);
instance.setServiceName(serviceName);
instance.setEphemeral(false);
instance.getMetadata().put("app", rawProductName);
instance.getMetadata().put("tenant", Constants.DEFAULT_NAMESPACE_ID);
instanceList.add(instance);
}
return instanceList;
}
|
@Test
void testGenerateInstancesByIps() {
AddressServerGeneratorManager manager = new AddressServerGeneratorManager();
final List<Instance> empty = manager.generateInstancesByIps(null, null, null, null);
assertNotNull(empty);
assertTrue(empty.isEmpty());
String[] ipArray = new String[]{"192.168.3.1:8848", "192.168.3.2:8848", "192.168.3.3:8848"};
final List<Instance> instanceList = manager.generateInstancesByIps("DEFAULT_GROUP@@nacos.as.test", "test", "test",
ipArray);
assertNotNull(instanceList);
assertFalse(instanceList.isEmpty());
assertEquals(3, instanceList.size());
final Instance instance1 = instanceList.get(0);
assertEquals("192.168.3.1", instance1.getIp());
final Instance instance2 = instanceList.get(1);
assertEquals("192.168.3.2", instance2.getIp());
final Instance instance3 = instanceList.get(2);
assertEquals("192.168.3.3", instance3.getIp());
}
|
public Set<String> makeReady(final Map<String, InternalTopicConfig> topics) {
// we will do the validation / topic-creation in a loop, until we have confirmed all topics
// have existed with the expected number of partitions, or some create topic returns fatal errors.
log.debug("Starting to validate internal topics {} in partition assignor.", topics);
long currentWallClockMs = time.milliseconds();
final long deadlineMs = currentWallClockMs + retryTimeoutMs;
Set<String> topicsNotReady = new HashSet<>(topics.keySet());
final Set<String> newlyCreatedTopics = new HashSet<>();
while (!topicsNotReady.isEmpty()) {
final Set<String> tempUnknownTopics = new HashSet<>();
topicsNotReady = validateTopics(topicsNotReady, topics, tempUnknownTopics);
newlyCreatedTopics.addAll(topicsNotReady);
if (!topicsNotReady.isEmpty()) {
final Set<NewTopic> newTopics = new HashSet<>();
for (final String topicName : topicsNotReady) {
if (tempUnknownTopics.contains(topicName)) {
// for the tempUnknownTopics, don't create topic for them
// we'll check again later if remaining retries > 0
continue;
}
final InternalTopicConfig internalTopicConfig = Objects.requireNonNull(topics.get(topicName));
final Map<String, String> topicConfig = internalTopicConfig.properties(defaultTopicConfigs, windowChangeLogAdditionalRetention);
log.debug("Going to create topic {} with {} partitions and config {}.",
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
topicConfig);
newTopics.add(
new NewTopic(
internalTopicConfig.name(),
internalTopicConfig.numberOfPartitions(),
Optional.of(replicationFactor))
.configs(topicConfig));
}
// it's possible that some topics are not ready yet because they are
// temporarily unavailable rather than non-existent; in this case the set of
// new topics to create may be empty and hence we can skip here
if (!newTopics.isEmpty()) {
final CreateTopicsResult createTopicsResult = adminClient.createTopics(newTopics);
for (final Map.Entry<String, KafkaFuture<Void>> createTopicResult : createTopicsResult.values().entrySet()) {
final String topicName = createTopicResult.getKey();
try {
createTopicResult.getValue().get();
topicsNotReady.remove(topicName);
} catch (final InterruptedException fatalException) {
// this should not happen; if it ever happens it indicate a bug
Thread.currentThread().interrupt();
log.error(INTERRUPTED_ERROR_MESSAGE, fatalException);
throw new IllegalStateException(INTERRUPTED_ERROR_MESSAGE, fatalException);
} catch (final ExecutionException executionException) {
final Throwable cause = executionException.getCause();
if (cause instanceof TopicExistsException) {
// This topic didn't exist earlier, or its leader was not known before; just retain it for the next round of validation.
log.info(
"Could not create topic {}. Topic is probably marked for deletion (number of partitions is unknown).\n"
+
"Will retry to create this topic in {} ms (to let broker finish async delete operation first).\n"
+
"Error message was: {}", topicName, retryBackOffMs,
cause.toString());
} else {
log.error("Unexpected error during topic creation for {}.\n" +
"Error message was: {}", topicName, cause.toString());
if (cause instanceof UnsupportedVersionException) {
final String errorMessage = cause.getMessage();
if (errorMessage != null &&
errorMessage.startsWith("Creating topics with default partitions/replication factor are only supported in CreateTopicRequest version 4+")) {
throw new StreamsException(String.format(
"Could not create topic %s, because brokers don't support configuration replication.factor=-1."
+ " You can change the replication.factor config or upgrade your brokers to version 2.4 or newer to avoid this error.",
topicName)
);
}
} else if (cause instanceof TimeoutException) {
log.error("Creating topic {} timed out.\n" +
"Error message was: {}", topicName, cause.toString());
} else {
throw new StreamsException(
String.format("Could not create topic %s.", topicName),
cause
);
}
}
}
}
}
}
if (!topicsNotReady.isEmpty()) {
currentWallClockMs = time.milliseconds();
if (currentWallClockMs >= deadlineMs) {
final String timeoutError = String.format("Could not create topics within %d milliseconds. " +
"This can happen if the Kafka cluster is temporarily not available.", retryTimeoutMs);
log.error(timeoutError);
throw new TimeoutException(timeoutError);
}
log.info(
"Topics {} could not be made ready. Will retry in {} milliseconds. Remaining time in milliseconds: {}",
topicsNotReady,
retryBackOffMs,
deadlineMs - currentWallClockMs
);
Utils.sleep(retryBackOffMs);
}
}
log.debug("Completed validating internal topics and created {}", newlyCreatedTopics);
return newlyCreatedTopics;
}
|
@Test
public void shouldNotCreateTopicIfExistsWithDifferentPartitions() {
mockAdminClient.addTopic(
false,
topic1,
new ArrayList<TopicPartitionInfo>() {
{
add(new TopicPartitionInfo(0, broker1, singleReplica, Collections.emptyList()));
add(new TopicPartitionInfo(1, broker1, singleReplica, Collections.emptyList()));
}
},
null);
try {
final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
internalTopicConfig.setNumberOfPartitions(1);
internalTopicManager.makeReady(Collections.singletonMap(topic1, internalTopicConfig));
fail("Should have thrown StreamsException");
} catch (final StreamsException expected) { /* pass */ }
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String dmType = typeDefine.getDataType().toUpperCase();
switch (dmType) {
case DM_BIT:
builder.sourceType(DM_BIT);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case DM_TINYINT:
builder.sourceType(DM_TINYINT);
builder.dataType(BasicType.BYTE_TYPE);
break;
case DM_BYTE:
builder.sourceType(DM_BYTE);
builder.dataType(BasicType.BYTE_TYPE);
break;
case DM_SMALLINT:
builder.sourceType(DM_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case DM_INT:
builder.sourceType(DM_INT);
builder.dataType(BasicType.INT_TYPE);
break;
case DM_INTEGER:
builder.sourceType(DM_INTEGER);
builder.dataType(BasicType.INT_TYPE);
break;
case DM_PLS_INTEGER:
builder.sourceType(DM_PLS_INTEGER);
builder.dataType(BasicType.INT_TYPE);
break;
case DM_BIGINT:
builder.sourceType(DM_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case DM_REAL:
builder.sourceType(DM_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case DM_FLOAT:
builder.sourceType(DM_FLOAT);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DM_DOUBLE:
builder.sourceType(DM_DOUBLE);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DM_DOUBLE_PRECISION:
builder.sourceType(DM_DOUBLE_PRECISION);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DM_NUMERIC:
case DM_NUMBER:
case DM_DECIMAL:
case DM_DEC:
DecimalType decimalType;
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale());
} else {
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
}
builder.sourceType(
String.format(
"%s(%s,%s)",
DM_DECIMAL, decimalType.getPrecision(), decimalType.getScale()));
builder.dataType(decimalType);
builder.columnLength((long) decimalType.getPrecision());
builder.scale(decimalType.getScale());
break;
case DM_CHAR:
case DM_CHARACTER:
builder.sourceType(String.format("%s(%s)", DM_CHAR, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
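// charTo4ByteLength is assumed to convert the declared character count into
// a byte length at up to four bytes per character (worst-case encoding).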
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
break;
case DM_VARCHAR:
case DM_VARCHAR2:
builder.sourceType(String.format("%s(%s)", DM_VARCHAR2, typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
break;
case DM_TEXT:
builder.sourceType(DM_TEXT);
builder.dataType(BasicType.STRING_TYPE);
// dm text max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_LONG:
builder.sourceType(DM_LONG);
builder.dataType(BasicType.STRING_TYPE);
// dm long max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_LONGVARCHAR:
builder.sourceType(DM_LONGVARCHAR);
builder.dataType(BasicType.STRING_TYPE);
// dm longvarchar max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_CLOB:
builder.sourceType(DM_CLOB);
builder.dataType(BasicType.STRING_TYPE);
// dm clob max length is 2147483647
builder.columnLength(typeDefine.getLength());
break;
case DM_BINARY:
builder.sourceType(String.format("%s(%s)", DM_BINARY, typeDefine.getLength()));
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_VARBINARY:
builder.sourceType(String.format("%s(%s)", DM_VARBINARY, typeDefine.getLength()));
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_LONGVARBINARY:
builder.sourceType(DM_LONGVARBINARY);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_IMAGE:
builder.sourceType(DM_IMAGE);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_BLOB:
builder.sourceType(DM_BLOB);
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(typeDefine.getLength());
break;
case DM_BFILE:
builder.sourceType(DM_BFILE);
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(typeDefine.getLength());
break;
case DM_DATE:
builder.sourceType(DM_DATE);
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case DM_TIME:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_TIME);
} else {
builder.sourceType(String.format("%s(%s)", DM_TIME, typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_TIME_WITH_TIME_ZONE:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_TIME_WITH_TIME_ZONE);
} else {
builder.sourceType(
String.format("TIME(%s) WITH TIME ZONE", typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_TIMESTAMP:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_TIMESTAMP);
} else {
builder.sourceType(
String.format("%s(%s)", DM_TIMESTAMP, typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_DATETIME:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_DATETIME);
} else {
builder.sourceType(String.format("%s(%s)", DM_DATETIME, typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
case DM_DATETIME_WITH_TIME_ZONE:
if (typeDefine.getScale() == null) {
builder.sourceType(DM_DATETIME_WITH_TIME_ZONE);
} else {
builder.sourceType(
String.format("DATETIME(%s) WITH TIME ZONE", typeDefine.getScale()));
}
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.DAMENG, typeDefine.getDataType(), typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertImage() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder()
.name("test")
.columnType("image")
.dataType("image")
.length(2147483647L)
.build();
Column column = DmdbTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(2147483647L, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType().toLowerCase());
}
|
@Override
public void run() {
try {
// We kill containers until the kernel reports the OOM situation resolved
// Note: If the kernel has a delay this may kill more than necessary
while (true) {
String status = cgroups.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL);
if (!status.contains(CGroupsHandler.UNDER_OOM)) {
break;
}
boolean containerKilled = killContainer();
if (!containerKilled) {
// This can happen if SIGKILL did not clean up
// non-PGID processes or containers launched by other users,
// or if a process was put in the root YARN cgroup.
throw new YarnRuntimeException(
"Could not find any containers but CGroups " +
"reserved for containers ran out of memory. " +
"I am giving up");
}
}
} catch (ResourceHandlerException ex) {
LOG.warn("Could not fetch OOM status. " +
"This is expected at shutdown. Exiting.", ex);
}
}
|
@Test
public void testNoGuaranteedContainerOverLimitOOM() throws Exception {
ConcurrentHashMap<ContainerId, Container> containers =
new ConcurrentHashMap<>();
Container c1 = createContainer(1, true, 1L, true);
containers.put(c1.getContainerId(), c1);
Container c2 = createContainer(2, true, 2L, true);
containers.put(c2.getContainerId(), c2);
ContainerExecutor ex = createContainerExecutor(containers);
Context context = mock(Context.class);
when(context.getContainers()).thenReturn(containers);
when(context.getContainerExecutor()).thenReturn(ex);
CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
when(cGroupsHandler.getCGroupParam(
CGroupsHandler.CGroupController.MEMORY,
"",
CGROUP_PARAM_MEMORY_OOM_CONTROL))
.thenReturn("under_oom 1").thenReturn("under_oom 0");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PROCS_FILE))
.thenReturn("1234").thenReturn("");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PROCS_FILE))
.thenReturn("1235").thenReturn("");
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
.thenReturn(getMB(9));
when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
.thenReturn(getMB(9));
DefaultOOMHandler handler =
new DefaultOOMHandler(context, false) {
@Override
protected CGroupsHandler getCGroupsHandler() {
return cGroupsHandler;
}
};
handler.run();
verify(ex, times(1)).signalContainer(
new ContainerSignalContext.Builder()
.setPid("1235")
.setContainer(c2)
.setSignal(ContainerExecutor.Signal.KILL)
.build()
);
verify(ex, times(1)).signalContainer(any());
}
|
public DropSourceCommand create(final DropStream statement) {
return create(
statement.getName(),
statement.getIfExists(),
statement.isDeleteTopic(),
DataSourceType.KSTREAM
);
}
|
@Test
public void shouldFailDeleteTopicForSourceStream() {
// Given:
final DropStream dropStream = new DropStream(SOME_NAME, false, true);
when(ksqlStream.isSource()).thenReturn(true);
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> dropSourceFactory.create(dropStream)
);
// Then:
assertThat(e.getMessage(), containsString("Cannot delete topic for read-only source: bob"));
}
|
public ColumnMeta getColumnMeta(String colName) {
return allColumns.get(colName);
}
|
@Test
public void testGetColumnMeta() {
ColumnMeta columnMeta = new ColumnMeta();
tableMeta.getAllColumns().put("col1", columnMeta);
assertEquals(columnMeta, tableMeta.getColumnMeta("col1"), "Should return the correct ColumnMeta object");
}
|
public static AuthorizationDoc fromDto(IndexType indexType, IndexPermissions dto) {
AuthorizationDoc res = new AuthorizationDoc(indexType, dto.getEntityUuid());
if (dto.isAllowAnyone()) {
return res.setAllowAnyone();
}
return res.setRestricted(dto.getGroupUuids(), dto.getUserUuids());
}
|
@Test
@UseDataProvider("dtos")
public void getRouting_returns_projectUuid(IndexPermissions dto) {
AuthorizationDoc underTest = AuthorizationDoc.fromDto(IndexType.main(Index.simple("foo"), "bar"), dto);
assertThat(underTest.getRouting()).contains(dto.getEntityUuid());
}
|
public static Object project(Schema source, Object record, Schema target) throws SchemaProjectorException {
checkMaybeCompatible(source, target);
if (source.isOptional() && !target.isOptional()) {
if (target.defaultValue() != null) {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return target.defaultValue();
}
} else {
throw new SchemaProjectorException("Writer schema is optional, however, target schema does not provide a default value.");
}
} else {
if (record != null) {
return projectRequiredSchema(source, record, target);
} else {
return null;
}
}
}
|
@Test
public void testStructDefaultValue() {
Schema source = SchemaBuilder.struct().optional()
.field("field", Schema.INT32_SCHEMA)
.field("field2", Schema.INT32_SCHEMA)
.build();
SchemaBuilder builder = SchemaBuilder.struct()
.field("field", Schema.INT32_SCHEMA)
.field("field2", Schema.INT32_SCHEMA);
Struct defaultStruct = new Struct(builder).put("field", 12).put("field2", 345);
builder.defaultValue(defaultStruct);
Schema target = builder.build();
Object projected = SchemaProjector.project(source, null, target);
assertEquals(defaultStruct, projected);
Struct sourceStruct = new Struct(source).put("field", 45).put("field2", 678);
Struct targetStruct = (Struct) SchemaProjector.project(source, sourceStruct, target);
assertEquals(sourceStruct.get("field"), targetStruct.get("field"));
assertEquals(sourceStruct.get("field2"), targetStruct.get("field2"));
}
|
Number applyCastInteger(double predictionDouble) {
return targetField.getCastInteger() != null ? targetField.getCastInteger().getScaledValue(predictionDouble) :
predictionDouble;
}
|
@Test
void applyCastInteger() {
TargetField targetField = new TargetField(Collections.emptyList(), null, "string", null, null, null, null,
null);
KiePMMLTarget kiePMMLTarget = getBuilder(targetField).build();
assertThat((double) kiePMMLTarget.applyCastInteger(2.718)).isCloseTo(2.718, Offset.offset(0.0));
targetField = new TargetField(Collections.emptyList(), null, "string", CAST_INTEGER.ROUND, null, null, null,
null);
kiePMMLTarget = getBuilder(targetField).build();
assertThat((double) kiePMMLTarget.applyCastInteger(2.718)).isCloseTo(3.0, Offset.offset(0.0));
}
|
public void registerNewEntity(final String id, final User user, final GRNType grnType) {
final GRN grn = grnRegistry.newGRN(grnType, id);
registerNewEntity(grn, user);
}
|
@Test
void registersNewEntityForEachType() {
final User mockUser = mock(User.class);
when(mockUser.getName()).thenReturn("mockuser");
when(mockUser.getId()).thenReturn("mockuser");
final String id = "1234";
for (GRNType type : GRNTypes.builtinTypes()) {
entityOwnershipService.registerNewEntity(id, mockUser, type);
ArgumentCaptor<GrantDTO> grant = ArgumentCaptor.forClass(GrantDTO.class);
ArgumentCaptor<User> user = ArgumentCaptor.forClass(User.class);
grnRegistryInOrderVerification.verify(dbGrantService).create(grant.capture(), user.capture());
assertThat(grant.getValue()).satisfies(g -> {
assertThat(g.capability()).isEqualTo(Capability.OWN);
assertThat(g.target().type()).isEqualTo(type.type());
assertThat(g.target().entity()).isEqualTo(id);
assertThat(g.grantee().type()).isEqualTo(GRNTypes.USER.type());
assertThat(g.grantee().entity()).isEqualTo("mockuser");
});
}
}
|
public static Set<PathSpec> getPresentPaths(MaskTree filter, Set<PathSpec> paths)
{
// this emulates the behavior of Rest.li server
// if client does not specify any mask, the server receives null when retrieving the MaskTree
// in this case, all fields are returned
if (filter == null)
{
return paths;
}
final DataMap filterMap = filter.getDataMap();
if (filterMap.isEmpty())
{
return Collections.emptySet();
}
final DataMap pathSpecMap = createPathSpecMap(paths);
@SuppressWarnings("unchecked")
final DataMap filteredPathSpecs = (DataMap) new PathSpecFilter().filter(pathSpecMap, filterMap);
return validate(filteredPathSpecs, paths);
}
|
@Test
public void testPositiveMultiPaths()
{
final MaskTree filter = new MaskTree();
filter.addOperation(new PathSpec("foo", "bar", "baz"), MaskOperation.POSITIVE_MASK_OP);
final Collection<PathSpec> positivePaths = new HashSet<>(Arrays.asList(
new PathSpec("foo"),
new PathSpec("foo", "bar"),
new PathSpec("foo", "bar", "baz"),
new PathSpec("foo", "bar", "baz", "xyz"),
new PathSpec("foo", "bar", "baz", "abc", "xyz")
));
final Collection<PathSpec> negativePaths = new HashSet<>(Arrays.asList(
new PathSpec("xyz"),
new PathSpec("foo", "baz"),
new PathSpec("foo", "xyz"),
new PathSpec("foo", "bar", "xyz")
));
// test false positive
final Set<PathSpec> positiveResult = ProjectionUtil.getPresentPaths(filter, new HashSet<>(positivePaths));
Assert.assertEquals(positiveResult, positivePaths);
// test false negative
final Set<PathSpec> negativeResult = ProjectionUtil.getPresentPaths(filter, new HashSet<>(negativePaths));
Assert.assertTrue(negativeResult.isEmpty());
final Set<PathSpec> combinedPaths = new HashSet<>(positivePaths);
combinedPaths.addAll(negativePaths);
// combine both to test internal ordering, overwrites, etc.
final Set<PathSpec> combinedResult = ProjectionUtil.getPresentPaths(filter, combinedPaths);
Assert.assertEquals(combinedResult, new HashSet<>(positivePaths));
for (PathSpec p : negativePaths)
{
Assert.assertFalse(combinedResult.contains(p));
}
}
|
public MemorySize divide(long by) {
if (by < 0) {
throw new IllegalArgumentException("divisor must be >= 0");
}
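// Note: integer division truncates toward zero, and a divisor of 0 passes
// the guard above; it surfaces as an ArithmeticException from the division.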
return new MemorySize(bytes / by);
}
|
@Test
void testDivideByLong() {
final MemorySize memory = new MemorySize(100L);
assertThat(memory.divide(23)).isEqualTo(new MemorySize(4L));
}
|
public static String decodeName(ByteBuf in) {
return DnsCodecUtil.decodeDomainName(in);
}
|
@Test
public void testDecodeName() {
testDecodeName("netty.io.", Unpooled.wrappedBuffer(new byte[] {
5, 'n', 'e', 't', 't', 'y', 2, 'i', 'o', 0
}));
}
|
public static MetricsReporter combine(MetricsReporter first, MetricsReporter second) {
if (null == first) {
return second;
} else if (null == second || first == second) {
return first;
}
Set<MetricsReporter> reporters = Sets.newIdentityHashSet();
if (first instanceof CompositeMetricsReporter) {
reporters.addAll(((CompositeMetricsReporter) first).reporters());
} else {
reporters.add(first);
}
if (second instanceof CompositeMetricsReporter) {
reporters.addAll(((CompositeMetricsReporter) second).reporters());
} else {
reporters.add(second);
}
return new CompositeMetricsReporter(reporters);
}
|
@Test
public void combineSameClassButDifferentInstances() {
MetricsReporter first = LoggingMetricsReporter.instance();
MetricsReporter second = new LoggingMetricsReporter();
MetricsReporter combined = MetricsReporters.combine(first, second);
assertThat(combined).isInstanceOf(MetricsReporters.CompositeMetricsReporter.class);
assertThat(((MetricsReporters.CompositeMetricsReporter) combined).reporters())
.hasSize(2)
.containsExactlyInAnyOrder(first, second);
}
|
@Override
public Collection<SQLToken> generateSQLTokens(final InsertStatementContext insertStatementContext) {
Optional<InsertColumnsSegment> insertColumnsSegment = insertStatementContext.getSqlStatement().getInsertColumns();
Preconditions.checkState(insertColumnsSegment.isPresent());
Collection<ColumnSegment> insertColumns = insertColumnsSegment.get().getColumns();
if (null != insertStatementContext.getInsertSelectContext()) {
Collection<Projection> projections = insertStatementContext.getInsertSelectContext().getSelectStatementContext().getProjectionsContext().getExpandProjections();
ShardingSpherePreconditions.checkState(insertColumns.size() == projections.size(), () -> new UnsupportedSQLOperationException("Column count doesn't match value count."));
ShardingSpherePreconditions.checkState(InsertSelectColumnsEncryptorComparator.isSame(insertColumns, projections, encryptRule),
() -> new UnsupportedSQLOperationException("Can not use different encryptor in insert select columns"));
}
EncryptTable encryptTable = encryptRule.getEncryptTable(insertStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue());
Collection<SQLToken> result = new LinkedList<>();
for (ColumnSegment each : insertColumns) {
String columnName = each.getIdentifier().getValue();
if (encryptTable.isEncryptColumn(columnName)) {
Collection<Projection> projections =
Collections.singleton(new ColumnProjection(null, encryptTable.getEncryptColumn(columnName).getCipher().getName(), null, insertStatementContext.getDatabaseType()));
result.add(new SubstitutableColumnNameToken(each.getStartIndex(), each.getStopIndex(), projections, insertStatementContext.getDatabaseType()));
}
}
return result;
}
|
@Test
void assertGenerateSQLTokensWithInsertStatementContext() {
assertThat(generator.generateSQLTokens(EncryptGeneratorFixtureBuilder.createInsertStatementContext(Collections.emptyList())).size(), is(1));
}
|
static int getMaskCharacter(final String mask) {
if (mask == null) {
return NO_MASK;
}
if (mask.length() != 1) {
throw new KsqlException("Invalid mask character. "
+ "Must be only single character, but was '" + mask + "'");
}
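// Supplementary characters (e.g. emoji) occupy two chars in a Java String,
// so the length check above rejects them and codePointAt(0) always reads a
// single BMP code point.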
return mask.codePointAt(0);
}
|
@Test(expected = KsqlException.class)
public void shouldThrowOnEmptyMask() {
Masker.getMaskCharacter("");
}
|
@Override
public Stream<Pair<String, CompactionOperation>> getPendingCompactionOperations() {
return execute(preferredView::getPendingCompactionOperations, () -> getSecondaryView().getPendingCompactionOperations());
}
|
@Test
public void testGetPendingCompactionOperations() {
Stream<Pair<String, CompactionOperation>> actual;
Stream<Pair<String, CompactionOperation>> expected = Collections.singleton(
(Pair<String, CompactionOperation>) new ImmutablePair<>("test", new CompactionOperation()))
.stream();
when(primary.getPendingCompactionOperations()).thenReturn(expected);
actual = fsView.getPendingCompactionOperations();
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getPendingCompactionOperations()).thenThrow(new RuntimeException());
when(secondary.getPendingCompactionOperations()).thenReturn(expected);
actual = fsView.getPendingCompactionOperations();
assertEquals(expected, actual);
resetMocks();
when(secondary.getPendingCompactionOperations()).thenReturn(expected);
actual = fsView.getPendingCompactionOperations();
assertEquals(expected, actual);
resetMocks();
when(secondary.getPendingCompactionOperations()).thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getPendingCompactionOperations();
});
}
|
public static boolean isCreditCode(CharSequence creditCode) {
if (false == isCreditCodeSimple(creditCode)) {
return false;
}
final int parityBit = getParityBit(creditCode);
if (parityBit < 0) {
return false;
}
return creditCode.charAt(17) == BASE_CODE_ARRAY[parityBit];
}
|
@Test
public void isCreditCode2() {
// Some early pilot regions rolled out the unified social credit code for legal persons
// and other organizations before the national standard settled, so some real-world
// codes do not conform to the standard.
// See: https://github.com/bluesky335/IDCheck
String testCreditCode = "91350211M00013FA1N";
assertFalse(CreditCodeUtil.isCreditCode(testCreditCode));
}
|
public String process(final Expression expression) {
return formatExpression(expression);
}
|
@Test
public void shouldGenerateCorrectCodeForTimestampDateGT() {
// Given:
final ComparisonExpression compExp = new ComparisonExpression(
Type.GREATER_THAN,
TIMESTAMPCOL,
DATECOL
);
// When:
final String java = sqlToJavaVisitor.process(compExp);
// Then:
assertThat(java, containsString("(((java.sql.Timestamp) arguments.get(\"COL10\")).compareTo(((java.sql.Date) arguments.get(\"COL13\"))) > 0)"));
}
|
@Override
public void metricChange(final KafkaMetric metric) {
if (!THROUGHPUT_METRIC_NAMES.contains(metric.metricName().name())
|| !StreamsMetricsImpl.TOPIC_LEVEL_GROUP.equals(metric.metricName().group())) {
return;
}
addMetric(
metric,
getQueryId(metric),
getTopic(metric)
);
}
|
@Test
public void shouldAggregateMetricsByQueryIdForTransientQueries() {
// Given:
final Map<String, String> transientQueryTags = ImmutableMap.of(
"logical_cluster_id", "lksqlc-12345",
"query-id", "blahblah_4",
"member", TRANSIENT_THREAD_ID,
"topic", TOPIC_NAME
);
listener.metricChange(mockMetric(
BYTES_CONSUMED_TOTAL,
2D,
ImmutableMap.of(
"thread-id", TRANSIENT_THREAD_ID,
"task-id", TASK_ID_1,
"processor-node-id", PROCESSOR_NODE_ID,
"topic", TOPIC_NAME))
);
Measurable bytesConsumed = verifyAndGetMetric(BYTES_CONSUMED_TOTAL, transientQueryTags);
Object bytesConsumedValue =
bytesConsumed.measure(new MetricConfig().tags(transientQueryTags), 0L);
assertThat(bytesConsumedValue, equalTo(2D));
// When:
listener.metricChange(mockMetric(
BYTES_CONSUMED_TOTAL,
15D,
ImmutableMap.of(
"thread-id", TRANSIENT_THREAD_ID,
"task-id", TASK_ID_2,
"processor-node-id", PROCESSOR_NODE_ID,
"topic", TOPIC_NAME
))
);
// Then:
bytesConsumed = verifyAndGetMetric(BYTES_CONSUMED_TOTAL, transientQueryTags);
bytesConsumedValue = bytesConsumed.measure(new MetricConfig().tags(transientQueryTags), 0L);
assertThat(bytesConsumedValue, equalTo(17D));
}
|
public static CompositeEvictionChecker newCompositeEvictionChecker(CompositionOperator compositionOperator,
EvictionChecker... evictionCheckers) {
Preconditions.isNotNull(compositionOperator, "compositionOperator");
Preconditions.isNotNull(evictionCheckers, "evictionCheckers");
if (evictionCheckers.length == 0) {
throw new IllegalArgumentException("EvictionCheckers cannot be empty!");
}
switch (compositionOperator) {
case AND:
return new CompositeEvictionCheckerWithAndComposition(evictionCheckers);
case OR:
return new CompositeEvictionCheckerWithOrComposition(evictionCheckers);
default:
throw new IllegalArgumentException("Invalid composition operator: " + compositionOperator);
}
}
|
@Test
public void resultShouldReturnFalse_whenAllIsFalse_withAndCompositionOperator() {
EvictionChecker evictionChecker1ReturnsFalse = mock(EvictionChecker.class);
EvictionChecker evictionChecker2ReturnsFalse = mock(EvictionChecker.class);
when(evictionChecker1ReturnsFalse.isEvictionRequired()).thenReturn(false);
when(evictionChecker2ReturnsFalse.isEvictionRequired()).thenReturn(false);
CompositeEvictionChecker compositeEvictionChecker =
CompositeEvictionChecker.newCompositeEvictionChecker(
CompositeEvictionChecker.CompositionOperator.AND, evictionChecker1ReturnsFalse,
evictionChecker2ReturnsFalse);
assertFalse(compositeEvictionChecker.isEvictionRequired());
}
|
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
}
|
@Test
public void testUnusedConfigInKRaftBasedClusters() {
Kafka kafka = new KafkaBuilder(KAFKA)
.editSpec()
.editKafka()
.addToConfig(Map.of(
"inter.broker.protocol.version", "3.5",
"log.message.format.version", "3.5"
))
.endKafka()
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(2));
assertThat(warnings.get(0).getReason(), is("KafkaInterBrokerProtocolVersionInKRaft"));
assertThat(warnings.get(0).getMessage(), is("inter.broker.protocol.version is not used in KRaft-based Kafka clusters and should be removed from the Kafka custom resource."));
assertThat(warnings.get(1).getReason(), is("KafkaLogMessageFormatVersionInKRaft"));
assertThat(warnings.get(1).getMessage(), is("log.message.format.version is not used in KRaft-based Kafka clusters and should be removed from the Kafka custom resource."));
}
|
@Override
public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) {
return payload.getByteBuf().readDouble();
}
|
@Test
void assertRead() {
when(byteBuf.readDouble()).thenReturn(1D);
assertThat(new PostgreSQLDoubleBinaryProtocolValue().read(new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8), 8), is(1D));
}
|
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
if (boolean.class == type) {
return resultSet.getBoolean(columnIndex);
}
if (byte.class == type) {
return resultSet.getByte(columnIndex);
}
if (short.class == type) {
return resultSet.getShort(columnIndex);
}
if (int.class == type) {
return resultSet.getInt(columnIndex);
}
if (long.class == type) {
return resultSet.getLong(columnIndex);
}
if (float.class == type) {
return resultSet.getFloat(columnIndex);
}
if (double.class == type) {
return resultSet.getDouble(columnIndex);
}
if (String.class == type) {
return resultSet.getString(columnIndex);
}
if (BigDecimal.class == type) {
return resultSet.getBigDecimal(columnIndex);
}
if (byte[].class == type) {
return resultSet.getBytes(columnIndex);
}
if (Date.class == type) {
return resultSet.getDate(columnIndex);
}
if (Time.class == type) {
return resultSet.getTime(columnIndex);
}
if (Timestamp.class == type) {
return resultSet.getTimestamp(columnIndex);
}
if (Blob.class == type) {
return resultSet.getBlob(columnIndex);
}
if (Clob.class == type) {
return resultSet.getClob(columnIndex);
}
if (Array.class == type) {
return resultSet.getArray(columnIndex);
}
return resultSet.getObject(columnIndex);
}
|
@Test
void assertGetValueByDouble() throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.getDouble(1)).thenReturn(1.0D);
assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, double.class), is(1.0D));
}
|
public static void info(final Logger logger, final String format, final Supplier<Object> supplier) {
if (logger.isInfoEnabled()) {
logger.info(format, supplier.get());
}
}
|
@Test
public void testNeverInfo() {
when(logger.isInfoEnabled()).thenReturn(false);
LogUtils.info(logger, supplier);
verify(supplier, never()).get();
}
|
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String db2Type = typeDefine.getDataType().toUpperCase();
switch (db2Type) {
case DB2_BOOLEAN:
builder.sourceType(DB2_BOOLEAN);
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case DB2_SMALLINT:
builder.sourceType(DB2_SMALLINT);
builder.dataType(BasicType.SHORT_TYPE);
break;
case DB2_INT:
case DB2_INTEGER:
builder.sourceType(DB2_INT);
builder.dataType(BasicType.INT_TYPE);
break;
case DB2_BIGINT:
builder.sourceType(DB2_BIGINT);
builder.dataType(BasicType.LONG_TYPE);
break;
case DB2_REAL:
builder.sourceType(DB2_REAL);
builder.dataType(BasicType.FLOAT_TYPE);
break;
case DB2_DOUBLE:
builder.sourceType(DB2_DOUBLE);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DB2_DECFLOAT:
builder.sourceType(DB2_DECFLOAT);
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case DB2_DECIMAL:
builder.sourceType(
String.format(
"%s(%s,%s)",
DB2_DECIMAL, typeDefine.getPrecision(), typeDefine.getScale()));
builder.dataType(
new DecimalType(
Math.toIntExact(typeDefine.getPrecision()), typeDefine.getScale()));
builder.columnLength(typeDefine.getPrecision());
builder.scale(typeDefine.getScale());
break;
case DB2_CHARACTER:
case DB2_CHAR:
builder.sourceType(String.format("%s(%d)", DB2_CHAR, typeDefine.getLength()));
// For char/varchar this length is in bytes
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_VARCHAR:
builder.sourceType(String.format("%s(%d)", DB2_VARCHAR, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_LONG_VARCHAR:
builder.sourceType(DB2_LONG_VARCHAR);
// default length is 32700
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_CLOB:
builder.sourceType(String.format("%s(%d)", DB2_CLOB, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_GRAPHIC:
builder.sourceType(String.format("%s(%d)", DB2_GRAPHIC, typeDefine.getLength()));
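// GRAPHIC lengths count double-byte characters, not bytes; charTo4ByteLength
// converts this to a byte length assuming up to 4 bytes per character
// (inferred from the helper's name).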
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_VARGRAPHIC:
builder.sourceType(String.format("%s(%d)", DB2_VARGRAPHIC, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_DBCLOB:
builder.sourceType(String.format("%s(%d)", DB2_DBCLOB, typeDefine.getLength()));
builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_XML:
builder.sourceType(DB2_XML);
builder.columnLength((long) Integer.MAX_VALUE);
builder.dataType(BasicType.STRING_TYPE);
break;
case DB2_BINARY:
builder.sourceType(String.format("%s(%d)", DB2_BINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_VARBINARY:
builder.sourceType(String.format("%s(%d)", DB2_VARBINARY, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_BLOB:
builder.sourceType(String.format("%s(%d)", DB2_BLOB, typeDefine.getLength()));
builder.columnLength(typeDefine.getLength());
builder.dataType(PrimitiveByteArrayType.INSTANCE);
break;
case DB2_DATE:
builder.sourceType(DB2_DATE);
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case DB2_TIME:
builder.sourceType(DB2_TIME);
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
break;
case DB2_TIMESTAMP:
builder.sourceType(String.format("%s(%d)", DB2_TIMESTAMP, typeDefine.getScale()));
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
builder.scale(typeDefine.getScale());
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.DB_2, db2Type, typeDefine.getName());
}
return builder.build();
}
|
@Test
public void testConvertDate() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder().name("test").columnType("DATE").dataType("DATE").build();
Column column = DB2TypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TYPE, column.getDataType());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
|
public static void addConfiguredSecurityProviders(Map<String, ?> configs) {
String securityProviderClassesStr = (String) configs.get(SecurityConfig.SECURITY_PROVIDERS_CONFIG);
if (securityProviderClassesStr == null || securityProviderClassesStr.isEmpty()) {
return;
}
try {
String[] securityProviderClasses = securityProviderClassesStr.replaceAll("\\s+", "").split(",");
for (int index = 0; index < securityProviderClasses.length; index++) {
SecurityProviderCreator securityProviderCreator =
(SecurityProviderCreator) Class.forName(securityProviderClasses[index]).getConstructor().newInstance();
securityProviderCreator.configure(configs);
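// Security.insertProviderAt uses 1-based positions, so providers are
// registered in exactly the order listed in the config.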
Security.insertProviderAt(securityProviderCreator.getProvider(), index + 1);
}
} catch (ClassCastException e) {
LOGGER.error("Creators provided through " + SecurityConfig.SECURITY_PROVIDERS_CONFIG +
" are expected to be sub-classes of SecurityProviderCreator");
} catch (ClassNotFoundException cnfe) {
LOGGER.error("Unrecognized security provider creator class", cnfe);
} catch (ReflectiveOperationException e) {
LOGGER.error("Unexpected implementation of security provider creator class", e);
}
}
|
@Test
public void testAddCustomSecurityProvider() {
String customProviderClasses = testScramSaslServerProviderCreator.getClass().getName() + "," +
testPlainSaslServerProviderCreator.getClass().getName();
Map<String, String> configs = new HashMap<>();
configs.put(SecurityConfig.SECURITY_PROVIDERS_CONFIG, customProviderClasses);
SecurityUtils.addConfiguredSecurityProviders(configs);
Provider[] providers = Security.getProviders();
int testScramSaslServerProviderIndex = getProviderIndexFromName(testScramSaslServerProvider.getName(), providers);
int testPlainSaslServerProviderIndex = getProviderIndexFromName(testPlainSaslServerProvider.getName(), providers);
assertEquals(0, testScramSaslServerProviderIndex,
testScramSaslServerProvider.getName() + " testProvider not found at expected index");
assertEquals(1, testPlainSaslServerProviderIndex,
testPlainSaslServerProvider.getName() + " testProvider not found at expected index");
}
|
@VisibleForTesting
void validateDictTypeNameUnique(Long id, String name) {
DictTypeDO dictType = dictTypeMapper.selectByName(name);
if (dictType == null) {
return;
}
// If id is null (create case), any existing dict type with the same name is a duplicate
if (id == null) {
throw exception(DICT_TYPE_NAME_DUPLICATE);
}
if (!dictType.getId().equals(id)) {
throw exception(DICT_TYPE_NAME_DUPLICATE);
}
}
|
@Test
public void testValidateDictTypeNameUnique_nameDuplicateForCreate() {
// Prepare parameters
String name = randomString();
// Mock data
dictTypeMapper.insert(randomDictTypeDO(o -> o.setName(name)));
// Invoke and verify the exception
assertServiceException(() -> dictTypeService.validateDictTypeNameUnique(null, name),
DICT_TYPE_NAME_DUPLICATE);
}
|
@VisibleForTesting
boolean parseArguments(String[] args) throws IOException {
Options opts = new Options();
opts.addOption(Option.builder("h").build());
opts.addOption(Option.builder("help").build());
opts.addOption(Option.builder("input")
.desc("Input class path. Defaults to the default classpath.")
.hasArg().build());
opts.addOption(Option.builder("whitelist")
.desc(
"Regex specifying the full path of jars to include in the" +
" framework tarball. Default is a hardcoded set of jars" +
" considered necessary to include")
.hasArg().build());
opts.addOption(Option.builder("blacklist")
.desc(
"Regex specifying the full path of jars to exclude in the" +
" framework tarball. Default is a hardcoded set of jars" +
" considered unnecessary to include")
.hasArg().build());
opts.addOption(Option.builder("fs")
.desc(
"Target file system to upload to." +
" Example: hdfs://foo.com:8020")
.hasArg().build());
opts.addOption(Option.builder("target")
.desc(
"Target file to upload to with a reference name." +
" Example: /usr/mr-framework.tar.gz#mr-framework")
.hasArg().build());
opts.addOption(Option.builder("initialReplication")
.desc(
"Desired initial replication count. Default 3.")
.hasArg().build());
opts.addOption(Option.builder("finalReplication")
.desc(
"Desired final replication count. Default 10.")
.hasArg().build());
opts.addOption(Option.builder("acceptableReplication")
.desc(
"Desired acceptable replication count. Default 9.")
.hasArg().build());
opts.addOption(Option.builder("timeout")
.desc(
"Desired timeout for the acceptable" +
" replication in seconds. Default 10")
.hasArg().build());
opts.addOption(Option.builder("nosymlink")
.desc("Ignore symlinks into the same directory")
.build());
GenericOptionsParser parser = new GenericOptionsParser(opts, args);
if (parser.getCommandLine().hasOption("help") ||
parser.getCommandLine().hasOption("h")) {
printHelp(opts);
return false;
}
input = parser.getCommandLine().getOptionValue(
"input", System.getProperty("java.class.path"));
whitelist = parser.getCommandLine().getOptionValue(
"whitelist", DefaultJars.DEFAULT_MR_JARS);
blacklist = parser.getCommandLine().getOptionValue(
"blacklist", DefaultJars.DEFAULT_EXCLUDED_MR_JARS);
initialReplication =
Short.parseShort(parser.getCommandLine().getOptionValue(
"initialReplication", "3"));
finalReplication =
Short.parseShort(parser.getCommandLine().getOptionValue(
"finalReplication", "10"));
acceptableReplication =
Short.parseShort(
parser.getCommandLine().getOptionValue(
"acceptableReplication", "9"));
timeout =
Integer.parseInt(
parser.getCommandLine().getOptionValue("timeout", "10"));
if (parser.getCommandLine().hasOption("nosymlink")) {
ignoreSymlink = true;
}
String fs = parser.getCommandLine()
.getOptionValue("fs", null);
String path = parser.getCommandLine().getOptionValue("target",
"/usr/lib/mr-framework.tar.gz#mr-framework");
boolean isFullPath =
path.startsWith("hdfs://") ||
path.startsWith("file://");
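// A fully-qualified target URI already names the filesystem, so -fs (or the
// configured default) is only required for relative targets.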
if (fs == null) {
fs = conf.getTrimmed(FS_DEFAULT_NAME_KEY);
if (fs == null && !isFullPath) {
LOG.error("No filesystem specified in either fs or target.");
printHelp(opts);
return false;
} else {
LOG.info(String.format(
"Target file system not specified. Using default %s", fs));
}
}
if (path.isEmpty()) {
LOG.error("Target directory not specified");
printHelp(opts);
return false;
}
StringBuilder absolutePath = new StringBuilder();
if (!isFullPath) {
absolutePath.append(fs);
absolutePath.append(path.startsWith("/") ? "" : "/");
}
absolutePath.append(path);
target = absolutePath.toString();
if (parser.getRemainingArgs().length > 0) {
LOG.warn("Unexpected parameters");
printHelp(opts);
return false;
}
return true;
}
|
@Test
void testHelp() throws IOException {
String[] args = new String[]{"-help"};
FrameworkUploader uploader = new FrameworkUploader();
boolean success = uploader.parseArguments(args);
assertFalse(success, "Expected to print help");
assertThat(uploader.input)
.withFailMessage("Expected ignore run")
.isNull();
assertThat(uploader.whitelist)
.withFailMessage("Expected ignore run")
.isNull();
assertThat(uploader.target)
.withFailMessage("Expected ignore run")
.isNull();
}
|
@Override
public Long createArticleCategory(ArticleCategoryCreateReqVO createReqVO) {
// Insert the new category
ArticleCategoryDO category = ArticleCategoryConvert.INSTANCE.convert(createReqVO);
articleCategoryMapper.insert(category);
// Return the generated id
return category.getId();
}
|
@Test
public void testCreateArticleCategory_success() {
// Prepare parameters
ArticleCategoryCreateReqVO reqVO = randomPojo(ArticleCategoryCreateReqVO.class);
// Invoke
Long articleCategoryId = articleCategoryService.createArticleCategory(reqVO);
// Assert
assertNotNull(articleCategoryId);
// Verify that the persisted record matches the request
ArticleCategoryDO articleCategory = articleCategoryMapper.selectById(articleCategoryId);
assertPojoEquals(reqVO, articleCategory);
}
|
public boolean acceptsXml( String nodeName ) {
return "transformation".equals( nodeName );
}
|
@Test
public void testAcceptsXml() throws Exception {
assertFalse( transFileListener.acceptsXml( null ) );
assertFalse( transFileListener.acceptsXml( "" ) );
assertFalse( transFileListener.acceptsXml( "Transformation" ) );
assertTrue( transFileListener.acceptsXml( "transformation" ) );
}
|
void removePartitionEpochs(
Map<Uuid, Set<Integer>> assignment,
int expectedEpoch
) {
assignment.forEach((topicId, assignedPartitions) -> {
currentPartitionEpoch.compute(topicId, (__, partitionsOrNull) -> {
if (partitionsOrNull != null) {
assignedPartitions.forEach(partitionId -> {
Integer prevValue = partitionsOrNull.remove(partitionId);
if (prevValue != expectedEpoch) {
throw new IllegalStateException(
String.format("Cannot remove the epoch %d from %s-%s because the partition is " +
"still owned at a different epoch %d", expectedEpoch, topicId, partitionId, prevValue));
}
});
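// Returning null from compute() removes the topic entry once its last
// partition epoch has been removed.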
if (partitionsOrNull.isEmpty()) {
return null;
} else {
return partitionsOrNull;
}
} else {
throw new IllegalStateException(
String.format("Cannot remove the epoch %d from %s because it does not have any epoch",
expectedEpoch, topicId));
}
});
});
}
|
@Test
public void testRemovePartitionEpochs() {
Uuid fooTopicId = Uuid.randomUuid();
ConsumerGroup consumerGroup = createConsumerGroup("foo");
// Removing should fail because there is no epoch set.
assertThrows(IllegalStateException.class, () -> consumerGroup.removePartitionEpochs(
mkAssignment(
mkTopicAssignment(fooTopicId, 1)
),
10
));
ConsumerGroupMember m1 = new ConsumerGroupMember.Builder("m1")
.setMemberEpoch(10)
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 1)))
.build();
consumerGroup.updateMember(m1);
// Removing should fail because the expected epoch is incorrect.
assertThrows(IllegalStateException.class, () -> consumerGroup.removePartitionEpochs(
mkAssignment(
mkTopicAssignment(fooTopicId, 1)
),
11
));
}
|
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
// Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
// to Runner v2.
if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_runner_v2")) {
LOG.info(
"Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
+ " transforms or pipeline needed a transform upgrade.");
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
}
}
if (useUnifiedWorker(options)) {
if (hasExperiment(options, "disable_runner_v2")
|| hasExperiment(options, "disable_runner_v2_until_2023")
|| hasExperiment(options, "disable_prime_runner_v2")) {
throw new IllegalArgumentException(
"Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
}
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("use_runner_v2")) {
experiments.add("use_runner_v2");
}
if (!experiments.contains("use_unified_worker")) {
experiments.add("use_unified_worker");
}
if (!experiments.contains("beam_fn_api")) {
experiments.add("beam_fn_api");
}
if (!experiments.contains("use_portable_job_submission")) {
experiments.add("use_portable_job_submission");
}
options.setExperiments(ImmutableList.copyOf(experiments));
}
logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
logWarningIfBigqueryDLQUnused(pipeline);
if (shouldActAsStreaming(pipeline)) {
options.setStreaming(true);
if (useUnifiedWorker(options)) {
options.setEnableStreamingEngine(true);
List<String> experiments =
new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
if (!experiments.contains("enable_streaming_engine")) {
experiments.add("enable_streaming_engine");
}
if (!experiments.contains("enable_windmill_service")) {
experiments.add("enable_windmill_service");
}
}
}
if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
ProjectionPushdownOptimizer.optimize(pipeline);
}
LOG.info(
"Executing pipeline on the Dataflow Service, which will have billing implications "
+ "related to Google Compute Engine usage and other Google Cloud Services.");
DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
// This incorrectly puns the worker harness container image (which implements v1beta3 API)
// with the SDK harness image (which implements Fn API).
//
// The same Environment is used in different and contradictory ways, depending on whether
// it is a v1 or v2 job submission.
RunnerApi.Environment defaultEnvironmentForDataflow =
Environments.createDockerEnvironment(workerHarnessContainerImageURL);
// The SdkComponents for portable and non-portable job submission must be kept distinct. Both
// need the default environment.
SdkComponents portableComponents = SdkComponents.create();
portableComponents.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
RunnerApi.Pipeline portablePipelineProto =
PipelineTranslation.toProto(pipeline, portableComponents, false);
// Note that `stageArtifacts` has to be called before `resolveArtifact` because
// `resolveArtifact` updates local paths to staged paths in pipeline proto.
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
portablePipelineProto = resolveArtifacts(portablePipelineProto);
portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Portable pipeline proto:\n{}",
TextFormat.printer().printToString(portablePipelineProto));
}
// Stage the portable pipeline proto, retrieving the staged pipeline path, then update
// the options on the new job
// TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
DataflowPackage stagedPipeline =
options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
if (useUnifiedWorker(options)) {
LOG.info("Skipping v1 transform replacements since job will run on v2.");
} else {
// Now rewrite things to be as needed for v1 (mutates the pipeline)
// This way the job submitted is valid for v1 and v2, simultaneously
replaceV1Transforms(pipeline);
}
// Capture the SdkComponents for look up during step translations
SdkComponents dataflowV1Components = SdkComponents.create();
dataflowV1Components.registerEnvironment(
defaultEnvironmentForDataflow
.toBuilder()
.addAllDependencies(getDefaultArtifacts())
.addAllCapabilities(Environments.getJavaCapabilities())
.build());
// No need to perform transform upgrading for the Runner v1 proto.
RunnerApi.Pipeline dataflowV1PipelineProto =
PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
if (LOG.isDebugEnabled()) {
LOG.debug(
"Dataflow v1 pipeline proto:\n{}",
TextFormat.printer().printToString(dataflowV1PipelineProto));
}
// Set a unique client_request_id in the CreateJob request.
// This is used to ensure idempotence of job creation across retried
// attempts to create a job. Specifically, if the service returns a job with
// a different client_request_id, it means the returned one is a different
// job previously created with the same job name, and that the job creation
// has been effectively rejected. The SDK should return
// Error::Already_Exists to user in that case.
int randomNum = new Random().nextInt(9000) + 1000;
String requestId =
DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
.withZone(DateTimeZone.UTC)
.print(DateTimeUtils.currentTimeMillis())
+ "_"
+ randomNum;
JobSpecification jobSpecification =
translator.translate(
pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
if (!experiments.contains("use_staged_dataflow_worker_jar")) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("use_staged_dataflow_worker_jar")
.build());
}
}
Job newJob = jobSpecification.getJob();
try {
newJob
.getEnvironment()
.setSdkPipelineOptions(
MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
} catch (IOException e) {
throw new IllegalArgumentException(
"PipelineOptions specified failed to serialize to JSON.", e);
}
newJob.setClientRequestId(requestId);
DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
String version = dataflowRunnerInfo.getVersion();
checkState(
!"${pom.version}".equals(version),
"Unable to submit a job to the Dataflow service with unset version ${pom.version}");
LOG.info("Dataflow SDK version: {}", version);
newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
// The Dataflow Service may write to the temporary directory directly, so
// must be verified.
if (!isNullOrEmpty(options.getGcpTempLocation())) {
newJob
.getEnvironment()
.setTempStoragePrefix(
dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
}
newJob.getEnvironment().setDataset(options.getTempDatasetId());
if (options.getWorkerRegion() != null) {
newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
}
if (options.getWorkerZone() != null) {
newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
}
if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
} else if (options.getFlexRSGoal()
== DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
}
// Represent the minCpuPlatform pipeline option as an experiment, if not already present.
if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
List<String> experiments =
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
List<String> minCpuFlags =
experiments.stream()
.filter(p -> p.startsWith("min_cpu_platform"))
.collect(Collectors.toList());
if (minCpuFlags.isEmpty()) {
dataflowOptions.setExperiments(
ImmutableList.<String>builder()
.addAll(experiments)
.add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
.build());
} else {
LOG.warn(
"Flag min_cpu_platform is defined in both top level PipelineOption, "
+ "as well as under experiments. Proceed using {}.",
minCpuFlags.get(0));
}
}
newJob
.getEnvironment()
.setExperiments(
ImmutableList.copyOf(
firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
// Set the Docker container image that executes Dataflow worker harness, residing in Google
// Container Registry. Translator is guaranteed to create a worker pool prior to this point.
// For runner_v1, only worker_harness_container is set.
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
// value.
String containerImage = getContainerImageForJob(options);
for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
workerPool.setWorkerHarnessContainerImage(containerImage);
}
configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
if (hooks != null) {
hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
}
// enable upload_graph when the graph is too large
byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
int jobGraphByteSize = jobGraphBytes.length;
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
&& !hasExperiment(options, "upload_graph")
&& !useUnifiedWorker(options)) {
List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
options.setExperiments(
ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
LOG.info(
"The job graph size ({} in bytes) is larger than {}. Automatically add "
+ "the upload_graph option to experiments.",
jobGraphByteSize,
CREATE_JOB_REQUEST_LIMIT_BYTES);
}
if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
while (experiments.remove("upload_graph")) {}
options.setExperiments(experiments);
LOG.warn(
"The upload_graph experiment was specified, but it does not apply "
+ "to runner v2 jobs. Option has been automatically removed.");
}
// Upload the job to GCS and remove the graph object from the API call. The graph
// will be downloaded from GCS by the service.
if (hasExperiment(options, "upload_graph")) {
DataflowPackage stagedGraph =
options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
newJob.getSteps().clear();
newJob.setStepsLocation(stagedGraph.getLocation());
}
if (!isNullOrEmpty(options.getDataflowJobFile())
|| !isNullOrEmpty(options.getTemplateLocation())) {
boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
if (isTemplate) {
checkArgument(
isNullOrEmpty(options.getDataflowJobFile()),
"--dataflowJobFile and --templateLocation are mutually exclusive.");
}
String fileLocation =
firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
checkArgument(
fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
"Location must be local or on Cloud Storage, got %s.",
fileLocation);
ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
try (PrintWriter printWriter =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(
Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
UTF_8)))) {
printWriter.print(workSpecJson);
LOG.info("Printed job specification to {}", fileLocation);
} catch (IOException ex) {
String error = String.format("Cannot create output file at %s", fileLocation);
if (isTemplate) {
throw new RuntimeException(error, ex);
} else {
LOG.warn(error, ex);
}
}
if (isTemplate) {
LOG.info("Template successfully created.");
return new DataflowTemplateJob();
}
}
String jobIdToUpdate = null;
if (options.isUpdate()) {
jobIdToUpdate = getJobIdFromName(options.getJobName());
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setReplaceJobId(jobIdToUpdate);
}
if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
newJob.setTransformNameMapping(options.getTransformNameMapping());
newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
}
Job jobResult;
try {
jobResult = dataflowClient.createJob(newJob);
} catch (GoogleJsonResponseException e) {
String errorMessages = "Unexpected errors";
if (e.getDetails() != null) {
if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
errorMessages =
"The size of the serialized JSON representation of the pipeline "
+ "exceeds the allowable limit. "
+ "For more information, please see the documentation on job submission:\n"
+ "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
} else {
errorMessages = e.getDetails().getMessage();
}
}
throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
} catch (IOException e) {
throw new RuntimeException("Failed to create a workflow job", e);
}
// Use a raw client for post-launch monitoring, as status calls may fail
// regularly and need not be retried automatically.
DataflowPipelineJob dataflowPipelineJob =
new DataflowPipelineJob(
DataflowClient.create(options),
jobResult.getId(),
options,
jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
portablePipelineProto);
// If the service returned client request id, the SDK needs to compare it
// with the original id generated in the request, if they are not the same
// (i.e., the returned job is not created by this request), throw
// DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
// depending on whether this is a reload or not.
if (jobResult.getClientRequestId() != null
&& !jobResult.getClientRequestId().isEmpty()
&& !jobResult.getClientRequestId().equals(requestId)) {
// If updating a job.
if (options.isUpdate()) {
throw new DataflowJobAlreadyUpdatedException(
dataflowPipelineJob,
String.format(
"The job named %s with id: %s has already been updated into job id: %s "
+ "and cannot be updated again.",
newJob.getName(), jobIdToUpdate, jobResult.getId()));
} else {
throw new DataflowJobAlreadyExistsException(
dataflowPipelineJob,
String.format(
"There is already an active job named %s with id: %s. If you want to submit a"
+ " second job, try again by setting a different name using --jobName.",
newJob.getName(), jobResult.getId()));
}
}
LOG.info(
"To access the Dataflow monitoring console, please navigate to {}",
MonitoringUtil.getJobMonitoringPageURL(
options.getProject(), options.getRegion(), jobResult.getId()));
LOG.info("Submitted job: {}", jobResult.getId());
LOG.info(
"To cancel the job using the 'gcloud' tool, run:\n> {}",
MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
return dataflowPipelineJob;
}
|
@Test
public void testBatchOnSuccessMatcherWhenPipelineSucceeds() throws Exception {
Pipeline p = TestPipeline.create(options);
PCollection<Integer> pc = p.apply(Create.of(1, 2, 3));
PAssert.that(pc).containsInAnyOrder(1, 2, 3);
final DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
when(mockJob.getState()).thenReturn(State.DONE);
when(mockJob.getProjectId()).thenReturn("test-project");
when(mockJob.getJobId()).thenReturn("test-job");
DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);
TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
options.as(TestPipelineOptions.class).setOnSuccessMatcher(new TestSuccessMatcher(mockJob, 1));
when(mockClient.getJobMetrics(anyString()))
.thenReturn(generateMockMetricResponse(true /* success */, true /* tentative */));
runner.run(p, mockRunner);
}
|
public static List<Common.MessageFormatting> dbMessageFormattingListToWs(@Nullable List<DbIssues.MessageFormatting> dbFormattings) {
if (dbFormattings == null) {
return List.of();
}
return dbFormattings.stream()
.map(f -> Common.MessageFormatting.newBuilder()
.setStart(f.getStart())
.setEnd(f.getEnd())
.setType(Common.MessageFormattingType.valueOf(f.getType().name())).build())
.toList();
}
|
@Test
public void nullFormattingListShouldBeEmptyList() {
assertThat(MessageFormattingUtils.dbMessageFormattingListToWs(null)).isEmpty();
}
|
@Override
public void open() throws Exception {
this.timerService =
getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this);
this.keySet = new HashSet<>();
super.open();
}
|
@Test
void testProcessRecord() throws Exception {
List<Integer> fromNonBroadcastInput = new ArrayList<>();
List<Long> fromBroadcastInput = new ArrayList<>();
KeyedTwoInputBroadcastProcessOperator<Long, Integer, Long, Long> processOperator =
new KeyedTwoInputBroadcastProcessOperator<>(
new TwoInputBroadcastStreamProcessFunction<Integer, Long, Long>() {
@Override
public void processRecordFromNonBroadcastInput(
Integer record,
Collector<Long> output,
PartitionedContext ctx) {
fromNonBroadcastInput.add(record);
}
@Override
public void processRecordFromBroadcastInput(
Long record, NonPartitionedContext<Long> ctx) {
fromBroadcastInput.add(record);
}
});
try (KeyedTwoInputStreamOperatorTestHarness<Long, Integer, Long, Long> testHarness =
new KeyedTwoInputStreamOperatorTestHarness<>(
processOperator,
(KeySelector<Integer, Long>) (data) -> (long) (data + 1),
(KeySelector<Long, Long>) value -> value + 1,
Types.LONG)) {
testHarness.open();
testHarness.processElement1(new StreamRecord<>(1));
testHarness.processElement2(new StreamRecord<>(2L));
testHarness.processElement2(new StreamRecord<>(4L));
testHarness.processElement1(new StreamRecord<>(3));
}
assertThat(fromNonBroadcastInput).containsExactly(1, 3);
assertThat(fromBroadcastInput).containsExactly(2L, 4L);
}
|
@Override
@SuppressWarnings("unchecked")
public <T> T get(final PluginConfigSpec<T> configSpec) {
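        // An explicitly supplied setting wins; coerce it to the declared type where a safe conversion exists.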
if (rawSettings.containsKey(configSpec.name())) {
Object o = rawSettings.get(configSpec.name());
if (configSpec.type().isAssignableFrom(o.getClass())) {
return (T) o;
} else if (configSpec.type() == Double.class && o.getClass() == Long.class) {
return configSpec.type().cast(((Long)o).doubleValue());
} else if (configSpec.type() == Boolean.class && o instanceof String) {
return configSpec.type().cast(Boolean.parseBoolean((String) o));
} else if (configSpec.type() == Codec.class && o instanceof String && pluginFactory != null) {
Codec codec = pluginFactory.buildDefaultCodec((String) o);
return configSpec.type().cast(codec);
} else if (configSpec.type() == Codec.class && o instanceof RubyObject && RubyCodecDelegator.isRubyCodecSubclass((RubyObject) o)) {
Codec codec = pluginFactory.buildRubyCodecWrapper((RubyObject) o);
return configSpec.type().cast(codec);
} else if (configSpec.type() == URI.class && o instanceof String) {
try {
URI uri = new URI((String) o);
return configSpec.type().cast(uri);
} catch (URISyntaxException ex) {
throw new IllegalStateException(
String.format("Invalid URI specified for '%s'", configSpec.name()));
}
} else if (configSpec.type() == Password.class && o instanceof String) {
Password p = new Password((String) o);
return configSpec.type().cast(p);
} else {
throw new IllegalStateException(
String.format("Setting value for '%s' of type '%s' incompatible with defined type of '%s'",
configSpec.name(), o.getClass(), configSpec.type()));
}
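        // No value was supplied: construct Codec/URI/Password defaults from their raw default values, else return the declared default.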
} else if (configSpec.type() == Codec.class && configSpec.getRawDefaultValue() != null && pluginFactory != null) {
Codec codec = pluginFactory.buildDefaultCodec(configSpec.getRawDefaultValue());
return configSpec.type().cast(codec);
} else if (configSpec.type() == URI.class && configSpec.getRawDefaultValue() != null) {
try {
URI uri = new URI(configSpec.getRawDefaultValue());
return configSpec.type().cast(uri);
} catch (URISyntaxException ex) {
throw new IllegalStateException(
String.format("Invalid default URI specified for '%s'", configSpec.name()));
}
} else if (configSpec.type() == Password.class && configSpec.getRawDefaultValue() != null) {
Password p = new Password(configSpec.getRawDefaultValue());
return configSpec.type().cast(p);
} else {
return configSpec.defaultValue();
}
}
|
@Test
public void testUriDefaultValue() {
String defaultUri = "https://user:[email protected]:99";
PluginConfigSpec<URI> uriConfig = PluginConfigSpec.uriSetting("test", defaultUri);
Configuration config = new ConfigurationImpl(Collections.emptyMap());
URI u = config.get(uriConfig);
Assert.assertEquals(defaultUri, u.toString());
}
|
public static JibContainerBuilder toJibContainerBuilder(
ArtifactProcessor processor,
CommonCliOptions commonCliOptions,
CommonContainerConfigCliOptions commonContainerConfigCliOptions,
ConsoleLogger logger)
throws IOException, InvalidImageReferenceException {
String baseImage = commonContainerConfigCliOptions.getFrom().orElse("jetty");
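    // Default to the "jetty" base image when none was specified.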
JibContainerBuilder containerBuilder =
ContainerBuilders.create(baseImage, Collections.emptySet(), commonCliOptions, logger);
    List<String> programArguments = commonContainerConfigCliOptions.getProgramArguments();
    if (!programArguments.isEmpty()) {
      containerBuilder.setProgramArguments(programArguments);
    }
containerBuilder
.setEntrypoint(computeEntrypoint(commonContainerConfigCliOptions))
.setFileEntriesLayers(processor.createLayers())
.setExposedPorts(commonContainerConfigCliOptions.getExposedPorts())
.setVolumes(commonContainerConfigCliOptions.getVolumes())
.setEnvironment(commonContainerConfigCliOptions.getEnvironment())
.setLabels(commonContainerConfigCliOptions.getLabels());
commonContainerConfigCliOptions.getUser().ifPresent(containerBuilder::setUser);
commonContainerConfigCliOptions.getFormat().ifPresent(containerBuilder::setFormat);
commonContainerConfigCliOptions.getCreationTime().ifPresent(containerBuilder::setCreationTime);
return containerBuilder;
}
|
@Test
public void testToJibContainerBuilder_explodedStandard_basicInfo()
throws IOException, InvalidImageReferenceException {
FileEntriesLayer layer =
FileEntriesLayer.builder()
.setName("classes")
.addEntry(
Paths.get("path/to/tempDirectory/WEB-INF/classes/class1.class"),
AbsoluteUnixPath.get("/my/app/WEB-INF/classes/class1.class"))
.build();
when(mockStandardWarExplodedProcessor.createLayers()).thenReturn(Arrays.asList(layer));
when(mockCommonContainerConfigCliOptions.isJettyBaseimage()).thenReturn(true);
JibContainerBuilder containerBuilder =
WarFiles.toJibContainerBuilder(
mockStandardWarExplodedProcessor,
mockCommonCliOptions,
mockCommonContainerConfigCliOptions,
mockLogger);
ContainerBuildPlan buildPlan = containerBuilder.toContainerBuildPlan();
assertThat(buildPlan.getBaseImage()).isEqualTo("jetty");
assertThat(buildPlan.getEntrypoint())
.containsExactly("java", "-jar", "/usr/local/jetty/start.jar", "--module=ee10-deploy")
.inOrder();
assertThat(buildPlan.getLayers()).hasSize(1);
assertThat(buildPlan.getLayers().get(0).getName()).isEqualTo("classes");
assertThat(((FileEntriesLayer) buildPlan.getLayers().get(0)).getEntries())
.containsExactlyElementsIn(
FileEntriesLayer.builder()
.addEntry(
Paths.get("path/to/tempDirectory/WEB-INF/classes/class1.class"),
AbsoluteUnixPath.get("/my/app/WEB-INF/classes/class1.class"))
.build()
.getEntries());
}
|
@VisibleForTesting
void handleResponse(DiscoveryResponseData response)
{
ResourceType resourceType = response.getResourceType();
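    // Dispatch to the handler for the specific xDS resource type carried by this response.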
switch (resourceType)
{
case NODE:
handleD2NodeResponse(response);
break;
case D2_URI_MAP:
handleD2URIMapResponse(response);
break;
case D2_URI:
handleD2URICollectionResponse(response);
break;
default:
throw new AssertionError("Missing case in enum switch: " + resourceType);
}
}
|
@Test
public void testHandleD2URICollectionUpdateWithBadData()
{
DiscoveryResponseData badData = new DiscoveryResponseData(
D2_URI,
Collections.singletonList(Resource.newBuilder().setVersion(VERSION1).setName(URI_URN1)
// resource field not set
.build()),
null,
NONCE,
null);
XdsClientImplFixture fixture = new XdsClientImplFixture();
fixture._clusterSubscriber.setData(null);
fixture._xdsClientImpl.handleResponse(badData);
fixture.verifyNackSent(1);
verify(fixture._resourceWatcher).onChanged(eq(D2_URI_MAP.emptyData()));
verifyZeroInteractions(fixture._serverMetricsProvider);
D2URIMapUpdate actualData = (D2URIMapUpdate) fixture._clusterSubscriber.getData();
Assert.assertNull(Objects.requireNonNull(actualData).getURIMap());
fixture._clusterSubscriber.setData(D2_URI_MAP_UPDATE_WITH_DATA1);
fixture._xdsClientImpl.handleResponse(badData);
fixture.verifyNackSent(2);
// Due to the way glob collection updates are handled, bad data is dropped rather than showing any visible side
// effects other than NACKing the response.
verify(fixture._resourceWatcher, times(0)).onChanged(eq(D2_URI_MAP_UPDATE_WITH_DATA1));
verifyZeroInteractions(fixture._serverMetricsProvider);
}
|
public boolean isWatching() {
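        // A wallet is watching only if all of its key chains are watching; mixing watching and regular keys is illegal.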
BasicKeyChain.State basicState = basic.isWatching();
BasicKeyChain.State activeState = BasicKeyChain.State.EMPTY;
if (chains != null && !chains.isEmpty()) {
if (getActiveKeyChain().isWatching())
activeState = BasicKeyChain.State.WATCHING;
else
activeState = BasicKeyChain.State.REGULAR;
}
if (basicState == BasicKeyChain.State.EMPTY) {
if (activeState == BasicKeyChain.State.EMPTY)
throw new IllegalStateException("Empty key chain group: cannot answer isWatching() query");
return activeState == BasicKeyChain.State.WATCHING;
} else if (activeState == BasicKeyChain.State.EMPTY)
return basicState == BasicKeyChain.State.WATCHING;
else {
if (activeState != basicState)
throw new IllegalStateException("Mix of watching and non-watching keys in wallet");
return activeState == BasicKeyChain.State.WATCHING;
}
}
|
@Test
public void isWatching() {
group = KeyChainGroup.builder(BitcoinNetwork.MAINNET)
.addChain(DeterministicKeyChain.builder().watch(DeterministicKey.deserializeB58(
"xpub69bjfJ91ikC5ghsqsVDHNq2dRGaV2HHVx7Y9LXi27LN9BWWAXPTQr4u8U3wAtap8bLdHdkqPpAcZmhMS5SnrMQC4ccaoBccFhh315P4UYzo",
BitcoinNetwork.MAINNET)).outputScriptType(ScriptType.P2PKH).build())
.build();
final ECKey watchingKey = ECKey.fromPublicOnly(new ECKey());
group.importKeys(watchingKey);
assertTrue(group.isWatching());
}
|
public static void checkCacheConfig(CacheSimpleConfig cacheSimpleConfig,
SplitBrainMergePolicyProvider mergePolicyProvider) {
checkCacheConfig(cacheSimpleConfig.getInMemoryFormat(), cacheSimpleConfig.getEvictionConfig(),
cacheSimpleConfig.getMergePolicyConfig().getPolicy(),
SplitBrainMergeTypes.CacheMergeTypes.class, mergePolicyProvider, COMMONLY_SUPPORTED_EVICTION_POLICIES);
}
|
@Test(expected = IllegalArgumentException.class)
public void checkCacheConfig_withEntryCountMaxSizePolicy_NATIVE() {
EvictionConfig evictionConfig = new EvictionConfig()
.setMaxSizePolicy(MaxSizePolicy.ENTRY_COUNT);
CacheSimpleConfig cacheSimpleConfig = new CacheSimpleConfig()
.setInMemoryFormat(NATIVE)
.setEvictionConfig(evictionConfig);
checkCacheConfig(cacheSimpleConfig, splitBrainMergePolicyProvider);
}
|
@NonNull
public String processShownotes() {
String shownotes = rawShownotes;
if (TextUtils.isEmpty(shownotes)) {
Log.d(TAG, "shownotesProvider contained no shownotes. Returning 'no shownotes' message");
shownotes = "<html><head></head><body><p id='apNoShownotes'>" + noShownotesLabel + "</p></body></html>";
}
// replace ASCII line breaks with HTML ones if shownotes don't contain HTML line breaks already
if (!LINE_BREAK_REGEX.matcher(shownotes).find() && !shownotes.contains("<p>")) {
shownotes = shownotes.replace("\n", "<br />");
}
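        // Parse the HTML, clean its CSS, inject the webview stylesheet, and turn timecodes into links.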
Document document = Jsoup.parse(shownotes);
cleanCss(document);
document.head().appendElement("style").attr("type", "text/css").text(webviewStyle);
addTimecodes(document);
return document.toString();
}
|
@Test
public void testProcessShownotesAddTimecodeHhmmssNoChapters() {
final String timeStr = "10:11:12";
final long time = 3600 * 1000 * 10 + 60 * 1000 * 11 + 12 * 1000;
String shownotes = "<p> Some test text with a timecode " + timeStr + " here.</p>";
ShownotesCleaner t = new ShownotesCleaner(context, shownotes, Integer.MAX_VALUE);
String res = t.processShownotes();
checkLinkCorrect(res, new long[]{time}, new String[]{timeStr});
}
|
public PrepareResult prepare(HostValidator hostValidator, DeployLogger logger, PrepareParams params,
Optional<ApplicationVersions> activeApplicationVersions, Instant now, File serverDbSessionDir,
ApplicationPackage applicationPackage, SessionZooKeeperClient sessionZooKeeperClient) {
ApplicationId applicationId = params.getApplicationId();
Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationVersions,
TenantRepository.getTenantPath(applicationId.tenant()),
serverDbSessionDir, applicationPackage, sessionZooKeeperClient,
onnxModelCost, endpointCertificateSecretStores);
preparation.preprocess();
try {
AllocatedHosts allocatedHosts = preparation.buildModels(now);
preparation.makeResult(allocatedHosts);
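            // A dry run builds models only; file distribution and ZooKeeper writes are skipped.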
if ( ! params.isDryRun()) {
FileReference fileReference = preparation.triggerDistributionOfApplicationPackage();
preparation.writeStateZK(fileReference);
preparation.writeEndpointCertificateMetadataZK();
preparation.writeContainerEndpointsZK();
}
log.log(Level.FINE, () -> "time used " + params.getTimeoutBudget().timesUsed() + " : " + applicationId);
return preparation.result();
}
catch (IllegalArgumentException e) {
if (e instanceof InvalidApplicationException)
throw e;
throw new InvalidApplicationException("Invalid application package", e);
}
}
|
@Test
public void require_that_endpoint_certificate_metadata_is_written() throws IOException {
var applicationId = applicationId("test");
var params = new PrepareParams.Builder().applicationId(applicationId).endpointCertificateMetadata("{\"keyName\": \"vespa.tlskeys.tenant1--app1-key\", \"certName\":\"vespa.tlskeys.tenant1--app1-cert\", \"version\": 7}").build();
secretStore.put("vespa.tlskeys.tenant1--app1-cert", 7, X509CertificateUtils.toPem(certificate));
secretStore.put("vespa.tlskeys.tenant1--app1-key", 7, KeyUtils.toPem(keyPair.getPrivate()));
prepare(new File("src/test/resources/deploy/hosted-app"), params);
// Read from zk and verify cert and key are available
Path tenantPath = TenantRepository.getTenantPath(applicationId.tenant());
Optional<EndpointCertificateSecrets> endpointCertificateSecrets = new EndpointCertificateMetadataStore(curator, tenantPath)
.readEndpointCertificateMetadata(applicationId)
.flatMap(p -> new EndpointCertificateRetriever(List.of(new DefaultEndpointCertificateSecretStore(secretStore))).readEndpointCertificateSecrets(p));
assertTrue(endpointCertificateSecrets.isPresent());
assertTrue(endpointCertificateSecrets.get().key().startsWith("-----BEGIN EC PRIVATE KEY"));
assertTrue(endpointCertificateSecrets.get().certificate().startsWith("-----BEGIN CERTIFICATE"));
}
|
static void parseAuthority(final StringReader reader, final Host host, final Consumer<HostParserException> decorator) throws HostParserException {
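        // An authority is consumed in order: optional user info, hostname, then any trailing path.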
parseUserInfo(reader, host, decorator);
parseHostname(reader, host, decorator);
parsePath(reader, host, false, decorator);
}
|
@Test
public void testParseAuthoritySimpleDomain() throws HostParserException {
final Host host = new Host(new TestProtocol());
final String authority = "domain.tld";
final HostParser.StringReader reader = new HostParser.StringReader(authority);
HostParser.parseAuthority(reader, host, null);
assertEquals(authority, host.getHostname());
}
|
static void sanityCheckExcludedRackIds(Builder builder) {
if (builder._excludedRackIds != null && builder._numBrokers == DEFAULT_OPTIONAL_INT) {
throw new IllegalArgumentException("Excluded rack ids can be specified only with the number of brokers.");
}
}
|
@Test
public void testSanityCheckExcludedRackIds() {
// Set excluded rack ids without numBrokers
assertThrows(IllegalArgumentException.class, () -> ProvisionRecommendation.sanityCheckExcludedRackIds(
new ProvisionRecommendation.Builder(ProvisionStatus.UNDER_PROVISIONED).numRacks(1).excludedRackIds(Collections.singleton("1"))));
}
|
public static Identifier parse(String stringValue) {
return parse(stringValue, -1);
}
|
@Test(expected = IllegalArgumentException.class)
public void testParseIntegerBelowMin() {
Identifier.parse("-1");
}
|
public void deleteGroup(String groupName) {
Iterator<PipelineConfigs> iterator = this.iterator();
while (iterator.hasNext()) {
PipelineConfigs currentGroup = iterator.next();
if (currentGroup.isNamed(groupName)) {
if (!currentGroup.isEmpty()) {
throw new UnprocessableEntityException("Failed to delete group " + groupName + " because it was not empty.");
}
iterator.remove();
break;
}
}
}
|
@Test
public void shouldDeleteGroupWithSameNameWhenEmpty() {
PipelineConfigs group = createGroup("group", new PipelineConfig[]{});
group.setAuthorization(new Authorization(new ViewConfig(new AdminUser(new CaseInsensitiveString("user")))));
PipelineGroups groups = new PipelineGroups(group);
groups.deleteGroup("group");
assertThat(groups.size(), is(0));
}
|
public PriorityFutureTask<Void> submit(PriorityRunnable task) {
if (task == null) {
throw new NullPointerException();
}
        final PriorityFutureTask<Void> ftask = new PriorityFutureTask<>(task, null);
execute(ftask);
return ftask;
}
|
@Test
public void testDefault() throws InterruptedException, ExecutionException {
PriorityBlockingQueue<Runnable> workQueue = new PriorityBlockingQueue<Runnable>(1000);
PriorityThreadPoolExecutor pool = new PriorityThreadPoolExecutor(1, 1, 1, TimeUnit.MINUTES, workQueue);
Future[] futures = new Future[20];
StringBuffer buffer = new StringBuffer();
for (int i = 0; i < futures.length; i++) {
int index = i;
futures[i] = pool.submit(new PriorityRunnable(0) {
@Override
public void run() {
try {
Thread.sleep(10);
} catch (InterruptedException e) {
e.printStackTrace();
}
buffer.append(index + ", ");
}
});
}
for (int i = 0; i < futures.length; i++) {
futures[i].get();
}
assertEquals("0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, ", buffer.toString());
}
|
@VisibleForTesting
static void writeFileConservatively(Path file, String content) throws IOException {
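    // Skip the write entirely when the target already holds identical content.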
if (Files.exists(file)) {
String oldContent = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
if (oldContent.equals(content)) {
return;
}
}
Files.createDirectories(file.getParent());
Files.write(file, content.getBytes(StandardCharsets.UTF_8));
}
|
@Test
public void testWriteFileConservatively() throws IOException {
Path file = temporaryFolder.getRoot().toPath().resolve("file.txt");
PluginConfigurationProcessor.writeFileConservatively(file, "some content");
String content = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
assertThat(content).isEqualTo("some content");
}
|
@CanDistro
@PostMapping
@TpsControl(pointName = "NamingInstanceRegister", name = "HttpNamingInstanceRegister")
@Secured(action = ActionTypes.WRITE)
public String register(HttpServletRequest request) throws Exception {
final String namespaceId = WebUtils.optional(request, CommonParams.NAMESPACE_ID,
Constants.DEFAULT_NAMESPACE_ID);
final String serviceName = WebUtils.required(request, CommonParams.SERVICE_NAME);
NamingUtils.checkServiceNameFormat(serviceName);
final Instance instance = HttpRequestInstanceBuilder.newBuilder()
.setDefaultInstanceEphemeral(switchDomain.isDefaultInstanceEphemeral()).setRequest(request).build();
getInstanceOperator().registerInstance(namespaceId, serviceName, instance);
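        // Publish a trace event recording the registration.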
NotifyCenter.publishEvent(new RegisterInstanceTraceEvent(System.currentTimeMillis(),
NamingRequestUtil.getSourceIpForHttpRequest(request), false, namespaceId,
NamingUtils.getGroupName(serviceName), NamingUtils.getServiceName(serviceName), instance.getIp(),
instance.getPort()));
return "ok";
}
|
@Test
void testRegister() throws Exception {
assertEquals("ok", instanceController.register(request));
verify(instanceServiceV2).registerInstance(eq(Constants.DEFAULT_NAMESPACE_ID), eq(TEST_GROUP_NAME + "@@" + TEST_SERVICE_NAME),
any(Instance.class));
TimeUnit.SECONDS.sleep(1);
assertEquals(RegisterInstanceTraceEvent.class, eventReceivedClass);
}
|
public boolean produce(T data) {
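        // Refuse new data once the consuming driver has stopped; otherwise hand it to the channel buffers.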
if (driver != null) {
if (!driver.isRunning(channels)) {
return false;
}
}
return this.channels.save(data);
}
|
@Test
public void testIfPossibleProduce() {
DataCarrier<SampleData> carrier = new DataCarrier<>(2, 100, BufferStrategy.IF_POSSIBLE);
for (int i = 0; i < 200; i++) {
assertTrue(carrier.produce(new SampleData().setName("d" + i)));
}
for (int i = 0; i < 200; i++) {
Assertions.assertFalse(carrier.produce(new SampleData().setName("d" + i + "_2")));
}
Channels<SampleData> channels = Whitebox.getInternalState(carrier, "channels");
QueueBuffer<SampleData> buffer1 = channels.getBuffer(0);
List<SampleData> result = new ArrayList<>();
buffer1.obtain(result);
QueueBuffer<SampleData> buffer2 = channels.getBuffer(1);
buffer2.obtain(result);
assertEquals(200, result.size());
}
|
void doSubmit(final Runnable action) {
CONTINUATION.get().submit(action);
}
|
@Test
public void testDeepRecursion() {
final Continuations CONT = new Continuations();
final AtomicInteger result = new AtomicInteger();
CONT.doSubmit(() -> {
deepRecursion(CONT, result, 0);
});
        assertEquals(1000001, result.get());
}
|
@Nullable static String channelName(@Nullable Destination destination) {
if (destination == null) return null;
boolean isQueue = isQueue(destination);
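    // JMS exposes no common name accessor, so branch on whether the destination is a queue or a topic.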
try {
if (isQueue) {
return ((Queue) destination).getQueueName();
} else {
return ((Topic) destination).getTopicName();
}
} catch (Throwable t) {
propagateIfFatal(t);
log(t, "error getting destination name from {0}", destination, null);
}
return null;
}
|
@Test void channelName_queueAndTopic_queueOnQueueName() throws JMSException {
QueueAndTopic destination = mock(QueueAndTopic.class);
when(destination.getQueueName()).thenReturn("queue-foo");
assertThat(MessageParser.channelName(destination))
.isEqualTo("queue-foo");
}
|
@Override
public Float parse(final String value) {
return Float.parseFloat(value);
}
|
@Test
void assertParse() {
assertThat(new PostgreSQLFloatValueParser().parse("1"), is(1F));
}
|