focal_method | test_case
---|---|
@Override
public void execute(String commandName, BufferedReader reader, BufferedWriter writer)
throws Py4JException, IOException {
char subCommand = safeReadLine(reader).charAt(0);
String returnCommand = null;
if (subCommand == GET_UNKNOWN_SUB_COMMAND_NAME) {
returnCommand = getUnknownMember(reader);
} else if (subCommand == GET_JAVA_LANG_CLASS_SUB_COMMAND_NAME) {
returnCommand = getJavaLangClass(reader);
} else {
returnCommand = getMember(reader);
}
logger.finest("Returning command: " + returnCommand);
writer.write(returnCommand);
writer.flush();
}
|
@Test
public void testUnknown() {
String inputCommand1 = ReflectionCommand.GET_UNKNOWN_SUB_COMMAND_NAME + "\n" + "java" + "\nrj\ne\n";
String inputCommand2 = ReflectionCommand.GET_UNKNOWN_SUB_COMMAND_NAME + "\n" + "java.lang" + "\nrj\ne\n";
String inputCommand3 = ReflectionCommand.GET_UNKNOWN_SUB_COMMAND_NAME + "\n" + "java.lang.String" + "\nrj\ne\n";
String inputCommand4 = ReflectionCommand.GET_UNKNOWN_SUB_COMMAND_NAME + "\n" + "p1.Cat" + "\nrj\ne\n";
String inputCommand5 = ReflectionCommand.GET_UNKNOWN_SUB_COMMAND_NAME + "\n" + "byte" + "\nrj\ne\n";
String inputCommand6 = ReflectionCommand.GET_UNKNOWN_SUB_COMMAND_NAME + "\n" + "System" + "\nrj\ne\n";
String inputCommand7 = ReflectionCommand.GET_UNKNOWN_SUB_COMMAND_NAME + "\n" + "File" + "\nrj\ne\n";
try {
this.gateway.getDefaultJVMView().addSingleImport("java.util.List");
this.gateway.getDefaultJVMView().addStarImport("java.io.*");
command.execute("r", new BufferedReader(new StringReader(inputCommand1)), writer);
assertEquals("!yp\n", sWriter.toString());
command.execute("r", new BufferedReader(new StringReader(inputCommand2)), writer);
assertEquals("!yp\n!yp\n", sWriter.toString());
command.execute("r", new BufferedReader(new StringReader(inputCommand3)), writer);
assertEquals("!yp\n!yp\n!ycjava.lang.String\n", sWriter.toString());
command.execute("r", new BufferedReader(new StringReader(inputCommand4)), writer);
assertEquals("!yp\n!yp\n!ycjava.lang.String\n!ycp1.Cat\n", sWriter.toString());
command.execute("r", new BufferedReader(new StringReader(inputCommand5)), writer);
assertEquals("!yp\n!yp\n!ycjava.lang.String\n!ycp1.Cat\n!ycbyte\n", sWriter.toString());
command.execute("r", new BufferedReader(new StringReader(inputCommand6)), writer);
assertEquals("!yp\n!yp\n!ycjava.lang.String\n!ycp1.Cat\n!ycbyte\n!ycjava.lang.System\n",
sWriter.toString());
command.execute("r", new BufferedReader(new StringReader(inputCommand7)), writer);
assertEquals(
"!yp\n!yp\n!ycjava.lang.String\n!ycp1.Cat\n!ycbyte\n!ycjava.lang.System\n!ycjava.io.File\n",
sWriter.toString());
} catch (Exception e) {
e.printStackTrace();
fail();
}
}
|
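A note on the expected strings in the test above: Py4J prefixes successful replies with "!y", followed by a type marker ("p" for a package, "c" plus the fully qualified name for a class). A minimal harness sketch, assuming the writer/sWriter pair is set up the way the assertions imply:

StringWriter sWriter = new StringWriter();
BufferedWriter writer = new BufferedWriter(sWriter);
// "java" resolves to a package -> "!yp\n"; "java.lang.String" would yield "!ycjava.lang.String\n"
command.execute("r", new BufferedReader(new StringReader(inputCommand1)), writer);
assertEquals("!yp\n", sWriter.toString());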
static List<IdentifierTree> getVariableUses(ExpressionTree tree) {
List<IdentifierTree> freeVars = new ArrayList<>();
new TreeScanner<Void, Void>() {
@Override
public Void visitIdentifier(IdentifierTree node, Void v) {
if (((JCIdent) node).sym instanceof VarSymbol) {
freeVars.add(node);
}
return super.visitIdentifier(node, null);
}
}.scan(tree, null);
return freeVars;
}
|
@Test
public void getVariableUses() {
writeFile("A.java", "public class A {", " public String b;", " void foo() {}", "}");
writeFile(
"B.java",
"public class B {",
" A my;",
" B bar() { return null; }",
" void foo(String x, A a) {",
" x.trim().intern();",
" a.b.trim().intern();",
" this.my.foo();",
" my.foo();",
" this.bar();",
" String.valueOf(0);",
" java.lang.String.valueOf(1);",
" bar().bar();",
" System.out.println();",
" a.b.indexOf(x.substring(1));",
" }",
"}");
TestScanner scanner =
new TestScanner.Builder()
.add("x.trim().intern()", "x")
.add("a.b.trim().intern()", "a")
.add("this.my.foo()", "this")
.add("my.foo()", "my")
.add("this.bar()", "this")
.add("String.valueOf(0)")
.add("java.lang.String.valueOf(1)")
.add("bar().bar()")
.add("System.out.println()")
.add("a.b.indexOf(x.substring(1))", "a", "x")
.build();
assertCompiles(scanner);
scanner.assertFoundAll();
}
|
@Override
public void start() {
try {
forceMkdir(fs.getUninstalledPluginsDir());
} catch (IOException e) {
throw new IllegalStateException("Fail to create the directory: " + fs.getUninstalledPluginsDir(), e);
}
}
|
@Test
public void create_uninstall_dir() {
File dir = new File(testFolder.getRoot(), "dir");
when(fs.getUninstalledPluginsDir()).thenReturn(dir);
assertThat(dir).doesNotExist();
underTest.start();
assertThat(dir).isDirectory();
}
|
@Override
public void store(Measure newMeasure) {
saveMeasure(newMeasure.inputComponent(), (DefaultMeasure<?>) newMeasure);
}
|
@Test
public void should_not_skip_file_measures_on_pull_request_when_file_status_is_SAME() {
DefaultInputFile file = new TestInputFileBuilder("foo", "src/Foo.php").setStatus(InputFile.Status.SAME).build();
when(branchConfiguration.isPullRequest()).thenReturn(true);
underTest.store(new DefaultMeasure()
.on(file)
.forMetric(CoreMetrics.NCLOC)
.withValue(10));
ScannerReport.Measure m = reportReader.readComponentMeasures(file.scannerId()).next();
assertThat(m.getIntValue().getValue()).isEqualTo(10);
assertThat(m.getMetricKey()).isEqualTo(CoreMetrics.NCLOC_KEY);
}
|
@Override
public boolean equals(final Object o) {
if(this == o) {
return true;
}
if(o == null || getClass() != o.getClass()) {
return false;
}
final Header header = (Header) o;
if(!Objects.equals(name, header.name)) {
return false;
}
if(!Objects.equals(value, header.value)) {
return false;
}
return true;
}
|
@Test
public void testEquals() {
assertEquals(new Header("k", "v"), new Header("k", "v"));
assertNotEquals(new Header("k", "v"), new Header("k", "f"));
assertNotEquals(new Header("k", "v"), new Header("m", "v"));
}
|
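Since Header overrides equals, a consistent hashCode is required by the Object contract. A minimal sketch, assuming name and value are the only identity fields (as the equals above suggests):

@Override
public int hashCode() {
    // Must agree with equals: derived from the same name/value fields.
    return Objects.hash(name, value);
}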
@Override
public KeyVersion createKey(final String name, final byte[] material,
final Options options) throws IOException {
return doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.createKey(name, material, options);
}
}, nextIdx(), false);
}
|
@Test
public void testClientRetriesSucceedsSecondTime() throws Exception {
Configuration conf = new Configuration();
conf.setInt(
CommonConfigurationKeysPublic.KMS_CLIENT_FAILOVER_MAX_RETRIES_KEY, 3);
KMSClientProvider p1 = mock(KMSClientProvider.class);
when(p1.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new ConnectTimeoutException("p1"))
.thenReturn(new KMSClientProvider.KMSKeyVersion("test3", "v1",
new byte[0]));
KMSClientProvider p2 = mock(KMSClientProvider.class);
when(p2.createKey(Mockito.anyString(), Mockito.any(Options.class)))
.thenThrow(new ConnectTimeoutException("p2"));
when(p1.getKMSUrl()).thenReturn("p1");
when(p2.getKMSUrl()).thenReturn("p2");
LoadBalancingKMSClientProvider kp = new LoadBalancingKMSClientProvider(
new KMSClientProvider[] {p1, p2}, 0, conf);
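// Expected failover path: p1 times out, the request fails over to p2, p2 times out,
// then p1 is retried and succeeds -- hence the verify counts below (p1 twice, p2 once).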
try {
kp.createKey("test3", new Options(conf));
} catch (Exception e) {
fail("Provider p1 should have answered the request second time.");
}
verify(p1, Mockito.times(2)).createKey(Mockito.eq("test3"),
Mockito.any(Options.class));
verify(p2, Mockito.times(1)).createKey(Mockito.eq("test3"),
Mockito.any(Options.class));
}
|
@Override
public CreatePartitionsResult createPartitions(final Map<String, NewPartitions> newPartitions,
final CreatePartitionsOptions options) {
final Map<String, KafkaFutureImpl<Void>> futures = new HashMap<>(newPartitions.size());
final CreatePartitionsTopicCollection topics = new CreatePartitionsTopicCollection(newPartitions.size());
for (Map.Entry<String, NewPartitions> entry : newPartitions.entrySet()) {
final String topic = entry.getKey();
final NewPartitions newPartition = entry.getValue();
List<List<Integer>> newAssignments = newPartition.assignments();
List<CreatePartitionsAssignment> assignments = newAssignments == null ? null :
newAssignments.stream()
.map(brokerIds -> new CreatePartitionsAssignment().setBrokerIds(brokerIds))
.collect(Collectors.toList());
topics.add(new CreatePartitionsTopic()
.setName(topic)
.setCount(newPartition.totalCount())
.setAssignments(assignments));
futures.put(topic, new KafkaFutureImpl<>());
}
if (!topics.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getCreatePartitionsCall(options, futures, topics,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new CreatePartitionsResult(new HashMap<>(futures));
}
|
@Test
public void testCreatePartitionsDontRetryThrottlingExceptionWhenDisabled() throws Exception {
try (AdminClientUnitTestEnv env = mockClientEnv()) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(
expectCreatePartitionsRequestWithTopics("topic1", "topic2", "topic3"),
prepareCreatePartitionsResponse(1000,
createPartitionsTopicResult("topic1", Errors.NONE),
createPartitionsTopicResult("topic2", Errors.THROTTLING_QUOTA_EXCEEDED),
createPartitionsTopicResult("topic3", Errors.TOPIC_ALREADY_EXISTS)));
Map<String, NewPartitions> counts = new HashMap<>();
counts.put("topic1", NewPartitions.increaseTo(1));
counts.put("topic2", NewPartitions.increaseTo(2));
counts.put("topic3", NewPartitions.increaseTo(3));
CreatePartitionsResult result = env.adminClient().createPartitions(
counts, new CreatePartitionsOptions().retryOnQuotaViolation(false));
assertNull(result.values().get("topic1").get());
ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.values().get("topic2"),
ThrottlingQuotaExceededException.class);
assertEquals(1000, e.throttleTimeMs());
TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class);
}
}
|
@Override
public Processor<K, Change<V>, K, Change<VOut>> get() {
return new KTableTransformValuesProcessor(transformerSupplier.get());
}
|
@Test
public void shouldThrowOnGetIfSupplierReturnsNull() {
final KTableTransformValues<String, String, String> transformer =
new KTableTransformValues<>(parent, new NullSupplier(), QUERYABLE_NAME);
try {
transformer.get();
fail("NPE expected");
} catch (final NullPointerException expected) {
// expected
}
}
|
public static String buildGlueExpression(Map<Column, Domain> partitionPredicates)
{
List<String> perColumnExpressions = new ArrayList<>();
int expressionLength = 0;
for (Map.Entry<Column, Domain> partitionPredicate : partitionPredicates.entrySet()) {
String columnName = partitionPredicate.getKey().getName();
if (JSQL_PARSER_RESERVED_KEYWORDS.contains(columnName.toUpperCase(ENGLISH))) {
// The column name is a reserved keyword in the grammar of the SQL parser used internally by Glue API
continue;
}
Domain domain = partitionPredicate.getValue();
if (domain != null && !domain.isAll()) {
Optional<String> columnExpression = buildGlueExpressionForSingleDomain(columnName, domain);
if (columnExpression.isPresent()) {
int newExpressionLength = expressionLength + columnExpression.get().length();
if (expressionLength > 0) {
newExpressionLength += CONJUNCT_SEPARATOR.length();
}
if (newExpressionLength > GLUE_EXPRESSION_CHAR_LIMIT) {
continue;
}
perColumnExpressions.add(columnExpression.get());
expressionLength = newExpressionLength;
}
}
}
return Joiner.on(CONJUNCT_SEPARATOR).join(perColumnExpressions);
}
|
@Test
public void testBuildGlueExpressionTupleDomainEqualsAndInClause()
{
Map<Column, Domain> predicates = new PartitionFilterBuilder(HIVE_TYPE_TRANSLATOR)
.addStringValues("col1", "2020-01-01")
.addStringValues("col2", "2020-02-20", "2020-02-28")
.build();
String expression = buildGlueExpression(predicates);
assertEquals(expression, "((col1 = '2020-01-01')) AND ((col2 in ('2020-02-20', '2020-02-28')))");
}
|
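Worth noting: buildGlueExpression silently skips any per-column expression that would push the total past GLUE_EXPRESSION_CHAR_LIMIT, so the result may cover only a subset of the predicates and callers must treat it as a pre-filter. A single-column sketch reusing the test's builder fixture:

Map<Column, Domain> predicates = new PartitionFilterBuilder(HIVE_TYPE_TRANSLATOR)
        .addStringValues("col1", "2020-01-01")
        .build();
// one conjunct, so no AND separator appears
assertEquals(buildGlueExpression(predicates), "((col1 = '2020-01-01'))");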
public Formula(Term response, Term... predictors) {
if (response instanceof Dot || response instanceof FactorCrossing) {
throw new IllegalArgumentException("The response variable cannot be '.' or FactorCrossing.");
}
this.response = response;
this.predictors = predictors;
}
|
@Test
public void testFormula() {
System.out.println("formula");
Formula formula = Formula.of("salary", $("age"));
assertEquals("salary ~ age", formula.toString());
DataFrame output = formula.frame(df);
System.out.println(output);
assertEquals(df.size(), output.size());
assertEquals(2, output.ncol());
smile.data.type.StructType schema = DataTypes.struct(
new StructField("salary", DataTypes.object(Double.class)),
new StructField("age", DataTypes.IntegerType)
);
assertEquals(schema, output.schema());
DataFrame x = formula.x(df);
System.out.println(x);
assertEquals(df.size(), x.size());
assertEquals(1, x.ncol());
smile.data.type.StructType xschema = DataTypes.struct(
new StructField("age", DataTypes.IntegerType)
);
assertEquals(xschema, x.schema());
assertEquals(10000.0, formula.y(df.get(0)), 1E-7);
assertEquals(Double.NaN, formula.y(df.get(1)), 1E-7);
Matrix matrix = formula.matrix(df);
assertEquals(df.size(), matrix.nrow());
assertEquals(2, matrix.ncol());
}
|
private static void handleSetTabletBinlogConfig(long backendId, Map<Long, TTablet> backendTablets) {
List<Pair<Long, BinlogConfig>> tabletToBinlogConfig = Lists.newArrayList();
TabletInvertedIndex invertedIndex = GlobalStateMgr.getCurrentState().getTabletInvertedIndex();
for (TTablet backendTablet : backendTablets.values()) {
for (TTabletInfo tabletInfo : backendTablet.tablet_infos) {
if (!tabletInfo.isSetBinlog_config_version()) {
continue;
}
long tabletId = tabletInfo.getTablet_id();
long beBinlogConfigVersion = tabletInfo.binlog_config_version;
TabletMeta tabletMeta = invertedIndex.getTabletMeta(tabletId);
long dbId = tabletMeta != null ? tabletMeta.getDbId() : TabletInvertedIndex.NOT_EXIST_VALUE;
long tableId = tabletMeta != null ? tabletMeta.getTableId() : TabletInvertedIndex.NOT_EXIST_VALUE;
Database db = GlobalStateMgr.getCurrentState().getDb(dbId);
if (db == null) {
continue;
}
boolean needToCheck = false;
OlapTable olapTable = (OlapTable) db.getTable(tableId);
if (olapTable == null) {
continue;
}
Locker locker = new Locker();
locker.lockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
try {
BinlogConfig binlogConfig = olapTable.getCurBinlogConfig();
// backward compatible
if (binlogConfig == null) {
continue;
}
Long feBinlogConfigVersion = binlogConfig.getVersion();
if (beBinlogConfigVersion < feBinlogConfigVersion) {
tabletToBinlogConfig.add(new Pair<>(tabletId, olapTable.getCurBinlogConfig()));
} else if (beBinlogConfigVersion == feBinlogConfigVersion) {
if (olapTable.isBinlogEnabled() && olapTable.getBinlogAvailableVersion().isEmpty()) {
// do not check here: the check may need to acquire the db write lock
needToCheck = true;
}
} else {
LOG.warn("table {} binlog config version of tabletId: {}, BeId: {}, is {} " +
"greater than version of FE, which is {}", olapTable.getName(), tabletId, backendId,
beBinlogConfigVersion, feBinlogConfigVersion);
}
} finally {
locker.unLockTablesWithIntensiveDbLock(db, Lists.newArrayList(olapTable.getId()), LockType.READ);
}
if (needToCheck) {
GlobalStateMgr.getCurrentState().getBinlogManager().checkAndSetBinlogAvailableVersion(db,
olapTable, tabletId, backendId);
}
}
}
LOG.debug("find [{}] tablets need set binlog config ", tabletToBinlogConfig.size());
if (!tabletToBinlogConfig.isEmpty()) {
AgentBatchTask batchTask = new AgentBatchTask();
TabletMetadataUpdateAgentTask task = TabletMetadataUpdateAgentTaskFactory.createBinlogConfigUpdateTask(
backendId, tabletToBinlogConfig);
batchTask.addTask(task);
AgentTaskExecutor.submit(batchTask);
}
}
|
@Test
public void testHandleSetTabletBinlogConfig() {
Database db = GlobalStateMgr.getCurrentState().getDb("test");
long dbId = db.getId();
OlapTable olapTable = (OlapTable) db.getTable("binlog_report_handler_test");
long backendId = 10001L;
List<Long> tabletIds = GlobalStateMgr.getCurrentState().getTabletInvertedIndex().getTabletIdsByBackendId(10001);
Assert.assertFalse(tabletIds.isEmpty());
Map<Long, TTablet> backendTablets = new HashMap<Long, TTablet>();
List<TTabletInfo> tabletInfos = Lists.newArrayList();
TTablet tablet = new TTablet(tabletInfos);
for (Long tabletId : tabletIds) {
TTabletInfo tabletInfo = new TTabletInfo();
tabletInfo.setTablet_id(tabletId);
tabletInfo.setSchema_hash(60000);
tabletInfo.setBinlog_config_version(-1);
tablet.tablet_infos.add(tabletInfo);
}
backendTablets.put(backendId, tablet);
ReportHandler handler = new ReportHandler();
handler.testHandleSetTabletBinlogConfig(backendId, backendTablets);
for (Long tabletId : tabletIds) {
TTabletInfo tabletInfo = new TTabletInfo();
tabletInfo.setTablet_id(tabletId);
tabletInfo.setSchema_hash(60000);
tabletInfo.setBinlog_config_version(0);
tablet.tablet_infos.add(tabletInfo);
}
backendTablets.put(backendId, tablet);
handler.testHandleSetTabletBinlogConfig(backendId, backendTablets);
Assert.assertTrue(GlobalStateMgr.getCurrentState().getBinlogManager().isBinlogAvailable(dbId, olapTable.getId()));
}
|
public IntersectAllOperator(OpChainExecutionContext opChainExecutionContext,
List<MultiStageOperator> inputOperators,
DataSchema dataSchema) {
super(opChainExecutionContext, inputOperators, dataSchema);
}
|
@Test
public void testIntersectAllOperator() {
DataSchema schema = new DataSchema(new String[]{"int_col"},
new DataSchema.ColumnDataType[]{DataSchema.ColumnDataType.INT});
Mockito.when(_leftOperator.nextBlock())
.thenReturn(OperatorTestUtil.block(schema, new Object[]{1}, new Object[]{2}, new Object[]{3}))
.thenReturn(TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));
Mockito.when(_rightOperator.nextBlock()).thenReturn(
OperatorTestUtil.block(schema, new Object[]{1}, new Object[]{2}, new Object[]{4}))
.thenReturn(TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0));
IntersectAllOperator intersectOperator =
new IntersectAllOperator(OperatorTestUtil.getTracingContext(), ImmutableList.of(_leftOperator, _rightOperator),
schema);
TransferableBlock result = intersectOperator.nextBlock();
while (result.getType() != DataBlock.Type.ROW) {
result = intersectOperator.nextBlock();
}
List<Object[]> resultRows = result.getContainer();
List<Object[]> expectedRows = Arrays.asList(new Object[]{1}, new Object[]{2});
Assert.assertEquals(resultRows.size(), expectedRows.size());
for (int i = 0; i < resultRows.size(); i++) {
Assert.assertEquals(resultRows.get(i), expectedRows.get(i));
}
}
|
@Override
public boolean apply(InputFile inputFile) {
return extension.equals(getExtension(inputFile));
}
|
@Test
public void should_not_match_incorrect_extension() throws IOException {
FileExtensionPredicate predicate = new FileExtensionPredicate("bat");
assertThat(predicate.apply(mockWithName("prog.batt"))).isFalse();
assertThat(predicate.apply(mockWithName("prog.abat"))).isFalse();
assertThat(predicate.apply(mockWithName("prog."))).isFalse();
assertThat(predicate.apply(mockWithName("prog.bat."))).isFalse();
assertThat(predicate.apply(mockWithName("prog.bat.batt"))).isFalse();
assertThat(predicate.apply(mockWithName("prog"))).isFalse();
}
|
public static String convertToBitcoinURI(Address address, Coin amount,
String label, String message) {
return convertToBitcoinURI(address.network(), address.toString(), amount, label, message);
}
|
@Test
public void testConvertToBitcoinURI_segwit() {
Address segwitAddress = AddressParser.getDefault(MAINNET).parseAddress(MAINNET_GOOD_SEGWIT_ADDRESS);
assertEquals("bitcoin:" + MAINNET_GOOD_SEGWIT_ADDRESS + "?message=segwit%20rules", BitcoinURI.convertToBitcoinURI(
segwitAddress, null, null, "segwit rules"));
}
|
public String getName() {
return name;
}
|
@Test
public void hasAName() throws Exception {
assertThat(handler.getName())
.isEqualTo("handler");
}
|
public void start(XmppDeviceFactory deviceFactory) {
log.info("XMPP Server has started.");
this.run(deviceFactory);
}
|
@Test
public void testStart() {
XmppDeviceFactory mockXmppDeviceFactory = EasyMock.createMock(XmppDeviceFactory.class);
server.start(mockXmppDeviceFactory);
assertNotNull(server.channel);
assertNotNull(server.channelClass);
assertNotNull(server.eventLoopGroup);
}
|
@Override
protected void handlePut(final String listenTo, final ClusterProperties discoveryProperties)
{
if (discoveryProperties != null)
{
ActivePropertiesResult pickedPropertiesResult = pickActiveProperties(discoveryProperties);
ClusterInfoItem newClusterInfoItem = new ClusterInfoItem(
_simpleLoadBalancerState,
pickedPropertiesResult.clusterProperties,
PartitionAccessorFactory.getPartitionAccessor(
pickedPropertiesResult.clusterProperties.getClusterName(),
_partitionAccessorRegistry,
pickedPropertiesResult.clusterProperties.getPartitionProperties()),
pickedPropertiesResult.distribution, getFailoutProperties(discoveryProperties));
if (_simpleLoadBalancerState.getClusterInfo().put(listenTo, newClusterInfoItem) == null) {
info(_log, "getting new ClusterInfoItem for cluster ", listenTo, ": ", newClusterInfoItem);
}
_simpleLoadBalancerState.notifyListenersOnClusterInfoUpdates(newClusterInfoItem);
// notify the cluster listeners only when discoveryProperties is not null, because we don't
// want to count initialization (just because listenToCluster is called)
_simpleLoadBalancerState.notifyClusterListenersOnAdd(listenTo);
}
else
{
_log.warn("Received a null cluster properties for {}", listenTo);
// still insert the ClusterInfoItem when discoveryProperties is null, but don't create accessor
_simpleLoadBalancerState.getClusterInfo().put(listenTo,
new ClusterInfoItem(_simpleLoadBalancerState, null, null, null));
}
}
|
@Test(dataProvider = "getConfigsAndDistributions")
public void testWithCanaryConfigs(ClusterProperties stableConfigs, ClusterProperties canaryConfigs, CanaryDistributionStrategy distributionStrategy,
CanaryDistributionProvider.Distribution distribution, FailoutProperties failoutProperties)
{
ClusterLoadBalancerSubscriberFixture fixture = new ClusterLoadBalancerSubscriberFixture();
when(fixture._canaryDistributionProvider.distribute(any())).thenReturn(distribution);
fixture.getMockSubscriber(distribution != null).handlePut(CLUSTER_NAME,
new ClusterStoreProperties(stableConfigs, canaryConfigs, distributionStrategy, failoutProperties));
Assert.assertEquals(fixture._clusterInfo.get(CLUSTER_NAME).getClusterPropertiesItem().getProperty(),
distribution == CanaryDistributionProvider.Distribution.CANARY ? canaryConfigs : stableConfigs);
verify(fixture._simpleLoadBalancerState, times(1)).notifyClusterListenersOnAdd(CLUSTER_NAME);
Assert.assertEquals(fixture._clusterInfoUpdatesCaptor.getValue().getClusterPropertiesItem().getProperty(),
distribution == CanaryDistributionProvider.Distribution.CANARY ? canaryConfigs : stableConfigs);
Assert.assertEquals(fixture._clusterInfoUpdatesCaptor.getValue().getClusterPropertiesItem().getDistribution(),
distribution == null ? CanaryDistributionProvider.Distribution.STABLE : distribution);
}
|
@Override
public void write(T record) {
recordConsumer.startMessage();
try {
messageWriter.writeTopLevelMessage(record);
} catch (RuntimeException e) {
Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
LOG.error("Cannot write message {}: {}", e.getMessage(), m);
throw e;
}
recordConsumer.endMessage();
}
|
@Test
public void testProto3MapIntMessageEmpty() throws Exception {
RecordConsumer readConsumerMock = Mockito.mock(RecordConsumer.class);
ProtoWriteSupport<TestProto3.MapIntMessage> instance =
createReadConsumerInstance(TestProto3.MapIntMessage.class, readConsumerMock);
TestProto3.MapIntMessage.Builder msg = TestProto3.MapIntMessage.newBuilder();
instance.write(msg.build());
InOrder inOrder = Mockito.inOrder(readConsumerMock);
inOrder.verify(readConsumerMock).startMessage();
inOrder.verify(readConsumerMock).endMessage();
Mockito.verifyNoMoreInteractions(readConsumerMock);
}
|
public Properties build() {
checkNotNull(logDir, config);
checkState(jsonOutput || (logPattern != null), "log pattern must be specified if not using json output");
configureGlobalFileLog();
if (allLogsToConsole) {
configureGlobalStdoutLog();
}
ofNullable(logLevelConfig).ifPresent(this::applyLogLevelConfiguration);
Properties res = new Properties();
res.putAll(log4j2Properties);
return res;
}
|
@Test
public void buildLogPattern_does_not_put_threadIdFieldPattern_from_RootLoggerConfig_is_null() {
String pattern = newLog4JPropertiesBuilder().buildLogPattern(
newRootLoggerConfigBuilder()
.setProcessId(ProcessId.COMPUTE_ENGINE)
.build());
assertThat(pattern).isEqualTo("%d{yyyy.MM.dd HH:mm:ss} %-5level ce[][%logger{1.}] %msg%n");
}
|
public synchronized int sendFetches() {
final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
sendFetchesInternal(
fetchRequests,
(fetchTarget, data, clientResponse) -> {
synchronized (Fetcher.this) {
handleFetchSuccess(fetchTarget, data, clientResponse);
}
},
(fetchTarget, data, error) -> {
synchronized (Fetcher.this) {
handleFetchFailure(fetchTarget, data, error);
}
});
return fetchRequests.size();
}
|
@Test
public void testFetchOnCompletedFetchesForAllPausedPartitions() {
buildFetcher();
assignFromUser(mkSet(tp0, tp1));
// seek to tp0 and tp1 in two polls to generate 2 complete requests and responses
// #1 seek, request, poll, response
subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp0)));
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
consumerClient.poll(time.timer(0));
// #2 seek, request, poll, response
subscriptions.seekUnvalidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
assertEquals(1, sendFetches());
client.prepareResponse(fullFetchResponse(tidp1, nextRecords, Errors.NONE, 100L, 0));
subscriptions.pause(tp0);
subscriptions.pause(tp1);
consumerClient.poll(time.timer(0));
assertEmptyFetch("Should not return records or advance position for all paused partitions");
assertTrue(fetcher.hasCompletedFetches(), "Should still contain completed fetches");
assertFalse(fetcher.hasAvailableFetches(), "Should not have any available (non-paused) completed fetches");
}
|
@Override
public double score(int[] truth, int[] cluster) {
return of(truth, cluster);
}
|
@Test
public void test() {
System.out.println("adjusted rand index");
int[] clusters = {2, 3, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 3, 3, 3, 2, 3, 3, 1, 1, 1, 1, 1, 1, 4, 1, 3, 3, 3, 3, 3, 1, 4, 4, 4, 3, 1, 1, 3, 1, 4, 3, 3, 3, 3, 1, 1, 3, 1, 1, 3, 3, 3, 3, 4, 3, 1, 3, 1, 3, 1, 1, 1, 1, 1, 3, 3, 2, 3, 3, 1, 1, 3, 3, 3, 3, 3, 3, 1, 1, 3, 2, 3, 2, 2, 4, 1, 3, 1, 3, 1, 1, 3, 4, 4, 4, 1, 2, 3, 1, 1, 3, 1, 1, 1, 4, 3, 3, 2, 3, 3, 1, 3, 3, 1, 1, 1, 3, 4, 4, 2, 3, 3, 3, 3, 1, 1, 1, 3, 3, 3, 2, 3, 3, 3, 2, 3, 3, 1, 3, 1, 3, 3, 1, 1, 3, 3, 3, 1, 1, 1, 1, 3, 3, 4, 3, 2, 3, 1, 1, 3, 1, 2, 3, 1, 1, 3, 3, 1, 1, 1, 1, 1, 3, 1, 3, 1, 3, 1, 3, 1, 1, 3, 1, 1, 1, 3, 2, 1, 2, 1, 1, 1, 1, 1, 3, 1, 1, 3, 3, 1, 3, 3, 3};
int[] alt = {3, 2, 2, 0, 0, 2, 2, 0, 2, 0, 0, 2, 2, 2, 2, 2, 3, 2, 2, 0, 0, 0, 0, 0, 0, 3, 0, 2, 2, 2, 2, 2, 0, 3, 3, 3, 2, 0, 0, 2, 0, 3, 2, 2, 2, 2, 0, 0, 2, 0, 0, 2, 2, 2, 2, 3, 2, 0, 2, 0, 2, 0, 0, 0, 0, 0, 2, 2, 3, 2, 2, 0, 0, 2, 2, 2, 2, 2, 2, 0, 0, 2, 3, 2, 0, 3, 3, 0, 2, 0, 2, 0, 0, 2, 3, 3, 3, 0, 3, 2, 0, 0, 2, 0, 0, 0, 3, 2, 2, 3, 2, 2, 0, 2, 2, 0, 0, 0, 2, 3, 3, 3, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 3, 2, 2, 2, 2, 2, 2, 0, 2, 0, 2, 2, 0, 0, 2, 1, 2, 0, 0, 0, 0, 2, 2, 3, 2, 1, 2, 0, 0, 2, 0, 3, 2, 0, 0, 2, 2, 0, 0, 0, 0, 0, 2, 0, 2, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 2, 0, 2, 2, 2};
AdjustedRandIndex instance = new AdjustedRandIndex();
double expResult = 0.9262;
double result = instance.score(clusters, alt);
assertEquals(expResult, result, 1E-4);
}
|
@Override
public QueryStats execute(Statement statement)
{
return execute(statement, Optional.empty()).getQueryStats();
}
|
@Test
public void testQuerySucceededWithConverter()
{
QueryResult<Integer> result = prestoAction.execute(
SQL_PARSER.createStatement("SELECT x FROM (VALUES (1), (2), (3)) t(x)", PARSING_OPTIONS),
resultSet -> resultSet.getInt("x") * resultSet.getInt("x"));
assertEquals(result.getQueryStats().getState(), FINISHED.name());
assertEquals(result.getResults(), ImmutableList.of(1, 4, 9));
}
|
public static boolean isValidOrigin(String sourceHost, ZeppelinConfiguration zConf)
throws UnknownHostException, URISyntaxException {
String sourceUriHost = "";
if (sourceHost != null && !sourceHost.isEmpty()) {
sourceUriHost = new URI(sourceHost).getHost();
sourceUriHost = (sourceUriHost == null) ? "" : sourceUriHost.toLowerCase();
}
sourceUriHost = sourceUriHost.toLowerCase();
String currentHost = InetAddress.getLocalHost().getHostName().toLowerCase();
return zConf.getAllowedOrigins().contains("*")
|| currentHost.equals(sourceUriHost)
|| "localhost".equals(sourceUriHost)
|| zConf.getAllowedOrigins().contains(sourceHost);
}
|
@Test
void emptyOrigin()
throws URISyntaxException, UnknownHostException {
assertFalse(CorsUtils.isValidOrigin("",
ZeppelinConfiguration.load("zeppelin-site.xml")));
}
|
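For contrast with the empty-origin case, a sketch of an origin that passes: the focal method extracts the URI host and accepts "localhost" unconditionally, so with the same configuration fixture:

assertTrue(CorsUtils.isValidOrigin("http://localhost:8080",
    ZeppelinConfiguration.load("zeppelin-site.xml")));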
@Override
public Enumeration<URL> getResources(String name) throws IOException {
List<URL> resources = new ArrayList<>();
ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
log.trace("Received request to load resources '{}'", name);
for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
switch (classLoadingSource) {
case APPLICATION:
if (getParent() != null) {
resources.addAll(Collections.list(getParent().getResources(name)));
}
break;
case PLUGIN:
resources.addAll(Collections.list(findResources(name)));
break;
case DEPENDENCIES:
resources.addAll(findResourcesFromDependencies(name));
break;
}
}
return Collections.enumeration(resources);
}
|
@Test
void parentLastGetResourcesExistsOnlyInDependency() throws IOException, URISyntaxException {
Enumeration<URL> resources = parentLastPluginClassLoader.getResources("META-INF/dependency-file");
assertNumberOfResourcesAndFirstLineOfFirstElement(1, "dependency", resources);
}
|
public MapStoreConfig setFactoryImplementation(@Nonnull Object factoryImplementation) {
this.factoryImplementation = checkNotNull(factoryImplementation, "Map store factory cannot be null!");
this.factoryClassName = null;
return this;
}
|
@Test
public void setFactoryImplementation() {
Object mapStoreFactoryImpl = new Object();
MapStoreConfig cfg = new MapStoreConfig().setFactoryImplementation(mapStoreFactoryImpl);
assertEquals(mapStoreFactoryImpl, cfg.getFactoryImplementation());
assertEquals(new MapStoreConfig().setFactoryImplementation(mapStoreFactoryImpl), cfg);
}
|
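setFactoryImplementation also nulls factoryClassName, keeping the two configuration styles mutually exclusive. A sketch (the factory class name below is hypothetical):

MapStoreConfig cfg = new MapStoreConfig()
        .setFactoryClassName("com.example.MyMapStoreFactory") // hypothetical name
        .setFactoryImplementation(new Object());
// the previously configured class name is cleared
assertNull(cfg.getFactoryClassName());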
public static <T> JSONSchema<T> of(SchemaDefinition<T> schemaDefinition) {
SchemaReader<T> reader = schemaDefinition.getSchemaReaderOpt()
.orElseGet(() -> new JacksonJsonReader<>(jsonMapper(), schemaDefinition.getPojo()));
SchemaWriter<T> writer = schemaDefinition.getSchemaWriterOpt()
.orElseGet(() -> new JacksonJsonWriter<>(jsonMapper()));
return new JSONSchema<>(parseSchemaInfo(schemaDefinition, SchemaType.JSON), schemaDefinition.getPojo(),
reader, writer);
}
|
@Test
public void testAllowNullCorrectPolymorphism() {
Bar bar = new Bar();
bar.setField1(true);
DerivedFoo derivedFoo = new DerivedFoo();
derivedFoo.setField1("foo1");
derivedFoo.setField2("bar2");
derivedFoo.setField3(4);
derivedFoo.setField4(bar);
derivedFoo.setField5("derived1");
derivedFoo.setField6(2);
Foo foo = new Foo();
foo.setField1("foo1");
foo.setField2("bar2");
foo.setField3(4);
foo.setField4(bar);
SchemaTestUtils.DerivedDerivedFoo derivedDerivedFoo = new SchemaTestUtils.DerivedDerivedFoo();
derivedDerivedFoo.setField1("foo1");
derivedDerivedFoo.setField2("bar2");
derivedDerivedFoo.setField3(4);
derivedDerivedFoo.setField4(bar);
derivedDerivedFoo.setField5("derived1");
derivedDerivedFoo.setField6(2);
derivedDerivedFoo.setFoo2(foo);
derivedDerivedFoo.setDerivedFoo(derivedFoo);
// schema for base class
JSONSchema<Foo> baseJsonSchema = JSONSchema.of(SchemaDefinition.<Foo>builder().withPojo(Foo.class).withAlwaysAllowNull(false).build());
Assert.assertEquals(baseJsonSchema.decode(baseJsonSchema.encode(foo)), foo);
Assert.assertEquals(baseJsonSchema.decode(baseJsonSchema.encode(derivedFoo)), foo);
Assert.assertEquals(baseJsonSchema.decode(baseJsonSchema.encode(derivedDerivedFoo)), foo);
// schema for derived class
JSONSchema<DerivedFoo> derivedJsonSchema = JSONSchema.of(SchemaDefinition.<DerivedFoo>builder().withPojo(DerivedFoo.class).withAlwaysAllowNull(false).build());
Assert.assertEquals(derivedJsonSchema.decode(derivedJsonSchema.encode(derivedFoo)), derivedFoo);
Assert.assertEquals(derivedJsonSchema.decode(derivedJsonSchema.encode(derivedDerivedFoo)), derivedFoo);
//schema for derived derived class
JSONSchema<SchemaTestUtils.DerivedDerivedFoo> derivedDerivedJsonSchema
= JSONSchema.of(SchemaDefinition.<SchemaTestUtils.DerivedDerivedFoo>builder().withPojo(SchemaTestUtils.DerivedDerivedFoo.class).withAlwaysAllowNull(false).build());
Assert.assertEquals(derivedDerivedJsonSchema.decode(derivedDerivedJsonSchema.encode(derivedDerivedFoo)), derivedDerivedFoo);
}
|
@Override
public String toString() {
return "SmppConfiguration[usingSSL=" + usingSSL
+ ", enquireLinkTimer=" + enquireLinkTimer
+ ", host=" + host
+ ", password=" + password
+ ", port=" + port
+ ", systemId=" + systemId
+ ", systemType=" + systemType
+ ", dataCoding=" + dataCoding
+ ", alphabet=" + alphabet
+ ", encoding=" + encoding
+ ", transactionTimer=" + transactionTimer
+ ", pduProcessorQueueCapacity=" + pduProcessorQueueCapacity
+ ", pduProcessorDegree=" + pduProcessorDegree
+ ", registeredDelivery=" + registeredDelivery
+ ", singleDLR=" + singleDLR
+ ", serviceType=" + serviceType
+ ", sourceAddrTon=" + sourceAddrTon
+ ", destAddrTon=" + destAddrTon
+ ", sourceAddrNpi=" + sourceAddrNpi
+ ", destAddrNpi=" + destAddrNpi
+ ", addressRange=" + addressRange
+ ", protocolId=" + protocolId
+ ", priorityFlag=" + priorityFlag
+ ", replaceIfPresentFlag=" + replaceIfPresentFlag
+ ", sourceAddr=" + sourceAddr
+ ", destAddr=" + destAddr
+ ", typeOfNumber=" + typeOfNumber
+ ", numberingPlanIndicator=" + numberingPlanIndicator
+ ", initialReconnectDelay=" + initialReconnectDelay
+ ", reconnectDelay=" + reconnectDelay
+ ", maxReconnect=" + maxReconnect
+ ", lazySessionCreation=" + lazySessionCreation
+ ", messageReceiverRouteId=" + messageReceiverRouteId
+ ", httpProxyHost=" + httpProxyHost
+ ", httpProxyPort=" + httpProxyPort
+ ", httpProxyUsername=" + httpProxyUsername
+ ", httpProxyPassword=" + httpProxyPassword
+ ", splittingPolicy=" + splittingPolicy
+ ", proxyHeaders=" + proxyHeaders
+ ", interfaceVersion=" + interfaceVersion
+ "]";
}
|
@Test
public void toStringShouldListAllInstanceVariables() {
String expected = "SmppConfiguration["
+ "usingSSL=false, "
+ "enquireLinkTimer=60000, "
+ "host=localhost, "
+ "password=null, "
+ "port=2775, "
+ "systemId=smppclient, "
+ "systemType=, "
+ "dataCoding=0, "
+ "alphabet=0, "
+ "encoding=ISO-8859-1, "
+ "transactionTimer=10000, "
+ "pduProcessorQueueCapacity=100, "
+ "pduProcessorDegree=3, "
+ "registeredDelivery=1, "
+ "singleDLR=false, "
+ "serviceType=CMT, "
+ "sourceAddrTon=0, "
+ "destAddrTon=0, "
+ "sourceAddrNpi=0, "
+ "destAddrNpi=0, "
+ "addressRange=, "
+ "protocolId=0, "
+ "priorityFlag=1, "
+ "replaceIfPresentFlag=0, "
+ "sourceAddr=1616, "
+ "destAddr=1717, "
+ "typeOfNumber=0, "
+ "numberingPlanIndicator=0, "
+ "initialReconnectDelay=5000, "
+ "reconnectDelay=5000, "
+ "maxReconnect=2147483647, "
+ "lazySessionCreation=false, "
+ "messageReceiverRouteId=null, "
+ "httpProxyHost=null, "
+ "httpProxyPort=3128, "
+ "httpProxyUsername=null, "
+ "httpProxyPassword=null, "
+ "splittingPolicy=ALLOW, "
+ "proxyHeaders=null, "
+ "interfaceVersion=3.4]";
assertEquals(expected, configuration.toString());
}
|
static String generateRustLiteral(final PrimitiveType type, final String value)
{
Verify.notNull(type, "type");
Verify.notNull(value, "value");
final String typeName = rustTypeName(type);
if (typeName == null)
{
throw new IllegalArgumentException("Unknown Rust type name found for primitive " + type.primitiveName());
}
switch (type)
{
case CHAR:
case INT8:
case INT16:
case INT32:
case INT64:
return value + '_' + typeName;
case UINT8:
case UINT16:
case UINT32:
case UINT64:
return "0x" + Long.toHexString(parseLong(value)) + '_' + typeName;
case FLOAT:
case DOUBLE:
return value.endsWith("NaN") ? typeName + "::NAN" : value + '_' + typeName;
default:
throw new IllegalArgumentException("Unsupported literal generation for type: " + type.primitiveName());
}
}
|
@Test
void generateRustLiteralNullValueParam()
{
assertThrows(NullPointerException.class, () -> generateRustLiteral(INT8, null));
}
|
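Beyond the null check exercised above, the literal mapping is easiest to see by example. A sketch, assuming rustTypeName maps these primitives to "i32", "u8" and "f64":

generateRustLiteral(INT32, "42");   // -> "42_i32"
generateRustLiteral(UINT8, "255");  // -> "0xff_u8" (unsigned values are emitted in hex)
generateRustLiteral(DOUBLE, "NaN"); // -> "f64::NAN"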
@Override
public List<BufferedRequestState<RequestEntryT>> snapshotState(long checkpointId) {
return Collections.singletonList(new BufferedRequestState<>(bufferedRequestEntries));
}
|
@Test
public void testRestoreFromMultipleStates() throws IOException {
List<BufferedRequestState<Integer>> states =
Arrays.asList(
new BufferedRequestState<>(
Arrays.asList(
new RequestEntryWrapper<>(1, 1),
new RequestEntryWrapper<>(2, 1),
new RequestEntryWrapper<>(3, 1))),
new BufferedRequestState<>(
Arrays.asList(
new RequestEntryWrapper<>(4, 1),
new RequestEntryWrapper<>(5, 1))),
new BufferedRequestState<>(
Collections.singletonList(new RequestEntryWrapper<>(6, 1))));
AsyncSinkWriterImpl sink =
new AsyncSinkWriterImplBuilder().context(sinkInitContext).buildWithState(states);
List<BufferedRequestState<Integer>> bufferedRequestStates = sink.snapshotState(1);
// After snapshotting state, all entries are merged into a single BufferedRequestState
assertThat(bufferedRequestStates).hasSize(1);
BufferedRequestState<Integer> snapshotState = bufferedRequestStates.get(0);
assertThat(snapshotState.getBufferedRequestEntries()).hasSize(6);
assertThat(snapshotState.getStateSize()).isEqualTo(6);
assertThat(
snapshotState.getBufferedRequestEntries().stream()
.map(RequestEntryWrapper::getRequestEntry)
.collect(Collectors.toList()))
.containsExactlyInAnyOrder(1, 2, 3, 4, 5, 6);
}
|
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"})
void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas,
MigrationDecisionCallback callback) {
assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: "
+ Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas);
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas));
logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas));
}
initState(oldReplicas);
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
// fix cyclic partition replica movements
if (fixCycle(oldReplicas, newReplicas)) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId,
Arrays.toString(newReplicas));
}
}
int currentIndex = 0;
while (currentIndex < oldReplicas.length) {
if (logger.isFinestEnabled()) {
logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex,
Arrays.toString(state));
}
assertNoDuplicate(partitionId, oldReplicas, newReplicas);
if (newReplicas[currentIndex] == null) {
if (state[currentIndex] != null) {
// replica owner is removed and no one will own this replica
logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1);
state[currentIndex] = null;
}
currentIndex++;
continue;
}
if (state[currentIndex] == null) {
int i = getReplicaIndex(state, newReplicas[currentIndex]);
if (i == -1) {
// fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner
logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (i > currentIndex) {
// SHIFT UP replica from i to currentIndex, copy data from partition owner
logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId,
state[i], i, currentIndex);
callback.migrate(null, -1, -1, state[i], i, currentIndex);
state[currentIndex] = state[i];
state[i] = null;
continue;
}
throw new AssertionError("partitionId=" + partitionId
+ ", Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas)
+ ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas));
}
if (newReplicas[currentIndex].equals(state[currentIndex])) {
// no change, no action needed
currentIndex++;
continue;
}
if (getReplicaIndex(newReplicas, state[currentIndex]) == -1
&& getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
// MOVE partition replica from its old owner to new owner
logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) {
int newIndex = getReplicaIndex(newReplicas, state[currentIndex]);
assert newIndex > currentIndex : "partitionId=" + partitionId
+ ", Migration decision algorithm failed during SHIFT DOWN! INITIAL: "
+ Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state)
+ ", FINAL: " + Arrays.toString(newReplicas);
if (state[newIndex] == null) {
// it is a SHIFT DOWN
logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId,
state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex);
state[newIndex] = state[currentIndex];
} else {
logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex);
callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex);
}
state[currentIndex] = newReplicas[currentIndex];
currentIndex++;
continue;
}
planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex);
}
assert Arrays.equals(state, newReplicas)
: "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas)
+ " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas);
}
|
@Test
public void test_MOVE() throws UnknownHostException {
final PartitionReplica[] oldReplicas = {
new PartitionReplica(new Address("localhost", 5701), uuids[0]),
new PartitionReplica(new Address("localhost", 5702), uuids[1]),
new PartitionReplica(new Address("localhost", 5703), uuids[2]),
null,
null,
null,
null,
};
final PartitionReplica[] newReplicas = {
new PartitionReplica(new Address("localhost", 5704), uuids[3]),
new PartitionReplica(new Address("localhost", 5702), uuids[1]),
new PartitionReplica(new Address("localhost", 5705), uuids[4]),
null,
null,
null,
null,
};
migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback);
verify(callback).migrate(new PartitionReplica(new Address("localhost", 5701), uuids[0]), 0, -1, new PartitionReplica(new Address("localhost", 5704), uuids[3]), -1, 0);
verify(callback).migrate(new PartitionReplica(new Address("localhost", 5703), uuids[2]), 2, -1, new PartitionReplica(new Address("localhost", 5705), uuids[4]), -1, 2);
}
|
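In the callback used above, the six arguments read as (source, sourceCurrentIndex, sourceNewIndex, destination, destinationCurrentIndex, destinationNewIndex), with -1 meaning "not applicable". A sketch of the COPY branch, where a previously empty slot is filled from the partition owner (the replica fixtures are hypothetical):

PartitionReplica[] oldReplicas = {replicaA, null, null, null, null, null, null};
PartitionReplica[] newReplicas = {replicaA, replicaB, null, null, null, null, null};
migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback);
// replicaB is new to the partition, so it is copied straight into index 1
verify(callback).migrate(null, -1, -1, replicaB, -1, 1);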
public String toString()
{
return "RecordingLog{" +
"entries=" + entriesCache +
", cacheIndex=" + cacheIndexByLeadershipTermIdMap +
'}';
}
|
@Test
void entryToString()
{
final RecordingLog.Entry entry = new RecordingLog.Entry(
42, 5, 1024, 701, 1_000_000_000_000L, 16, ENTRY_TYPE_SNAPSHOT, null, true, 2);
assertEquals(
"Entry{recordingId=42, leadershipTermId=5, termBaseLogPosition=1024, logPosition=701, " +
"timestamp=1000000000000, serviceId=16, type=SNAPSHOT, isValid=true, entryIndex=2}",
entry.toString());
}
|
public Object resolve(final Expression expression) {
return new Visitor().process(expression, null);
}
|
@Test
public void shouldParseTimestamp() {
// Given:
final SqlType type = SqlTypes.TIMESTAMP;
final Expression exp = new StringLiteral("2021-01-09T04:40:02");
// When:
Object o = new GenericExpressionResolver(type, FIELD_NAME, registry, config, "insert value",
false).resolve(exp);
// Then:
assertTrue(o instanceof Timestamp);
assertThat(((Timestamp) o).getTime(), is(1610167202000L));
}
|
protected ConfiguredCloseableHttpClient createClient(
final org.apache.hc.client5.http.impl.classic.HttpClientBuilder builder,
final InstrumentedHttpClientConnectionManager manager,
final String name) {
final String cookiePolicy = configuration.isCookiesEnabled() ? StandardCookieSpec.RELAXED : StandardCookieSpec.IGNORE;
final int timeout = (int) configuration.getTimeout().toMilliseconds();
final int connectionTimeout = (int) configuration.getConnectionTimeout().toMilliseconds();
final int connectionRequestTimeout = (int) configuration.getConnectionRequestTimeout().toMilliseconds();
final long keepAlive = configuration.getKeepAlive().toMilliseconds();
final ConnectionReuseStrategy reuseStrategy = keepAlive == 0
? ((request, response, context) -> false)
: new DefaultConnectionReuseStrategy();
final HttpRequestRetryStrategy retryHandler = configuration.getRetries() == 0
? NO_RETRIES
: (httpRequestRetryStrategy == null ? new DefaultHttpRequestRetryStrategy(configuration.getRetries(),
TimeValue.ofSeconds(1L)) : httpRequestRetryStrategy);
final RequestConfig requestConfig
= RequestConfig.custom().setCookieSpec(cookiePolicy)
.setResponseTimeout(timeout, TimeUnit.MILLISECONDS)
.setConnectTimeout(connectionTimeout, TimeUnit.MILLISECONDS)
.setConnectionKeepAlive(TimeValue.of(-1, TimeUnit.MILLISECONDS))
.setConnectionRequestTimeout(connectionRequestTimeout, TimeUnit.MILLISECONDS)
.build();
final SocketConfig socketConfig = SocketConfig.custom()
.setTcpNoDelay(true)
.setSoTimeout(timeout, TimeUnit.MILLISECONDS)
.build();
manager.setDefaultSocketConfig(socketConfig);
builder.setRequestExecutor(createRequestExecutor(name))
.setConnectionManager(manager)
.setDefaultRequestConfig(requestConfig)
.setConnectionReuseStrategy(reuseStrategy)
.setRetryStrategy(retryHandler)
.setUserAgent(createUserAgent(name));
if (keepAlive != 0) {
// either keep alive based on response header Keep-Alive,
// or if the server can keep a persistent connection (-1), then override based on client's configuration
builder.setKeepAliveStrategy(new DefaultConnectionKeepAliveStrategy() {
@Override
public TimeValue getKeepAliveDuration(HttpResponse response, HttpContext context) {
final TimeValue duration = super.getKeepAliveDuration(response, context);
return (duration.getDuration() == -1) ? TimeValue.ofMilliseconds(keepAlive) : duration;
}
});
}
// create a tunnel through a proxy host if it's specified in the config
final ProxyConfiguration proxy = configuration.getProxyConfiguration();
if (proxy != null) {
final HttpHost httpHost = new HttpHost(proxy.getScheme(), proxy.getHost(), proxy.getPort());
builder.setRoutePlanner(new NonProxyListProxyRoutePlanner(httpHost, proxy.getNonProxyHosts()));
// if the proxy host requires authentication then add the host credentials to the credentials provider
final AuthConfiguration auth = proxy.getAuth();
if (auth != null) {
if (credentialsStore == null) {
credentialsStore = new BasicCredentialsProvider();
}
// set the AuthScope
AuthScope authScope = new AuthScope(httpHost, auth.getRealm(), auth.getAuthScheme());
// set the credentials type
Credentials credentials = configureCredentials(auth);
credentialsStore.setCredentials(authScope, credentials);
}
}
if (credentialsStore != null) {
builder.setDefaultCredentialsProvider(credentialsStore);
}
if (routePlanner != null) {
builder.setRoutePlanner(routePlanner);
}
if (disableContentCompression) {
builder.disableContentCompression();
}
if (redirectStrategy != null) {
builder.setRedirectStrategy(redirectStrategy);
}
if (defaultHeaders != null) {
builder.setDefaultHeaders(defaultHeaders);
}
if (httpProcessor != null) {
builder.addRequestInterceptorFirst(httpProcessor);
builder.addResponseInterceptorLast(httpProcessor);
}
customizeBuilder(builder);
return new ConfiguredCloseableHttpClient(builder.build(), requestConfig);
}
|
@Test
void exposedConfigIsTheSameAsInternalToTheWrappedHttpClient() {
ConfiguredCloseableHttpClient client = builder.createClient(apacheBuilder, connectionManager, "test");
assertThat(client).isNotNull();
assertThat(client.getClient()).extracting("defaultConfig").isEqualTo(client.getDefaultRequestConfig());
}
|
public Plan validateReservationUpdateRequest(
ReservationSystem reservationSystem, ReservationUpdateRequest request)
throws YarnException {
ReservationId reservationId = request.getReservationId();
Plan plan = validateReservation(reservationSystem, reservationId,
AuditConstants.UPDATE_RESERVATION_REQUEST);
validateReservationDefinition(reservationId,
request.getReservationDefinition(), plan,
AuditConstants.UPDATE_RESERVATION_REQUEST);
return plan;
}
|
@Test
public void testUpdateReservationInvalidRR() {
ReservationUpdateRequest request =
createSimpleReservationUpdateRequest(0, 0, 1, 5, 3);
Plan plan = null;
try {
plan = rrValidator.validateReservationUpdateRequest(rSystem, request);
Assert.fail();
} catch (YarnException e) {
Assert.assertNull(plan);
String message = e.getMessage();
Assert.assertTrue(message
.startsWith("No resources have been specified to reserve"));
LOG.info(message);
}
}
|
public static void checkNotNullAndNotEmpty(String arg, String argName) {
checkNotNull(arg, argName);
checkArgument(
!arg.isEmpty(),
"'%s' must not be empty.",
argName);
}
|
@Test
public void testCheckNotNullAndNotEmpty() throws Exception {
// Should not throw.
Validate.checkNotNullAndNotEmpty(NON_EMPTY_ARRAY, "array");
Validate.checkNotNullAndNotEmpty(NON_EMPTY_BYTE_ARRAY, "array");
Validate.checkNotNullAndNotEmpty(NON_EMPTY_SHORT_ARRAY, "array");
Validate.checkNotNullAndNotEmpty(NON_EMPTY_INT_ARRAY, "array");
Validate.checkNotNullAndNotEmpty(NON_EMPTY_LONG_ARRAY, "array");
// Verify it throws.
intercept(IllegalArgumentException.class, "'string' must not be empty",
() -> Validate.checkNotNullAndNotEmpty("", "string"));
intercept(IllegalArgumentException.class, "'array' must not be null", () ->
Validate.checkNotNullAndNotEmpty(SampleDataForTests.NULL_ARRAY,
"array"));
intercept(IllegalArgumentException.class,
"'array' must have at least one element", () ->
Validate.checkNotNullAndNotEmpty(SampleDataForTests.EMPTY_ARRAY,
"array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must not be null",
() -> Validate.checkNotNullAndNotEmpty(NULL_BYTE_ARRAY, "array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must have at least one element",
() -> Validate.checkNotNullAndNotEmpty(EMPTY_BYTE_ARRAY, "array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must not be null",
() -> Validate.checkNotNullAndNotEmpty(NULL_SHORT_ARRAY, "array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must have at least one element",
() -> Validate.checkNotNullAndNotEmpty(EMPTY_SHORT_ARRAY, "array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must not be null",
() -> Validate.checkNotNullAndNotEmpty(NULL_INT_ARRAY, "array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must have at least one element",
() -> Validate.checkNotNullAndNotEmpty(EMPTY_INT_ARRAY, "array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must not be null",
() -> Validate.checkNotNullAndNotEmpty(NULL_LONG_ARRAY, "array"));
ExceptionAsserts.assertThrows(
IllegalArgumentException.class,
"'array' must have at least one element",
() -> Validate.checkNotNullAndNotEmpty(EMPTY_LONG_ARRAY, "array"));
}
|
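The focal method shows only the String overload; the test also exercises array overloads. A sketch of the byte[] variant, mirroring the messages the test asserts:

public static void checkNotNullAndNotEmpty(byte[] array, String argName) {
    checkNotNull(array, argName);
    checkArgument(
        array.length > 0,
        "'%s' must have at least one element.",
        argName);
}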
@SuppressWarnings("unused") // Part of required API.
public void execute(
final ConfiguredStatement<InsertValues> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
final InsertValues insertValues = statement.getStatement();
final MetaStore metaStore = executionContext.getMetaStore();
final KsqlConfig config = statement.getSessionConfig().getConfig(true);
final DataSource dataSource = getDataSource(config, metaStore, insertValues);
validateInsert(insertValues.getColumns(), dataSource);
final ProducerRecord<byte[], byte[]> record =
buildRecord(statement, metaStore, dataSource, serviceContext);
try {
producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps());
} catch (final TopicAuthorizationException e) {
// TopicAuthorizationException does not give much detailed information about why it failed,
// except which topics are denied. Here we just add the ACL to make the error message
// consistent with other authorization error messages.
final Exception rootCause = new KsqlTopicAuthorizationException(
AclOperation.WRITE,
e.unauthorizedTopics()
);
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause);
} catch (final ClusterAuthorizationException e) {
// ClusterAuthorizationException is thrown when using idempotent producers
// and either a topic write permission or a cluster-level idempotent write
// permission (only applicable for broker versions no later than 2.8) is
// missing. In this case, we include additional context to help the user
// distinguish this type of failure from other permissions exceptions
// such as the ones thrown above when TopicAuthorizationException is caught.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} catch (final KafkaException e) {
if (e.getCause() instanceof ClusterAuthorizationException) {
// The error message thrown when an idempotent producer is missing permissions
// is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException,
// as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException.
// ksqlDB handles these two the same way, accordingly.
// See https://issues.apache.org/jira/browse/KAFKA-14138 for more.
throw new KsqlException(
createInsertFailedExceptionMessage(insertValues),
createClusterAuthorizationExceptionRootCause(dataSource)
);
} else {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
} catch (final Exception e) {
throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e);
}
}
|
@Test
public void shouldInsertValuesIntoHeaderSchemaValueColumns() {
// Given:
givenSourceStreamWithSchema(SCHEMA_WITH_HEADERS, SerdeFeatures.of(), SerdeFeatures.of());
final ConfiguredStatement<InsertValues> statement = givenInsertValues(
ImmutableList.of(K0, COL0, COL1),
ImmutableList.of(
new StringLiteral("key"),
new StringLiteral("str"),
new LongLiteral(2L)
)
);
// When:
executor.execute(statement, mock(SessionProperties.class), engine, serviceContext);
// Then:
verify(producer).send(new ProducerRecord<>(TOPIC_NAME, null, 1L, KEY, VALUE));
}
|
@Override
public void validate(String methodName, Class<?>[] parameterTypes, Object[] arguments) throws Exception {
List<Class<?>> groups = new ArrayList<>();
Class<?> methodClass = methodClass(methodName);
if (methodClass != null) {
groups.add(methodClass);
}
Method method = clazz.getMethod(methodName, parameterTypes);
Class<?>[] methodClasses;
if (method.isAnnotationPresent(MethodValidated.class)) {
methodClasses = method.getAnnotation(MethodValidated.class).value();
groups.addAll(Arrays.asList(methodClasses));
}
// add into default group
groups.add(0, Default.class);
groups.add(1, clazz);
// convert list to array
Class<?>[] classGroups = groups.toArray(new Class[0]);
Set<ConstraintViolation<?>> violations = new HashSet<>();
Object parameterBean = getMethodParameterBean(clazz, method, arguments);
if (parameterBean != null) {
violations.addAll(validator.validate(parameterBean, classGroups));
}
for (Object arg : arguments) {
validate(violations, arg, classGroups);
}
if (!violations.isEmpty()) {
logger.info("Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: "
+ violations);
throw new ConstraintViolationException(
"Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: "
+ violations,
violations);
}
}
|
@Test
void testItWithNestedParameterValidationWithNullParam() {
Assertions.assertThrows(ValidationException.class, () -> {
URL url = URL.valueOf(
"test://test:11/org.apache.dubbo.validation.support.jvalidation.mock.JValidatorTestTarget");
JValidator jValidator = new JValidator(url);
jValidator.validate(
"someMethod7", new Class<?>[] {JValidatorTestTarget.BaseParam.class}, new Object[] {null});
});
}
|
@Override
public boolean supportsConvert() {
return false;
}
|
@Test
void assertSupportsConvertWithParameter() {
assertFalse(metaData.supportsConvert(0, 0));
}
|
@Override
public String getDescription() {
return "Write metadata";
}
|
@Test
public void getDescription_is_defined() {
assertThat(underTest.getDescription()).isNotEmpty();
}
|
@Override
public ItemChangeSets resolve(long namespaceId, String configText, List<ItemDTO> baseItems) {
Map<Integer, ItemDTO> oldLineNumMapItem = BeanUtils.mapByKey("lineNum", baseItems);
Map<String, ItemDTO> oldKeyMapItem = BeanUtils.mapByKey("key", baseItems);
// comment and blank items are keyed by the empty string; drop them from the key map.
oldKeyMapItem.remove("");
String[] newItems = configText.split(ITEM_SEPARATOR);
Set<String> repeatKeys = new HashSet<>();
if (isHasRepeatKey(newItems, repeatKeys)) {
throw new BadRequestException("Config text has repeated keys: %s, please check your input.", repeatKeys);
}
ItemChangeSets changeSets = new ItemChangeSets();
Map<Integer, String> newLineNumMapItem = new HashMap<>(); // used later to delete blank and comment items
int lineCounter = 1;
for (String newItem : newItems) {
newItem = newItem.trim();
newLineNumMapItem.put(lineCounter, newItem);
ItemDTO oldItemByLine = oldLineNumMapItem.get(lineCounter);
//comment item
if (isCommentItem(newItem)) {
handleCommentLine(namespaceId, oldItemByLine, newItem, lineCounter, changeSets);
//blank item
} else if (isBlankItem(newItem)) {
handleBlankLine(namespaceId, oldItemByLine, lineCounter, changeSets);
//normal item
} else {
handleNormalLine(namespaceId, oldKeyMapItem, newItem, lineCounter, changeSets);
}
lineCounter++;
}
deleteCommentAndBlankItem(oldLineNumMapItem, newLineNumMapItem, changeSets);
deleteNormalKVItem(oldKeyMapItem, changeSets);
return changeSets;
}
|
@Test
public void testAddCommentAndBlankItem() {
ItemChangeSets changeSets = resolver.resolve(1, "#ddd\na=b\n\nb=c\nc=d", mockBaseItemHas3Key());
Assert.assertEquals(2, changeSets.getCreateItems().size());
Assert.assertEquals(3, changeSets.getUpdateItems().size());
}
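The counts in the test above can be traced (assuming mockBaseItemHas3Key() returns the keys a, b and c at line numbers 1 to 3): the new text "#ddd\na=b\n\nb=c\nc=d" introduces a comment line and a blank line, giving 2 creates, while the three existing keys keep their values but shift to new line numbers, giving 3 updates.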
|
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
try {
if(implementations.containsKey(Command.mlsd)) {
// Note that there is no distinct FEAT output for MLSD. The presence of the MLST feature
// indicates that both MLST and MLSD are supported.
if(session.getClient().hasFeature(FTPCmd.MLST.getCommand())) {
try {
return this.post(directory, implementations.get(Command.mlsd).list(directory, listener), listener);
}
catch(InteroperabilityException e) {
this.remove(Command.mlsd);
}
}
else {
this.remove(Command.mlsd);
}
}
if(implementations.containsKey(Command.stat)) {
try {
return this.post(directory, implementations.get(Command.stat).list(directory, listener), listener);
}
catch(InteroperabilityException | AccessDeniedException | NotfoundException e) {
this.remove(Command.stat);
}
}
if(implementations.containsKey(Command.lista)) {
try {
return this.post(directory, implementations.get(Command.lista).list(directory, listener), listener);
}
catch(FTPInvalidListException e) {
// Empty directory listing. #7737
}
catch(InteroperabilityException e) {
this.remove(Command.lista);
}
}
try {
return this.post(directory, implementations.get(Command.list).list(directory, listener), listener);
}
catch(FTPInvalidListException f) {
// Empty directory listing
return this.post(directory, f.getParsed(), listener);
}
}
catch(IOException e) {
throw new FTPExceptionMappingService().map("Listing directory {0} failed", e, directory);
}
}
|
@Test(expected = NotfoundException.class)
public void testListNotfound() throws Exception {
final Path f = new Path(UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
final FTPListService service = new FTPListService(session, null, TimeZone.getDefault());
service.list(f, new DisabledListProgressListener());
}
|
@Override
public void blame(BlameInput input, BlameOutput result) {
for (InputFile inputFile : input.filesToBlame()) {
processFile(inputFile, result);
}
}
|
@Test
public void testBlameWithRelativeDate() throws IOException {
File source = new File(baseDir, "src/foo.xoo");
FileUtils.write(source, "sample content");
File scm = new File(baseDir, "src/foo.xoo.scm");
FileUtils.write(scm, "123,julien,-10\n234,julien,-10");
DefaultInputFile inputFile = new TestInputFileBuilder("foo", "src/foo.xoo")
.setLanguage(Xoo.KEY)
.setModuleBaseDir(baseDir.toPath())
.build();
fs.add(inputFile);
BlameOutput result = mock(BlameOutput.class);
when(input.filesToBlame()).thenReturn(Arrays.asList(inputFile));
new XooBlameCommand().blame(input, result);
Predicate<Date> datePredicate = argument -> {
Date approximate = DateUtils.addDays(new Date(), -10);
return argument.getTime() > approximate.getTime() - 5000 && argument.getTime() < approximate.getTime() + 5000;
};
ArgumentCaptor<List<BlameLine>> blameLinesCaptor = ArgumentCaptor.forClass(List.class);
verify(result).blameResult(eq(inputFile), blameLinesCaptor.capture());
assertThat(blameLinesCaptor.getValue())
.extracting(BlameLine::date)
.allMatch(datePredicate);
}
|
public static NativeReader<WindowedValue<?>> create(
final CloudObject spec,
final PipelineOptions options,
DataflowExecutionContext executionContext)
throws Exception {
@SuppressWarnings("unchecked")
final Source<Object> source = (Source<Object>) deserializeFromCloudSource(spec);
if (source instanceof BoundedSource) {
@SuppressWarnings({"unchecked", "rawtypes"})
NativeReader<WindowedValue<?>> reader =
(NativeReader)
new NativeReader<WindowedValue<Object>>() {
@Override
public NativeReaderIterator<WindowedValue<Object>> iterator() throws IOException {
return new BoundedReaderIterator<>(
((BoundedSource<Object>) source).createReader(options));
}
};
return reader;
} else if (source instanceof UnboundedSource) {
@SuppressWarnings({"unchecked", "rawtypes"})
NativeReader<WindowedValue<?>> reader =
(NativeReader)
new UnboundedReader<Object>(
options, spec, (StreamingModeExecutionContext) executionContext);
return reader;
} else {
throw new IllegalArgumentException("Unexpected source kind: " + source.getClass());
}
}
|
@Test
public void testReadUnboundedReader() throws Exception {
CounterSet counterSet = new CounterSet();
StreamingModeExecutionStateRegistry executionStateRegistry =
new StreamingModeExecutionStateRegistry();
ReaderCache readerCache = new ReaderCache(Duration.standardMinutes(1), Runnable::run);
StreamingModeExecutionContext context =
new StreamingModeExecutionContext(
counterSet,
COMPUTATION_ID,
readerCache,
/*stateNameMap=*/ ImmutableMap.of(),
/*stateCache=*/ null,
StreamingStepMetricsContainer.createRegistry(),
new DataflowExecutionStateTracker(
ExecutionStateSampler.newForTest(),
executionStateRegistry.getState(
NameContext.forStage("stageName"), "other", null, NoopProfileScope.NOOP),
counterSet,
PipelineOptionsFactory.create(),
"test-work-item-id"),
executionStateRegistry,
Long.MAX_VALUE);
options.setNumWorkers(5);
int maxElements = 10;
DataflowPipelineDebugOptions debugOptions = options.as(DataflowPipelineDebugOptions.class);
debugOptions.setUnboundedReaderMaxElements(maxElements);
ByteString state = ByteString.EMPTY;
for (int i = 0; i < 10 * maxElements;
/* Incremented in inner loop */ ) {
// Initialize streaming context with state from previous iteration.
context.start(
"key",
createMockWork(
Windmill.WorkItem.newBuilder()
.setKey(ByteString.copyFromUtf8("0000000000000001")) // key is zero-padded index.
.setWorkToken(i) // Must be increasing across activations for cache to be used.
.setCacheToken(1)
.setSourceState(
Windmill.SourceState.newBuilder().setState(state).build()) // Source state.
.build(),
Watermarks.builder().setInputDataWatermark(new Instant(0)).build()),
mock(WindmillStateReader.class),
mock(SideInputStateFetcher.class),
OperationalLimits.builder().build(),
Windmill.WorkItemCommitRequest.newBuilder());
@SuppressWarnings({"unchecked", "rawtypes"})
NativeReader<WindowedValue<ValueWithRecordId<KV<Integer, Integer>>>> reader =
(NativeReader)
WorkerCustomSources.create(
(CloudObject)
serializeToCloudSource(new TestCountingSource(Integer.MAX_VALUE), options)
.getSpec(),
options,
context);
// Verify data.
Instant beforeReading = Instant.now();
int numReadOnThisIteration = 0;
for (WindowedValue<ValueWithRecordId<KV<Integer, Integer>>> value :
ReaderUtils.readAllFromReader(reader)) {
assertEquals(KV.of(0, i), value.getValue().getValue());
assertArrayEquals(
encodeToByteArray(KvCoder.of(VarIntCoder.of(), VarIntCoder.of()), KV.of(0, i)),
value.getValue().getId());
assertThat(value.getWindows(), contains((BoundedWindow) GlobalWindow.INSTANCE));
assertEquals(i, value.getTimestamp().getMillis());
i++;
numReadOnThisIteration++;
}
Instant afterReading = Instant.now();
long maxReadSec = debugOptions.getUnboundedReaderMaxReadTimeSec();
assertThat(
new Duration(beforeReading, afterReading).getStandardSeconds(),
lessThanOrEqualTo(maxReadSec + 1));
assertThat(
numReadOnThisIteration, lessThanOrEqualTo(debugOptions.getUnboundedReaderMaxElements()));
// Extract and verify state modifications.
context.flushState();
state = context.getOutputBuilder().getSourceStateUpdates().getState();
// CountingSource's watermark is the last record + 1. i is now one past the last record,
// so the expected watermark is i millis.
assertEquals(
TimeUnit.MILLISECONDS.toMicros(i), context.getOutputBuilder().getSourceWatermark());
assertEquals(
1, context.getOutputBuilder().getSourceStateUpdates().getFinalizeIdsList().size());
assertNotNull(
readerCache.acquireReader(
context.getComputationKey(),
context.getWorkItem().getCacheToken(),
context.getWorkToken() + 1));
assertEquals(7L, context.getBacklogBytes());
}
}
|
@Override
public Row poll() {
return poll(Duration.ZERO);
}
|
@Test
public void shouldDeliverBufferedRowsIfComplete() throws Exception {
// Given
givenPublisherAcceptsOneRow();
completeQueryResult();
// When
final Row receivedRow = queryResult.poll();
// Then
assertThat(receivedRow, is(row));
}
|
public static boolean isDirectory(URL resourceURL) throws URISyntaxException {
final String protocol = resourceURL.getProtocol();
switch (protocol) {
case "jar":
try {
final JarURLConnection jarConnection = (JarURLConnection) resourceURL.openConnection();
final JarEntry entry = jarConnection.getJarEntry();
if (entry.isDirectory()) {
return true;
}
// WARNING! Heuristics ahead.
// It turns out that JarEntry#isDirectory() really just tests whether the filename ends in a '/'.
// If you try to open the same URL without a trailing '/', it'll succeed — but the result won't be
// what you want. We try to get around this by calling getInputStream() on the file inside the jar.
// This seems to return null for directories (though that behavior is undocumented as far as I
// can tell). If you have a better idea, please improve this.
final String relativeFilePath = entry.getName();
final JarFile jarFile = jarConnection.getJarFile();
final ZipEntry zipEntry = jarFile.getEntry(relativeFilePath);
final InputStream inputStream = jarFile.getInputStream(zipEntry);
return inputStream == null;
} catch (IOException e) {
throw new ResourceNotFoundException(e);
}
case "file":
return new File(resourceURL.toURI()).isDirectory();
default:
throw new IllegalArgumentException("Unsupported protocol " + resourceURL.getProtocol() +
" for resource " + resourceURL);
}
}
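A small illustration of the trailing-slash behaviour that the heuristic above works around (the jar path is hypothetical):

// JarEntry#isDirectory() keys off the trailing '/', so two URLs naming
// the same directory are classified differently:
URL withSlash = new URL("jar:file:/tmp/app.jar!/conf/"); // entry.isDirectory() == true
URL withoutSlash = new URL("jar:file:/tmp/app.jar!/conf"); // entry.isDirectory() == false
// For the second form, only the getInputStream() null-check above
// reveals that the entry is really a directory.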
|
@Test
void isDirectoryReturnsTrueForURLEncodedDirectoriesInJars() throws Exception {
final URL url = new URL("jar:" + resourceJar.toExternalForm() + "!/dir%20with%20space/");
assertThat(url.getProtocol()).isEqualTo("jar");
assertThat(ResourceURL.isDirectory(url)).isTrue();
}
|
@PostConstruct
public void init() {
rejectTips = RateLimitUtils.getRejectTips(polarisRateLimitProperties);
}
|
@Test
public void testInit() {
quotaCheckReactiveFilter.init();
try {
Field rejectTips = QuotaCheckReactiveFilter.class.getDeclaredField("rejectTips");
rejectTips.setAccessible(true);
assertThat(rejectTips.get(quotaCheckReactiveFilter)).isEqualTo("RejectRequestTips提示消息");
}
catch (NoSuchFieldException | IllegalAccessException e) {
fail("Exception encountered.", e);
}
}
|
@Override
public ObjectNode encode(Instruction instruction, CodecContext context) {
checkNotNull(instruction, "Instruction cannot be null");
return new EncodeInstructionCodecHelper(instruction, context).encode();
}
|
@Test
public void outputInstructionTest() {
final Instructions.OutputInstruction instruction =
Instructions.createOutput(PortNumber.portNumber(22));
final ObjectNode instructionJson =
instructionCodec.encode(instruction, context);
assertThat(instructionJson, matchesInstruction(instruction));
}
|
@Override
public InputStream getInputStream() {
return new RedissonInputStream();
}
|
@Test
public void testRead() throws IOException {
RBinaryStream stream = redisson.getBinaryStream("test");
byte[] value = {1, 2, 3, 4, 5, (byte)0xFF};
stream.set(value);
InputStream s = stream.getInputStream();
int b = 0;
byte[] readValue = new byte[6];
int i = 0;
while (true) {
b = s.read();
if (b == -1) {
break;
}
readValue[i] = (byte) b;
i++;
}
assertThat(readValue).isEqualTo(value);
}
|
@VisibleForTesting
static Optional<Dependency> parseDependency(String line) {
Matcher dependencyMatcher = SHADE_INCLUDE_MODULE_PATTERN.matcher(line);
if (!dependencyMatcher.find()) {
return Optional.empty();
}
return Optional.of(
Dependency.create(
dependencyMatcher.group("groupId"),
dependencyMatcher.group("artifactId"),
dependencyMatcher.group("version"),
dependencyMatcher.group("classifier")));
}
|
@Test
void testLineParsingGroupId() {
assertThat(
ShadeParser.parseDependency(
"Including external:dependency1:jar:1.0 in the shaded jar."))
.hasValueSatisfying(
dependency -> assertThat(dependency.getGroupId()).isEqualTo("external"));
}
|
public static void disablePullConsumption(DefaultLitePullConsumerWrapper wrapper, Set<String> topics) {
Set<String> subscribedTopic = wrapper.getSubscribedTopics();
if (subscribedTopic.stream().anyMatch(topics::contains)) {
suspendPullConsumer(wrapper);
return;
}
resumePullConsumer(wrapper);
}
|
@Test
public void testDisablePullConsumptionWithAssignSubTractTopics() {
subscribedTopics = new HashSet<>();
subscribedTopics.add("test-topic-1");
subscribedTopics.add("test-topic-2");
pullConsumerWrapper.setSubscribedTopics(subscribedTopics);
pullConsumerWrapper.setSubscriptionType(SubscriptionType.ASSIGN);
pullConsumerWrapper.setAssignedMessageQueue(assignedMessageQueue);
RocketMqPullConsumerController.disablePullConsumption(pullConsumerWrapper, prohibitionTopics);
Mockito.verify(assignedMessageQueue, Mockito.times(1)).updateAssignedMessageQueue(
Mockito.any());
}
|
@GetMapping("list")
public String getProductsList(Model model, @RequestParam(name = "filter", required = false) String filter) {
model.addAttribute("products", this.productsRestClient.findAllProducts(filter));
model.addAttribute("filter", filter);
return "catalogue/products/list";
}
|
@Test
void getProductsList_ReturnsProductsListPage() {
// given
var model = new ConcurrentModel();
var filter = "товар";
var products = IntStream.range(1, 4)
.mapToObj(i -> new Product(i, "Товар №%d".formatted(i),
"Описание товара №%d".formatted(i)))
.toList();
doReturn(products).when(this.productsRestClient).findAllProducts(filter);
// when
var result = this.controller.getProductsList(model, filter);
// then
assertEquals("catalogue/products/list", result);
assertEquals(filter, model.getAttribute("filter"));
assertEquals(products, model.getAttribute("products"));
}
|
public final void hasSize(int expectedSize) {
checkArgument(expectedSize >= 0, "expectedSize(%s) must be >= 0", expectedSize);
check("size()").that(checkNotNull(actual).size()).isEqualTo(expectedSize);
}
|
@Test
public void hasSize() {
assertThat(ImmutableMultimap.of(1, 2, 3, 4)).hasSize(2);
}
|
public PlanNode plan(Analysis analysis)
{
return planStatement(analysis, analysis.getStatement());
}
|
@Test
public void testRedundantLimitNodeRemoval()
{
String query = "SELECT count(*) FROM orders LIMIT 10";
assertFalse(
searchFrom(plan(query, OPTIMIZED).getRoot())
.where(LimitNode.class::isInstance)
.matches(),
format("Unexpected limit node for query: '%s'", query));
assertPlan(
"SELECT orderkey, count(*) FROM orders GROUP BY orderkey LIMIT 10",
output(
limit(10,
anyTree(
tableScan("orders")))));
assertPlan(
"SELECT * FROM (VALUES 1,2,3,4,5,6) AS t1 LIMIT 10",
output(
values(ImmutableList.of("x"))));
}
|
public static ClusterAllocationDiskSettings create(boolean enabled, String low, String high, String floodStage) {
if (!enabled) {
return ClusterAllocationDiskSettings.create(enabled, null);
}
return ClusterAllocationDiskSettings.create(enabled, createWatermarkSettings(low, high, floodStage));
}
|
@Test
public void createAbsoluteValueWatermarkSettings() throws Exception {
ClusterAllocationDiskSettings clusterAllocationDiskSettings = ClusterAllocationDiskSettingsFactory.create(true, "20Gb", "10Gb", "5Gb");
assertThat(clusterAllocationDiskSettings).isInstanceOf(ClusterAllocationDiskSettings.class);
assertThat(clusterAllocationDiskSettings.ThresholdEnabled()).isTrue();
assertThat(clusterAllocationDiskSettings.watermarkSettings()).isInstanceOf(AbsoluteValueWatermarkSettings.class);
AbsoluteValueWatermarkSettings settings = (AbsoluteValueWatermarkSettings) clusterAllocationDiskSettings.watermarkSettings();
assertThat(settings.type()).isEqualTo(WatermarkSettings.SettingsType.ABSOLUTE);
assertThat(settings.low()).isInstanceOf(ByteSize.class);
assertThat(settings.low().getBytes()).isEqualTo(21474836480L);
assertThat(settings.high()).isInstanceOf(ByteSize.class);
assertThat(settings.high().getBytes()).isEqualTo(10737418240L);
assertThat(settings.floodStage()).isInstanceOf(ByteSize.class);
assertThat(settings.floodStage().getBytes()).isEqualTo(5368709120L);
}
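The expected byte counts in this test are binary gigabytes: 20 * 1024^3 = 21474836480, 10 * 1024^3 = 10737418240, and 5 * 1024^3 = 5368709120.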
|
public T send() throws IOException {
return web3jService.send(this, responseType);
}
|
@Test
public void testShhVersion() throws Exception {
web3j.shhVersion().send();
verifyResult(
"{\"jsonrpc\":\"2.0\",\"method\":\"shh_version\"," + "\"params\":[],\"id\":1}");
}
|
public static Ethernet buildNdpAdv(Ip6Address srcIp,
MacAddress srcMac,
Ethernet request) {
checkNotNull(srcIp, "IP address cannot be null");
checkNotNull(srcMac, "MAC address cannot be null");
checkNotNull(request, "Request cannot be null");
checkArgument(request.getEtherType() == Ethernet.TYPE_IPV6,
"EtherType must be IPv6");
final IPv6 ipv6Request = (IPv6) request.getPayload();
checkArgument(ipv6Request.getNextHeader() == IPv6.PROTOCOL_ICMP6,
"Protocol must be ICMP6");
final ICMP6 icmpv6 = (ICMP6) ipv6Request.getPayload();
checkArgument(icmpv6.getIcmpType() == ICMP6.NEIGHBOR_SOLICITATION,
"ICMP6 type must be NEIGHBOR_SOLICITATION");
Ethernet eth = new Ethernet();
eth.setDestinationMACAddress(request.getSourceMAC());
eth.setSourceMACAddress(srcMac);
eth.setEtherType(Ethernet.TYPE_IPV6);
eth.setVlanID(request.getVlanID());
IPv6 ipv6 = new IPv6();
ipv6.setSourceAddress(srcIp.toOctets());
ipv6.setDestinationAddress(ipv6Request.getSourceAddress());
ipv6.setHopLimit(NDP_HOP_LIMIT);
ipv6.setNextHeader(IPv6.PROTOCOL_ICMP6);
ICMP6 icmp6 = new ICMP6();
icmp6.setIcmpType(ICMP6.NEIGHBOR_ADVERTISEMENT);
icmp6.setIcmpCode(RESERVED_CODE);
NeighborAdvertisement nadv = new NeighborAdvertisement();
nadv.setTargetAddress(srcIp.toOctets());
nadv.setSolicitedFlag(NDP_SOLICITED_FLAG);
nadv.setOverrideFlag(NDP_OVERRIDE_FLAG);
nadv.addOption(NeighborDiscoveryOptions.TYPE_TARGET_LL_ADDRESS,
srcMac.toBytes());
icmp6.setPayload(nadv);
ipv6.setPayload(icmp6);
eth.setPayload(ipv6);
return eth;
}
|
@Test
public void testBuildNdpAdv() {
Ethernet eth = new Ethernet();
eth.setSourceMACAddress(MAC_ADDRESS);
eth.setDestinationMACAddress(MAC_ADDRESS2);
IPv6 ipv6 = new IPv6();
ipv6.setSourceAddress(IPV6_SOURCE_ADDRESS);
ipv6.setDestinationAddress(IPV6_DESTINATION_ADDRESS);
ipv6.setNextHeader(IPv6.PROTOCOL_ICMP6);
eth.setEtherType(Ethernet.TYPE_IPV6);
eth.setPayload(ipv6);
ICMP6 icmp6 = new ICMP6();
icmp6.setIcmpType(ICMP6.NEIGHBOR_SOLICITATION);
icmp6.setIcmpCode(NeighborAdvertisement.RESERVED_CODE);
ipv6.setPayload(icmp6);
final Ethernet ethResponse = NeighborAdvertisement.buildNdpAdv(IP_6_ADDRESS, MAC_ADDRESS2, eth);
assertTrue(ethResponse.getDestinationMAC().equals(MAC_ADDRESS));
assertTrue(ethResponse.getSourceMAC().equals(MAC_ADDRESS2));
assertTrue(ethResponse.getEtherType() == Ethernet.TYPE_IPV6);
final IPv6 responseIpv6 = (IPv6) ethResponse.getPayload();
assertArrayEquals(responseIpv6.getSourceAddress(), ipv6.getDestinationAddress());
assertArrayEquals(responseIpv6.getDestinationAddress(), ipv6.getSourceAddress());
assertTrue(responseIpv6.getNextHeader() == IPv6.PROTOCOL_ICMP6);
final ICMP6 responseIcmp6 = (ICMP6) responseIpv6.getPayload();
assertTrue(responseIcmp6.getIcmpType() == ICMP6.NEIGHBOR_ADVERTISEMENT);
assertTrue(responseIcmp6.getIcmpCode() == NeighborAdvertisement.RESERVED_CODE);
final NeighborAdvertisement responseNadv = (NeighborAdvertisement) responseIcmp6.getPayload();
assertArrayEquals(responseNadv.getTargetAddress(), IPV6_DESTINATION_ADDRESS);
assertTrue(responseNadv.getSolicitedFlag() == NeighborAdvertisement.NDP_SOLICITED_FLAG);
assertTrue(responseNadv.getOverrideFlag() == NeighborAdvertisement.NDP_OVERRIDE_FLAG);
assertThat(responseNadv.getOptions(),
hasItem(hasOption(NeighborDiscoveryOptions.TYPE_TARGET_LL_ADDRESS, MAC_ADDRESS2.toBytes())));
}
|
public static ContentClusterStats generate(Distributor distributor) {
Map<Integer, ContentNodeStats> mapToNodeStats = new HashMap<>();
for (StorageNode storageNode : distributor.getStorageNodes()) {
mapToNodeStats.put(storageNode.getIndex(), new ContentNodeStats(storageNode));
}
return new ContentClusterStats(mapToNodeStats);
}
|
@Test
void testContentNodeStats() throws IOException {
String data = getJsonString();
HostInfo hostInfo = HostInfo.createHostInfo(data);
ContentClusterStats clusterStats = StorageNodeStatsBridge.generate(hostInfo.getDistributor());
Iterator<ContentNodeStats> itr = clusterStats.iterator();
{ // content node 0
ContentNodeStats stats = itr.next();
assertThat(stats.getNodeIndex(), is(0));
assertThat(stats.getBucketSpaces().size(), is(2));
assertBucketSpaceStats(11, 3, stats.getBucketSpaces().get("default"));
assertBucketSpaceStats(13, 5, stats.getBucketSpaces().get("global"));
}
{ // content node 1
ContentNodeStats stats = itr.next();
assertThat(stats.getNodeIndex(), is(1));
assertThat(stats.getBucketSpaces().size(), is(1));
assertBucketSpaceStats(0, 0, stats.getBucketSpaces().get("default"));
}
assertFalse(itr.hasNext());
}
|
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
final ConfiguredStatement<T> statement
) {
try {
if (statement.getStatement() instanceof CreateAsSelect) {
registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
} else if (statement.getStatement() instanceof CreateSource) {
registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
}
} catch (final KsqlStatementException e) {
throw e;
} catch (final KsqlException e) {
throw new KsqlStatementException(
ErrorMessageUtil.buildErrorMessage(e),
statement.getMaskedStatementText(),
e.getCause());
}
// Remove schema id from SessionConfig
return stripSchemaIdConfig(statement);
}
|
@Test
public void shouldNotRegisterSchemaForSchemaRegistryDisabledFormatCreateSource() {
// Given:
givenStatement("CREATE STREAM sink (f1 VARCHAR) WITH(kafka_topic='expectedName', key_format='KAFKA', value_format='DELIMITED', partitions=1);");
// When:
injector.inject(statement);
// Then:
verifyNoMoreInteractions(schemaRegistryClient);
}
|
static void setEntryValue( StepInjectionMetaEntry entry, RowMetaAndData row, SourceStepField source )
throws KettleValueException {
// A standard attribute, a single row of data...
//
Object value = null;
switch ( entry.getValueType() ) {
case ValueMetaInterface.TYPE_STRING:
value = row.getString( source.getField(), null );
break;
case ValueMetaInterface.TYPE_BOOLEAN:
value = row.getBoolean( source.getField(), false );
break;
case ValueMetaInterface.TYPE_INTEGER:
value = row.getInteger( source.getField(), 0L );
break;
case ValueMetaInterface.TYPE_NUMBER:
value = row.getNumber( source.getField(), 0.0D );
break;
case ValueMetaInterface.TYPE_DATE:
value = row.getDate( source.getField(), null );
break;
case ValueMetaInterface.TYPE_BIGNUMBER:
value = row.getBigNumber( source.getField(), null );
break;
default:
break;
}
entry.setValue( value );
}
|
@Test
public void setEntryValue_date() throws KettleValueException {
StepInjectionMetaEntry entry = mock( StepInjectionMetaEntry.class );
doReturn( ValueMetaInterface.TYPE_DATE ).when( entry ).getValueType();
RowMetaAndData row = createRowMetaAndData( new ValueMetaDate( TEST_FIELD ), null );
SourceStepField sourceField = new SourceStepField( TEST_SOURCE_STEP_NAME, TEST_FIELD );
MetaInject.setEntryValue( entry, row, sourceField );
verify( entry ).setValue( null );
}
|
@Restricted(NoExternalUse.class)
public static String[] printLogRecordHtml(LogRecord r, LogRecord prior) {
String[] oldParts = prior == null ? new String[4] : logRecordPreformat(prior);
String[] newParts = logRecordPreformat(r);
for (int i = 0; i < /* not 4 */3; i++) {
newParts[i] = "<span class='" + (newParts[i].equals(oldParts[i]) ? "logrecord-metadata-old" : "logrecord-metadata-new") + "'>" + newParts[i] + "</span>";
}
newParts[3] = Util.xmlEscape(newParts[3]);
return newParts;
}
|
@Test public void printLogRecordHtmlNoLogger() {
LogRecord lr = new LogRecord(Level.INFO, "<discarded/>");
assertEquals("<discarded/>\n", Functions.printLogRecordHtml(lr, null)[3]);
}
|
@Override
public Comparable convert(Comparable value) {
if (!(value instanceof CompositeValue)) {
throw new IllegalArgumentException("Cannot convert [" + value + "] to composite");
}
CompositeValue compositeValue = (CompositeValue) value;
Comparable[] components = compositeValue.getComponents();
Comparable[] converted = new Comparable[components.length];
for (int i = 0; i < components.length; ++i) {
Comparable component = components[i];
if (component == NULL || component == NEGATIVE_INFINITY || component == POSITIVE_INFINITY) {
converted[i] = component;
} else {
converted[i] = converters[i].convert(component);
}
}
return new CompositeValue(converted);
}
|
@Test(expected = IllegalArgumentException.class)
public void testConversionAcceptsOnlyCompositeValues() {
converter(STRING_CONVERTER).convert("value");
}
|
public Set<Analysis.AliasedDataSource> extractDataSources(final AstNode node) {
new Visitor().process(node, null);
return getAllSources();
}
|
@Test
public void shouldThrowIfInnerJoinSourceDoesNotExist() {
// Given:
final AstNode stmt = givenQuery("SELECT * FROM TEST1 JOIN UNKNOWN"
+ " ON test1.col1 = UNKNOWN.col1;");
// When:
final Exception e = assertThrows(
KsqlException.class,
() -> extractor.extractDataSources(stmt)
);
// Then:
assertThat(e.getMessage(), containsString(
"UNKNOWN does not exist."));
}
|
public static org.apache.iceberg.Table loadIcebergTable(SparkSession spark, String name)
throws ParseException, NoSuchTableException {
CatalogAndIdentifier catalogAndIdentifier = catalogAndIdentifier(spark, name);
TableCatalog catalog = asTableCatalog(catalogAndIdentifier.catalog);
Table sparkTable = catalog.loadTable(catalogAndIdentifier.identifier);
return toIcebergTable(sparkTable);
}
|
@Test
public void testLoadIcebergTable() throws Exception {
spark.conf().set("spark.sql.catalog.hive", SparkCatalog.class.getName());
spark.conf().set("spark.sql.catalog.hive.type", "hive");
spark.conf().set("spark.sql.catalog.hive.default-namespace", "default");
String tableFullName = "hive.default.tbl";
sql("CREATE TABLE %s (c1 bigint, c2 string, c3 string) USING iceberg", tableFullName);
Table table = Spark3Util.loadIcebergTable(spark, tableFullName);
Assert.assertEquals(tableFullName, table.name());
}
|
public Schema toKsqlSchema(final Schema schema) {
try {
final Schema rowSchema = toKsqlFieldSchema(schema);
if (rowSchema.type() != Schema.Type.STRUCT) {
throw new KsqlException("KSQL stream/table schema must be structured");
}
if (rowSchema.fields().isEmpty()) {
throw new KsqlException("Schema does not include any columns with "
+ "types that ksqlDB supports."
+ System.lineSeparator()
+ "schema: " + FORMATTER.format(schema));
}
return rowSchema;
} catch (final UnsupportedTypeException e) {
throw new KsqlException("Unsupported type at root of schema: " + e.getMessage(), e);
}
}
|
@Test
public void shouldTranslateMaps() {
final Schema connectSchema = SchemaBuilder
.struct()
.field("mapField", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA))
.build();
final Schema ksqlSchema = translator.toKsqlSchema(connectSchema);
assertThat(ksqlSchema.field(nameTranslator.apply("mapField")), notNullValue());
final Schema mapSchema = ksqlSchema.field(nameTranslator.apply("mapField")).schema();
assertThat(mapSchema.type(), equalTo(Schema.Type.MAP));
assertThat(mapSchema.keySchema(), equalTo(Schema.OPTIONAL_STRING_SCHEMA));
assertThat(mapSchema.valueSchema(), equalTo(Schema.OPTIONAL_INT32_SCHEMA));
}
|
@Override
public void run() {
int numFilesCleaned = 0;
long diskSpaceCleaned = 0L;
try (Timer.Context t = cleanupRoutineDuration.time()) {
final long nowMills = Time.currentTimeMillis();
Set<Path> oldLogDirs = selectDirsForCleanup(nowMills);
final long nowSecs = TimeUnit.MILLISECONDS.toSeconds(nowMills);
SortedSet<Path> deadWorkerDirs = getDeadWorkerDirs((int) nowSecs, oldLogDirs);
LOG.debug("log cleanup: now={} old log dirs {} dead worker dirs {}", nowSecs,
oldLogDirs.stream().map(p -> p.getFileName().toString()).collect(joining(",")),
deadWorkerDirs.stream().map(p -> p.getFileName().toString()).collect(joining(",")));
for (Path dir : deadWorkerDirs) {
Path path = dir.toAbsolutePath().normalize();
long sizeInBytes = sizeOfDir(dir);
LOG.info("Cleaning up: Removing {}, {} KB", path, sizeInBytes * 1e-3);
try {
Utils.forceDelete(path.toString());
cleanupEmptyTopoDirectory(dir);
numFilesCleaned++;
diskSpaceCleaned += sizeInBytes;
} catch (Exception ex) {
numFileRemovalExceptions.mark();
LOG.error(ex.getMessage(), ex);
}
}
final List<DeletionMeta> perWorkerDirCleanupMeta = perWorkerDirCleanup(maxPerWorkerLogsSizeMb * 1024 * 1024);
numFilesCleaned += perWorkerDirCleanupMeta.stream().mapToInt(meta -> meta.deletedFiles).sum();
diskSpaceCleaned += perWorkerDirCleanupMeta.stream().mapToLong(meta -> meta.deletedSize).sum();
final DeletionMeta globalLogCleanupMeta = globalLogCleanup(maxSumWorkerLogsSizeMb * 1024 * 1024);
numFilesCleaned += globalLogCleanupMeta.deletedFiles;
diskSpaceCleaned += globalLogCleanupMeta.deletedSize;
} catch (Exception ex) {
numCleanupExceptions.mark();
LOG.error("Exception while cleaning up old log.", ex);
}
numFilesCleanedUp.update(numFilesCleaned);
diskSpaceFreed.update(diskSpaceCleaned);
}
|
@Test
public void testCleanupFn() throws IOException {
try (TmpPath dir1 = new TmpPath(); TmpPath dir2 = new TmpPath()) {
Files.createDirectory(dir1.getFile().toPath());
Files.createDirectory(dir2.getFile().toPath());
Map<String, Object> conf = Utils.readStormConfig();
StormMetricsRegistry metricRegistry = new StormMetricsRegistry();
WorkerLogs stubbedWorkerLogs = new WorkerLogs(conf, Paths.get(""), metricRegistry);
LogCleaner logCleaner = new LogCleaner(conf, stubbedWorkerLogs, new DirectoryCleaner(metricRegistry), null, metricRegistry) {
@Override
Set<Path> selectDirsForCleanup(long nowMillis) {
return Collections.emptySet();
}
@Override
SortedSet<Path> getDeadWorkerDirs(int nowSecs, Set<Path> logDirs) {
SortedSet<Path> dirs = new TreeSet<>();
dirs.add(dir1.getFile().toPath());
dirs.add(dir2.getFile().toPath());
return dirs;
}
@Override
void cleanupEmptyTopoDirectory(Path dir) {
}
};
logCleaner.run();
assertThat(Files.exists(dir1.getFile().toPath()), is(false));
assertThat(Files.exists(dir2.getFile().toPath()), is(false));
}
}
|
static void onHeartbeatReturned(final ThreadId id, final Status status, final AppendEntriesRequest request,
final AppendEntriesResponse response, final long rpcSendTime) {
if (id == null) {
// the replicator has already been destroyed.
return;
}
final long startTimeMs = Utils.nowMs();
Replicator r;
if ((r = (Replicator) id.lock()) == null) {
return;
}
boolean doUnlock = true;
try {
final boolean isLogDebugEnabled = LOG.isDebugEnabled();
StringBuilder sb = null;
if (isLogDebugEnabled) {
sb = new StringBuilder("Node ") //
.append(r.options.getGroupId()) //
.append(':') //
.append(r.options.getServerId()) //
.append(" received HeartbeatResponse from ") //
.append(r.options.getPeerId()) //
.append(" prevLogIndex=") //
.append(request.getPrevLogIndex()) //
.append(" prevLogTerm=") //
.append(request.getPrevLogTerm());
}
if (!status.isOk()) {
if (isLogDebugEnabled) {
sb.append(" fail, sleep, status=") //
.append(status);
LOG.debug(sb.toString());
}
r.setState(State.Probe);
notifyReplicatorStatusListener(r, ReplicatorEvent.ERROR, status);
if ((r.consecutiveErrorTimes++) % 10 == 0) {
LOG.warn("Fail to issue RPC to {}, consecutiveErrorTimes={}, error={}, groupId={}", r.options.getPeerId(),
r.consecutiveErrorTimes, status, r.options.getGroupId());
}
r.startHeartbeatTimer(startTimeMs);
return;
}
r.consecutiveErrorTimes = 0;
if (response.getTerm() > r.options.getTerm()) {
if (isLogDebugEnabled) {
sb.append(" fail, greater term ") //
.append(response.getTerm()) //
.append(" expect term ") //
.append(r.options.getTerm());
LOG.debug(sb.toString());
}
final NodeImpl node = r.options.getNode();
r.notifyOnCaughtUp(RaftError.EPERM.getNumber(), true);
r.destroy();
node.increaseTermTo(response.getTerm(), new Status(RaftError.EHIGHERTERMRESPONSE,
"Leader receives higher term heartbeat_response from peer:%s, group:%s", r.options.getPeerId(), r.options.getGroupId()));
return;
}
if (!response.getSuccess() && response.hasLastLogIndex()) {
if (isLogDebugEnabled) {
sb.append(" fail, response term ") //
.append(response.getTerm()) //
.append(" lastLogIndex ") //
.append(response.getLastLogIndex());
LOG.debug(sb.toString());
}
LOG.warn("Heartbeat to peer {} failure, try to send a probe request, groupId={}.", r.options.getPeerId(), r.options.getGroupId());
doUnlock = false;
r.sendProbeRequest();
r.startHeartbeatTimer(startTimeMs);
return;
}
if (isLogDebugEnabled) {
LOG.debug(sb.toString());
}
if (rpcSendTime > r.lastRpcSendTimestamp) {
r.lastRpcSendTimestamp = rpcSendTime;
}
r.startHeartbeatTimer(startTimeMs);
} finally {
if (doUnlock) {
id.unlock();
}
}
}
|
@Test
public void testOnHeartbeatReturnedTermMismatch() {
final Replicator r = getReplicator();
final RpcRequests.AppendEntriesRequest request = createEmptyEntriesRequest();
final RpcRequests.AppendEntriesResponse response = RpcRequests.AppendEntriesResponse.newBuilder() //
.setSuccess(false) //
.setLastLogIndex(12) //
.setTerm(2) //
.build();
this.id.unlock();
Replicator.onHeartbeatReturned(this.id, Status.OK(), request, response, Utils.monotonicMs());
Mockito.verify(this.node).increaseTermTo(
2,
new Status(RaftError.EHIGHERTERMRESPONSE,
"Leader receives higher term heartbeat_response from peer:%s, group:%s", this.peerId, this.node
.getGroupId()));
assertNull(r.id);
}
|
@Override
public <T> T target(FeignClientFactoryBean factory, Feign.Builder feign, FeignClientFactory context,
Target.HardCodedTarget<T> target) {
if (!(feign instanceof PolarisFeignCircuitBreaker.Builder)) {
return feign.target(target);
}
PolarisFeignCircuitBreaker.Builder builder = (PolarisFeignCircuitBreaker.Builder) feign;
String name = !StringUtils.hasText(factory.getContextId()) ? factory.getName() : factory.getContextId();
Class<?> fallback = factory.getFallback();
if (fallback != void.class) {
return targetWithFallback(name, context, target, builder, fallback);
}
Class<?> fallbackFactory = factory.getFallbackFactory();
if (fallbackFactory != void.class) {
return targetWithFallbackFactory(name, context, target, builder, fallbackFactory);
}
return builder(name, builder).target(target);
}
|
@Test
public void testTarget() {
PolarisFeignCircuitBreakerTargeter targeter = new PolarisFeignCircuitBreakerTargeter(circuitBreakerFactory, circuitBreakerNameResolver);
targeter.target(new FeignClientFactoryBean(), new Feign.Builder(), new FeignClientFactory(), new Target.HardCodedTarget<>(TestApi.class, "/test"));
}
|
public Optional<DbEntityCatalogEntry> getByCollectionName(final String collection) {
return Optional.ofNullable(entitiesByCollectionName.get(collection));
}
|
@Test
void returnsEmptyOptionalsOnEmptyCatalog() {
DbEntitiesCatalog catalog = new DbEntitiesCatalog(List.of());
assertThat(catalog.getByCollectionName("Guadalajara")).isEmpty();
}
|
@Override
public IConfigContext getConfigContext() {
return configContext;
}
|
@Test
void getConfigContext() {
ConfigResponse configResponse = new ConfigResponse();
IConfigContext configContext = configResponse.getConfigContext();
assertNotNull(configContext);
}
|
public static boolean isValidValue(Map<String, Object> serviceSuppliedConfig,
Map<String, Object> clientSuppliedServiceConfig,
String propertyName)
{
// prevent clients from violating SLAs as published by the service
if (propertyName.equals(PropertyKeys.HTTP_REQUEST_TIMEOUT))
{
String clientSuppliedTimeout = (String)clientSuppliedServiceConfig.get(propertyName);
String serviceSuppliedTimeout = (String)serviceSuppliedConfig.get(propertyName);
try
{
return Integer.parseInt(clientSuppliedTimeout) >= Integer.parseInt(serviceSuppliedTimeout);
}
catch (NumberFormatException e)
{
_log.error("Failed to convert HTTP Request Timeout to an int. clientSuppliedTimeout is " + clientSuppliedTimeout
+ ". serviceSuppliedTimeout is " + serviceSuppliedTimeout, e);
return false;
}
}
return true;
}
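A short usage sketch of the SLA rule above: a client may lengthen the service-published timeout, but never shorten it (the values here are illustrative):

Map<String, Object> serviceConfig = new HashMap<>();
serviceConfig.put(PropertyKeys.HTTP_REQUEST_TIMEOUT, "1000");
Map<String, Object> clientConfig = new HashMap<>();
clientConfig.put(PropertyKeys.HTTP_REQUEST_TIMEOUT, "500");
// false: a client timeout of 500ms would undercut the service's published 1000ms
boolean valid = ClientServiceConfigValidator.isValidValue(serviceConfig, clientConfig, PropertyKeys.HTTP_REQUEST_TIMEOUT);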
|
@Test
public void testParseFailureHttpRequestTimeout()
{
Map<String, Object> serviceSuppliedProperties = new HashMap<>();
serviceSuppliedProperties.put(PropertyKeys.HTTP_REQUEST_TIMEOUT, "1000");
Map<String, Object> clientSuppliedProperties = new HashMap<>();
clientSuppliedProperties.put(PropertyKeys.HTTP_REQUEST_TIMEOUT, "foo");
Assert.assertFalse(ClientServiceConfigValidator.isValidValue(serviceSuppliedProperties,
clientSuppliedProperties,
PropertyKeys.HTTP_REQUEST_TIMEOUT));
}
|
@CanDistro
@PatchMapping
@Secured(action = ActionTypes.WRITE)
public String patch(HttpServletRequest request) throws Exception {
String serviceName = WebUtils.required(request, CommonParams.SERVICE_NAME);
NamingUtils.checkServiceNameFormat(serviceName);
String ip = WebUtils.required(request, "ip");
String port = WebUtils.required(request, "port");
String cluster = WebUtils.optional(request, CommonParams.CLUSTER_NAME, StringUtils.EMPTY);
if (StringUtils.isBlank(cluster)) {
cluster = WebUtils.optional(request, "cluster", UtilsAndCommons.DEFAULT_CLUSTER_NAME);
}
InstancePatchObject patchObject = new InstancePatchObject(cluster, ip, Integer.parseInt(port));
String metadata = WebUtils.optional(request, METADATA, StringUtils.EMPTY);
if (StringUtils.isNotBlank(metadata)) {
patchObject.setMetadata(UtilsAndCommons.parseMetadata(metadata));
}
String weight = WebUtils.optional(request, "weight", StringUtils.EMPTY);
if (StringUtils.isNotBlank(weight)) {
patchObject.setWeight(Double.parseDouble(weight));
}
String healthy = WebUtils.optional(request, "healthy", StringUtils.EMPTY);
if (StringUtils.isNotBlank(healthy)) {
patchObject.setHealthy(ConvertUtils.toBoolean(healthy));
}
String enabledString = WebUtils.optional(request, "enabled", StringUtils.EMPTY);
if (StringUtils.isNotBlank(enabledString)) {
patchObject.setEnabled(ConvertUtils.toBoolean(enabledString));
}
String namespaceId = WebUtils.optional(request, CommonParams.NAMESPACE_ID, Constants.DEFAULT_NAMESPACE_ID);
getInstanceOperator().patchInstance(namespaceId, serviceName, patchObject);
return "ok";
}
|
@Test
void testPatch() throws Exception {
mockRequestParameter("metadata", "{}");
mockRequestParameter("app", "test");
mockRequestParameter("weight", "10");
mockRequestParameter("healthy", "false");
mockRequestParameter("enabled", "false");
assertEquals("ok", instanceController.patch(request));
verify(instanceServiceV2).patchInstance(eq(Constants.DEFAULT_NAMESPACE_ID), eq(TEST_GROUP_NAME + "@@" + TEST_SERVICE_NAME),
any(InstancePatchObject.class));
}
|
public static long toUpperCase(final long word) {
final long mask = applyLowerCasePattern(word) >>> 2;
return word & ~mask;
}
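The mask builder referenced above is not shown here. A sketch of the standard SWAR range test it is assumed to perform (Netty-style constants): each byte in 'a'..'z' yields 0x80, which the ">>> 2" in toUpperCase turns into the 0x20 case bit that is then cleared.

// Assumed shape of applyLowerCasePattern: sets bit 7 for each byte in 'a'..'z'.
private static long applyLowerCasePattern(final long word) {
long rotated = word & 0x7F7F7F7F7F7F7F7FL;
rotated += 0x0505050505050505L; // bytes above 'z' overflow into bit 7 ...
rotated &= 0x7F7F7F7F7F7F7F7FL; // ... and are masked back out here
rotated += 0x1A1A1A1A1A1A1A1AL; // bytes >= 'a' now reach bit 7
rotated &= ~word; // exclude bytes that had bit 7 set on input
rotated &= 0x8080808080808080L; // keep only the per-byte flag bit
return rotated;
}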
|
@Test
void toUpperCaseLong() {
// given
final byte[] asciiTable = getExtendedAsciiTable();
shuffleArray(asciiTable, random);
// when
for (int idx = 0; idx < asciiTable.length; idx += Long.BYTES) {
final long value = getLong(asciiTable, idx);
final long actual = SWARUtil.toUpperCase(value);
long expected = 0L;
for (int i = 0; i < Long.BYTES; i++) {
final byte b = (byte) Character.toUpperCase(asciiTable[idx + i]);
expected |= ((long) (b & 0xff)) << (56 - (Byte.SIZE * i));
}
// then
assertEquals(expected, actual);
}
}
|
static void validateJarPathNotNull(Path jarPath) {
// Check that parameter is not null, because it is used to access the file
if (Objects.isNull(jarPath)) {
throw new JetException("File path can not be null");
}
}
|
@Test
public void testValidateJarPathNotNull() {
assertThatThrownBy(() -> JarOnMemberValidator.validateJarPathNotNull(null))
.isInstanceOf(JetException.class)
.hasMessageContaining("File path can not be null");
}
|
public void register(String noteId, String className,
String event, String cmd) throws InvalidHookException {
synchronized (registry) {
if (!HookType.ValidEvents.contains(event)) {
throw new InvalidHookException("event " + event + " is not a valid hook event");
}
if (noteId == null) {
noteId = GLOBAL_KEY;
}
addRepl(noteId, className);
registry.get(noteId).get(className).put(event, cmd);
}
}
|
@Test
void testValidEventCode() {
assertThrows(InvalidHookException.class, () -> {
InterpreterHookRegistry registry = new InterpreterHookRegistry();
// Test that only valid event codes ("pre_exec", "post_exec") are accepted
registry.register("foo", "bar", "baz", "whatever");
});
}
|
static BaseFilesTable.ManifestReadTask fromJson(JsonNode jsonNode) {
Preconditions.checkArgument(jsonNode != null, "Invalid JSON node for files task: null");
Preconditions.checkArgument(
jsonNode.isObject(), "Invalid JSON node for files task: non-object (%s)", jsonNode);
Schema dataTableSchema = SchemaParser.fromJson(JsonUtil.get(SCHEMA, jsonNode));
Schema projection = SchemaParser.fromJson(JsonUtil.get(PROJECTION, jsonNode));
FileIO fileIO = FileIOParser.fromJson(JsonUtil.get(FILE_IO, jsonNode), null);
JsonNode specsArray = JsonUtil.get(SPECS, jsonNode);
Preconditions.checkArgument(
specsArray.isArray(), "Invalid JSON node for partition specs: non-array (%s)", specsArray);
ImmutableList.Builder<PartitionSpec> specsBuilder = ImmutableList.builder();
for (JsonNode specNode : specsArray) {
PartitionSpec spec = PartitionSpecParser.fromJson(dataTableSchema, specNode);
specsBuilder.add(spec);
}
Map<Integer, PartitionSpec> specsById = PartitionUtil.indexSpecs(specsBuilder.build());
Expression residualFilter = ExpressionParser.fromJson(JsonUtil.get(RESIDUAL, jsonNode));
ManifestFile manifestFile = ManifestFileParser.fromJson(JsonUtil.get(MANIFEST, jsonNode));
return new BaseFilesTable.ManifestReadTask(
dataTableSchema, fileIO, specsById, manifestFile, projection, residualFilter);
}
|
@Test
public void invalidJsonNode() throws Exception {
String jsonStr = "{\"str\":\"1\", \"arr\":[]}";
ObjectMapper mapper = new ObjectMapper();
JsonNode rootNode = mapper.reader().readTree(jsonStr);
assertThatThrownBy(() -> FilesTableTaskParser.fromJson(rootNode.get("str")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Invalid JSON node for files task: non-object ");
assertThatThrownBy(() -> FilesTableTaskParser.fromJson(rootNode.get("arr")))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("Invalid JSON node for files task: non-object ");
}
|
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (!(o instanceof BatchEventData that)) {
return false;
}
return events != null ? events.equals(that.events) : that.events == null;
}
|
@Test
public void testEquals() {
assertEquals(batchEventData, batchEventData);
assertEquals(batchEventData, batchEventDataSameAttribute);
assertNotEquals(null, batchEventData);
assertNotEquals(new Object(), batchEventData);
assertEquals(batchEventData, batchEventDataOtherSource);
assertEquals(batchEventData, batchEventDataOtherPartitionId);
assertNotEquals(batchEventData, batchEventDataOtherEvent);
assertNotEquals(batchEventData, batchEventDataNoEvent);
}
|
public Optional<Throwable> run(String... arguments) {
try {
if (isFlag(HELP, arguments)) {
parser.printHelp(stdOut);
} else if (isFlag(VERSION, arguments)) {
parser.printVersion(stdOut);
} else {
final Namespace namespace = parser.parseArgs(arguments);
final Command command = requireNonNull(commands.get(namespace.getString(COMMAND_NAME_ATTR)),
"Command is not found");
try {
command.run(bootstrap, namespace);
} catch (Throwable e) {
// The command failed to run, and the command itself knows
// best how to clean up and report the exception
command.onError(this, namespace, e);
return Optional.of(e);
}
}
return Optional.empty();
} catch (HelpScreenException ignored) {
// This exception is triggered when the user passes in a help flag.
// Return an empty Optional to signal that the process executed normally.
return Optional.empty();
} catch (ArgumentParserException e) {
stdErr.println(e.getMessage());
e.getParser().printHelp(stdErr);
return Optional.of(e);
}
}
|
@Test
void unhandledExceptionsCustomCommandDebug() throws Exception {
doThrow(new BadAppException()).when(command).run(any(), any(Namespace.class), any(Configuration.class));
assertThat(cli.run("custom", "--debug"))
.hasValueSatisfying(t -> assertThat(t).isInstanceOf(RuntimeException.class).hasMessage("I did not expect this!"));
assertThat(stdOut.toString())
.isEmpty();
assertThat(stdErr.toString())
.startsWith(String.format("java.lang.RuntimeException: I did not expect this!%n" +
"\tat io.dropwizard.core.cli.CliTest$CustomCommand.run(CliTest.java"));
}
|
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
@Override
public ClusterInfo get() {
return getClusterInfo();
}
|
@Test
public void testClusterMetrics() throws JSONException, Exception {
WebResource r = resource();
ClientResponse response = r.path("ws").path("v1").path("cluster")
.path("metrics").accept(MediaType.APPLICATION_JSON)
.get(ClientResponse.class);
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
response.getType().toString());
JSONObject json = response.getEntity(JSONObject.class);
verifyClusterMetricsJSON(json);
}
|
public static String truncateByByteLength(String str, Charset charset, int maxBytes, int factor,
boolean appendDots) {
// character count * estimation factor <= max bytes, so the string is definitely within the limit
if (str == null || str.length() * factor <= maxBytes) {
return str;
}
final byte[] sba = str.getBytes(charset);
if (sba.length <= maxBytes) {
return str;
}
// byte limit to decode
final int limitBytes;
if (appendDots) {
limitBytes = maxBytes - "...".getBytes(charset).length;
} else {
limitBytes = maxBytes;
}
final ByteBuffer bb = ByteBuffer.wrap(sba, 0, limitBytes);
final CharBuffer cb = CharBuffer.allocate(limitBytes);
final CharsetDecoder decoder = charset.newDecoder();
// ignore the character that was cut off at the boundary
decoder.onMalformedInput(CodingErrorAction.IGNORE);
decoder.decode(bb, cb, true);
decoder.flush(cb);
final String result = new String(cb.array(), 0, cb.position());
if (appendDots) {
return result + "...";
}
return result;
}
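A usage sketch showing why the decoder-based truncation above matters for multi-byte encodings:

// "你好世界" is 4 characters, 12 bytes in UTF-8 (3 bytes per character).
// A 7-byte limit fits only two complete characters; the decoder silently
// drops the dangling first byte of the third character.
String cut = StrUtil.truncateByByteLength("你好世界", StandardCharsets.UTF_8, 7, 3, false);
// cut is "你好"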
|
@Test
public void truncateByByteLengthTest() {
final String str = "This is English";
final String ret = StrUtil.truncateByByteLength(str, StandardCharsets.ISO_8859_1, 10, 1, false);
assertEquals("This is En", ret);
}
|
public Span newTrace() {
return _toSpan(null, newRootContext(0));
}
|
@Test void useSpanAfterFinished_doesNotCauseBraveFlush() {
simulateInProcessPropagation(tracer, tracer);
GarbageCollectors.blockOnGC();
tracer.newTrace().start().abandon(); //trigger orphaned span check
assertThat(spans).hasSize(1);
assertThat(spans.get(0).annotations())
.extracting(Map.Entry::getValue)
.doesNotContain("brave.flush");
}
|
@Override
public AuthLoginRespVO refreshToken(String refreshToken) {
OAuth2AccessTokenDO accessTokenDO = oauth2TokenService.refreshAccessToken(refreshToken, OAuth2ClientConstants.CLIENT_ID_DEFAULT);
return AuthConvert.INSTANCE.convert(accessTokenDO);
}
|
@Test
public void testRefreshToken() {
// prepare parameters
String refreshToken = randomString();
// mock the method
OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class);
when(oauth2TokenService.refreshAccessToken(eq(refreshToken), eq("default")))
.thenReturn(accessTokenDO);
// invoke
AuthLoginRespVO loginRespVO = authService.refreshToken(refreshToken);
// assert
assertPojoEquals(accessTokenDO, loginRespVO);
}
|
Future<Boolean> canRoll(int podId) {
LOGGER.debugCr(reconciliation, "Determining whether broker {} can be rolled", podId);
return canRollBroker(descriptions, podId);
}
|
@Test
public void testMinIsrEqualsReplicasWithOfflineReplicas(VertxTestContext context) {
KSB ksb = new KSB()
.addNewTopic("A", false)
.addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3")
.addNewPartition(0)
.replicaOn(0, 1, 2)
.leader(0)
.isr(0, 1)
.endPartition()
.endTopic()
.addBroker(3);
KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac());
Checkpoint a = context.checkpoint(ksb.brokers.size());
for (Integer brokerId : ksb.brokers.keySet()) {
kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
assertTrue(canRoll,
"broker " + brokerId + " should be rollable, being minisr = 3, but only 3 replicas");
a.flag();
})));
}
}
|
public static String processPattern(String pattern, TbMsg tbMsg) {
try {
String result = processPattern(pattern, tbMsg.getMetaData());
JsonNode json = JacksonUtil.toJsonNode(tbMsg.getData());
if (json.isObject()) {
Matcher matcher = DATA_PATTERN.matcher(result);
while (matcher.find()) {
String group = matcher.group(2);
String[] keys = group.split("\\.");
JsonNode jsonNode = json;
for (String key : keys) {
if (!StringUtils.isEmpty(key) && jsonNode != null) {
jsonNode = jsonNode.get(key);
} else {
jsonNode = null;
break;
}
}
if (jsonNode != null && jsonNode.isValueNode()) {
result = result.replace(formatDataVarTemplate(group), jsonNode.asText());
}
}
}
return result;
} catch (Exception e) {
throw new RuntimeException("Failed to process pattern!", e);
}
}
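Beyond the flat replacement shown in the test that follows, the dotted-key loop above also walks nested JSON; a hedged illustration:

TbMsgMetaData md = new TbMsgMetaData();
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID, md,
"{\"device\":{\"info\":{\"name\":\"A1\"}}}");
// "$[device.info.name]" splits on '.', walks device -> info -> name, and
// since "A1" is a value node the template is replaced:
String out = TbNodeUtils.processPattern("name: $[device.info.name]", msg); // "name: A1"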
|
@Test
public void testSimpleReplacement() {
String pattern = "ABC ${metadata_key} $[data_key]";
TbMsgMetaData md = new TbMsgMetaData();
md.putValue("metadata_key", "metadata_value");
ObjectNode node = JacksonUtil.newObjectNode();
node.put("data_key", "data_value");
TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID, md, JacksonUtil.toString(node));
String result = TbNodeUtils.processPattern(pattern, msg);
Assertions.assertEquals("ABC metadata_value data_value", result);
}
|
@Override
public String getName() {
return FUNCTION_NAME;
}
|
@Test
public void testDivisionNullLiteral() {
ExpressionContext expression = RequestContextUtils.getExpression(String.format("div(%s,null)", INT_SV_COLUMN));
TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
Assert.assertTrue(transformFunction instanceof DivisionTransformFunction);
Assert.assertEquals(transformFunction.getName(), DivisionTransformFunction.FUNCTION_NAME);
double[] expectedValues = new double[NUM_ROWS];
for (int i = 0; i < NUM_ROWS; i++) {
expectedValues[i] = (double) _intSVValues[i] / Integer.MIN_VALUE;
}
RoaringBitmap roaringBitmap = new RoaringBitmap();
roaringBitmap.add(0L, NUM_ROWS);
testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
|
public static int getRemoteExecutorTimesOfProcessors() {
String timesString = System.getProperty("remote.executor.times.of.processors");
if (NumberUtils.isDigits(timesString)) {
int times = Integer.parseInt(timesString);
return times > 0 ? times : REMOTE_EXECUTOR_TIMES_OF_PROCESSORS;
} else {
return REMOTE_EXECUTOR_TIMES_OF_PROCESSORS;
}
}
|
@Test
void testGetRemoteExecutorTimesOfProcessors() {
int defaultExpectVal = 1 << 4;
int defaultVal = RemoteUtils.getRemoteExecutorTimesOfProcessors();
assertEquals(defaultExpectVal, defaultVal);
System.setProperty("remote.executor.times.of.processors", "10");
int val1 = RemoteUtils.getRemoteExecutorTimesOfProcessors();
assertEquals(10, val1);
System.setProperty("remote.executor.times.of.processors", "-1");
int val2 = RemoteUtils.getRemoteExecutorTimesOfProcessors();
assertEquals(defaultExpectVal, val2);
}
|
public void refreshStarted(long currentVersion, long requestedVersion) {
updatePlanDetails = new ConsumerRefreshMetrics.UpdatePlanDetails();
refreshStartTimeNano = System.nanoTime();
refreshMetricsBuilder = new ConsumerRefreshMetrics.Builder();
refreshMetricsBuilder.setIsInitialLoad(currentVersion == VERSION_NONE);
refreshMetricsBuilder.setUpdatePlanDetails(updatePlanDetails);
cycleVersionStartTimes.clear(); // clear map to avoid accumulation over time
}
|
@Test
public void testRefreshStartedWithSubsequentLoad() {
concreteRefreshMetricsListener.refreshStarted(TEST_VERSION_LOW, TEST_VERSION_HIGH);
ConsumerRefreshMetrics refreshMetrics = concreteRefreshMetricsListener.refreshMetricsBuilder.build();
Assert.assertFalse(refreshMetrics.getIsInitialLoad());
assertNotNull(refreshMetrics.getUpdatePlanDetails());
}
|
String generateName(List<String> symbols)
{
if (_serverNodeUri == null)
{
throw new IllegalStateException("Cannot generate symbol table name with null server node URI.");
}
return _serverNodeUri + SERVER_NODE_URI_PREFIX_TABLENAME_SEPARATOR + _symbolTablePrefix
+ PREFIX_HASH_SEPARATOR + symbols.hashCode();
}
|
@Test
public void testGenerateName()
{
List<String> symbols = Collections.unmodifiableList(Arrays.asList("Haha", "Hehe"));
String name = SYMBOL_TABLE_NAME_HANDLER.generateName(symbols);
Assert.assertEquals(name, "https://Host:100/service|Prefix-" + symbols.hashCode());
}
|
@VisibleForTesting
long calculateRenewalDelay(Clock clock, long nextRenewal) {
long now = clock.millis();
long renewalDelay = Math.round(tokensRenewalTimeRatio * (nextRenewal - now));
LOG.debug(
"Calculated delay on renewal is {}, based on next renewal {} and the ratio {}, and current time {}",
renewalDelay,
nextRenewal,
tokensRenewalTimeRatio,
now);
return renewalDelay;
}
|
@Test
public void calculateRenewalDelayShouldConsiderRenewalRatio() {
Configuration configuration = new Configuration();
configuration.setBoolean(CONFIG_PREFIX + ".throw.enabled", false);
configuration.set(DELEGATION_TOKENS_RENEWAL_TIME_RATIO, 0.5);
DefaultDelegationTokenManager delegationTokenManager =
new DefaultDelegationTokenManager(configuration, null, null, null);
Clock constantClock = Clock.fixed(ofEpochMilli(100), ZoneId.systemDefault());
assertEquals(50, delegationTokenManager.calculateRenewalDelay(constantClock, 200));
}
|
public static Date getDateFromString( String dateString ) throws ParseException {
String dateFormat = detectDateFormat( dateString );
if ( dateFormat == null ) {
throw new ParseException( "Unknown date format.", 0 );
}
return getDateFromStringByFormat( dateString, dateFormat );
}
|
@Test
public void testGetDateFromStringLocale() throws ParseException {
assertEquals( SAMPLE_DATE_US, DateDetector.getDateFromString( SAMPLE_DATE_STRING_US, LOCALE_en_US ) );
try {
DateDetector.getDateFromString( null );
fail( "ParseException expected for null date string" );
} catch ( ParseException e ) {
// expected exception
}
try {
DateDetector.getDateFromString( null, null );
fail( "ParseException expected for null date string" );
} catch ( ParseException e ) {
// expected exception
}
}
|
public void process(final Exchange exchange) {
final ExecutionContext executionContext = smooks.createExecutionContext();
try {
executionContext.put(EXCHANGE_TYPED_KEY, exchange);
String charsetName = (String) exchange.getProperty(Exchange.CHARSET_NAME);
if (charsetName != null) {
// if provided, use the same character encoding
executionContext.setContentEncoding(charsetName);
}
exchange.getIn().setHeader(SMOOKS_EXECUTION_CONTEXT, executionContext);
setupSmooksReporting(executionContext);
final Exports exports = smooks.getApplicationContext().getRegistry().lookup(new ExportsLookup());
if (exports.hasExports()) {
final Result[] results = exports.createResults();
smooks.filterSource(executionContext, getSource(exchange), results);
setResultOnBody(exports, results, exchange);
} else {
smooks.filterSource(executionContext, getSource(exchange));
}
} finally {
executionContext.remove(EXCHANGE_TYPED_KEY);
}
}
|
@Test
public void testProcess() throws Exception {
context.addRoutes(createEdiToXmlRouteBuilder());
context.start();
assertOneProcessedMessage();
}
|
<T extends PipelineOptions> T as(Class<T> iface) {
checkNotNull(iface);
checkArgument(iface.isInterface(), "Not an interface: %s", iface);
T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
synchronized (this) {
// double check
existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
if (existingOption == null) {
Registration<T> registration =
PipelineOptionsFactory.CACHE
.get()
.validateWellFormed(iface, computedProperties.knownInterfaces);
List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
Class<T> proxyClass = registration.getProxyClass();
existingOption =
InstanceBuilder.ofType(proxyClass)
.fromClass(proxyClass)
.withArg(InvocationHandler.class, this)
.build();
computedProperties =
computedProperties.updated(iface, existingOption, propertyDescriptors);
}
}
}
return existingOption;
}
|
@Test
public void testUpCastRetainsSubClassValues() throws Exception {
ProxyInvocationHandler handler = new ProxyInvocationHandler(Maps.newHashMap());
SubClass extended = handler.as(SubClass.class);
extended.setExtended("subClassValue");
SubClass extended2 = extended.as(Simple.class).as(SubClass.class);
assertEquals("subClassValue", extended2.getExtended());
}
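The as() method above is double-checked locking over an instance cache: one lock-free read, then a re-check under the lock before building and publishing the proxy. A generic sketch of the pattern, with hypothetical stand-ins for the cache and the proxy factory:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class DoubleCheckedCacheSketch<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> factory; // stand-in for the proxy builder

    DoubleCheckedCacheSketch(Function<K, V> factory) {
        this.factory = factory;
    }

    V get(K key) {
        V existing = cache.get(key);           // first, lock-free check
        if (existing == null) {
            synchronized (this) {
                existing = cache.get(key);     // re-check under the lock
                if (existing == null) {
                    existing = factory.apply(key);
                    cache.put(key, existing);  // publish exactly one instance
                }
            }
        }
        return existing;
    }

    public static void main(String[] args) {
        DoubleCheckedCacheSketch<String, String> cache =
                new DoubleCheckedCacheSketch<>(k -> "proxy-for-" + k);
        System.out.println(cache.get("Simple")); // builds once
        System.out.println(cache.get("Simple")); // returns the cached instance
    }
}

With a ConcurrentHashMap this could collapse to computeIfAbsent; the explicit re-check is kept here to mirror the original's structure.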
|
@Override
public Job drainJob(String project, String region, String jobId) {
LOG.info("Draining {} under {}", jobId, project);
Job job = new Job().setRequestedState(JobState.DRAINED.toString());
LOG.info("Sending job to update {}:\n{}", jobId, formatForLogging(job));
return Failsafe.with(clientRetryPolicy())
.get(
() ->
client.projects().locations().jobs().update(project, region, jobId, job).execute());
}
|
@Test
public void testDrainJobThrowsException() throws IOException {
when(getLocationJobs(client).update(any(), any(), any(), any())).thenThrow(new IOException());
assertThrows(
FailsafeException.class,
() -> new FakePipelineLauncher(client).drainJob(PROJECT, REGION, JOB_ID));
}
|
@Override
public ItemChangeSets resolve(long namespaceId, String configText, List<ItemDTO> baseItems) {
Map<Integer, ItemDTO> oldLineNumMapItem = BeanUtils.mapByKey("lineNum", baseItems);
Map<String, ItemDTO> oldKeyMapItem = BeanUtils.mapByKey("key", baseItems);
//comment and blank items are stored under the empty key; remove them from the key map
oldKeyMapItem.remove("");
String[] newItems = configText.split(ITEM_SEPARATOR);
Set<String> repeatKeys = new HashSet<>();
if (isHasRepeatKey(newItems, repeatKeys)) {
throw new BadRequestException("Config text has repeated keys: %s, please check your input.", repeatKeys);
}
ItemChangeSets changeSets = new ItemChangeSets();
Map<Integer, String> newLineNumMapItem = new HashMap<>(); // used later to delete blank and comment items
int lineCounter = 1;
for (String newItem : newItems) {
newItem = newItem.trim();
newLineNumMapItem.put(lineCounter, newItem);
ItemDTO oldItemByLine = oldLineNumMapItem.get(lineCounter);
//comment item
if (isCommentItem(newItem)) {
handleCommentLine(namespaceId, oldItemByLine, newItem, lineCounter, changeSets);
//blank item
} else if (isBlankItem(newItem)) {
handleBlankLine(namespaceId, oldItemByLine, lineCounter, changeSets);
//normal item
} else {
handleNormalLine(namespaceId, oldKeyMapItem, newItem, lineCounter, changeSets);
}
lineCounter++;
}
deleteCommentAndBlankItem(oldLineNumMapItem, newLineNumMapItem, changeSets);
deleteNormalKVItem(oldKeyMapItem, changeSets);
return changeSets;
}
|
@Test
public void testAllSituation() {
ItemChangeSets changeSets = resolver.resolve(1, "#ww\nd=e\nb=c\na=b\n\nq=w\n#eee", mockBaseItemWith2Key1Comment1Blank());
Assert.assertEquals(2, changeSets.getDeleteItems().size());
Assert.assertEquals(2, changeSets.getUpdateItems().size());
Assert.assertEquals(5, changeSets.getCreateItems().size());
}
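The resolver classifies each trimmed line as comment, blank, or normal before dispatching. The predicates themselves are not shown above, so the versions below are assumptions for illustration (properties-style comments starting with # or !):

class LineClassifierSketch {
    // Hypothetical classifiers; the real ones live inside the resolver.
    static boolean isCommentItem(String line) {
        return line.startsWith("#") || line.startsWith("!");
    }

    static boolean isBlankItem(String line) {
        return line.isEmpty();
    }

    public static void main(String[] args) {
        for (String line : "#ww\nd=e\n\nq=w".split("\n")) {
            String kind = isCommentItem(line) ? "comment"
                    : isBlankItem(line) ? "blank" : "normal";
            System.out.println("'" + line + "' -> " + kind);
        }
    }
}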
|
@Override
public void setValue(String value) throws IOException
{
checkValue(value);
// if there are export values/an Opt entry there is a different
// approach to setting the value
if (!getExportValues().isEmpty())
{
updateByOption(value);
}
else
{
updateByValue(value);
}
applyChange();
}
|
@Test
void testRadioButtonWithOptions()
{
File file = new File(TARGET_PDF_DIR, "PDFBOX-3656.pdf");
try (PDDocument pdfDocument = Loader.loadPDF(file))
{
PDRadioButton radioButton = (PDRadioButton) pdfDocument.getDocumentCatalog().getAcroForm().getField("Checking/Savings");
radioButton.setValue("Off");
radioButton.getWidgets().forEach(widget ->
assertEquals(COSName.Off, widget.getCOSObject().getItem(COSName.AS),
"The widget should be set to Off"));
}
catch (IOException e)
{
fail("Unexpected IOException " + e.getMessage());
}
}
|
@Override
public final String toString() {
StringBuilder out = new StringBuilder();
appendTo(out);
return out.toString();
}
|
@Test
void requireThatPredicatesCanBeBuiltUsingChainedMethodCalls() {
assertEquals("country not in [no, se] or age in [20..] or height in [..160]",
new Disjunction()
.addOperand(new Negation(new FeatureSet("country").addValue("no").addValue("se")))
.addOperand(new FeatureRange("age").setFromInclusive(20L))
.addOperand(new FeatureRange("height").setToInclusive(160L))
.toString());
}
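toString delegating to appendTo(StringBuilder) is a common way to share one formatting implementation between string conversion and streaming into an existing buffer. A generic sketch with a hypothetical Range type, not the real Predicate hierarchy:

class AppendToSketch {
    static class Range {
        final long from, to;
        Range(long from, long to) { this.from = from; this.to = to; }

        // The single formatting implementation.
        void appendTo(StringBuilder out) {
            out.append('[').append(from).append("..").append(to).append(']');
        }

        @Override
        public String toString() {
            StringBuilder out = new StringBuilder();
            appendTo(out); // toString reuses appendTo
            return out.toString();
        }
    }

    public static void main(String[] args) {
        System.out.println(new Range(20, 160)); // [20..160]
    }
}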
|
void handleRestore(final QueuedCommand queuedCommand) {
throwIfNotConfigured();
handleStatementWithTerminatedQueries(
queuedCommand.getAndDeserializeCommand(commandDeserializer),
queuedCommand.getAndDeserializeCommandId(),
queuedCommand.getStatus(),
Mode.RESTORE,
queuedCommand.getOffset(),
true
);
}
|
@Test(expected = IllegalStateException.class)
public void shouldThrowOnHandleRestoreIfNotConfigured() {
// Given:
statementExecutor = new InteractiveStatementExecutor(
serviceContext,
mockEngine,
mockParser,
mockQueryIdGenerator,
commandDeserializer
);
final Map<String, Object> withoutAppServer = ksqlConfig.originals();
withoutAppServer.remove(StreamsConfig.APPLICATION_SERVER_CONFIG);
when(mockEngine.getKsqlConfig()).thenReturn(new KsqlConfig(withoutAppServer));
// When:
statementExecutor.handleRestore(queuedCommand);
}
|
protected static VplsOperation getOptimizedVplsOperation(Deque<VplsOperation> operations) {
if (operations.isEmpty()) {
return null;
}
// no need to optimize if the queue contains only one operation
if (operations.size() == 1) {
return operations.getFirst();
}
final VplsOperation firstOperation = operations.peekFirst();
final VplsOperation lastOperation = operations.peekLast();
final VplsOperation.Operation firstOp = firstOperation.op();
final VplsOperation.Operation lastOp = lastOperation.op();
if (firstOp.equals(VplsOperation.Operation.REMOVE)) {
if (lastOp.equals(VplsOperation.Operation.REMOVE)) {
// case 1: both first and last operation are REMOVE; do remove
return firstOperation;
} else if (lastOp.equals(VplsOperation.Operation.ADD)) {
// case 2: if first is REMOVE, and last is ADD; do update
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.UPDATE);
} else {
// case 3: first is REMOVE, last is UPDATE; do update
return lastOperation;
}
} else if (firstOp.equals(VplsOperation.Operation.ADD)) {
if (lastOp.equals(VplsOperation.Operation.REMOVE)) {
// case 4: first is ADD, last is REMOVE; nothing to do
return null;
} else if (lastOp.equals(VplsOperation.Operation.ADD)) {
// case 5: both first and last are ADD, do add
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.ADD);
} else {
// case 6: first is ADD and last is update, do add
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.ADD);
}
} else {
if (lastOp.equals(VplsOperation.Operation.REMOVE)) {
// case 7: last is remove, do remove
return lastOperation;
} else if (lastOp.equals(VplsOperation.Operation.ADD)) {
// case 8: do update only
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.UPDATE);
} else {
// case 9: from UPDATE to UPDATE
// only need last UPDATE operation
return VplsOperation.of(lastOperation.vpls(),
VplsOperation.Operation.UPDATE);
}
}
}
|
@Test
public void testOptimizeOperationsAToA() {
Deque<VplsOperation> operations = new ArrayDeque<>();
VplsData vplsData = VplsData.of(VPLS1);
vplsData.addInterfaces(ImmutableSet.of(V100H1));
VplsOperation vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.ADD);
operations.add(vplsOperation);
vplsData = VplsData.of(VPLS1, EncapsulationType.VLAN);
vplsData.addInterfaces(ImmutableSet.of(V100H1, V100H2));
vplsOperation = VplsOperation.of(vplsData,
VplsOperation.Operation.ADD);
operations.add(vplsOperation);
vplsOperation = VplsOperationManager.getOptimizedVplsOperation(operations);
assertEquals(VplsOperation.of(vplsData, VplsOperation.Operation.ADD), vplsOperation);
}
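The nine cases reduce to a small first/last rule. A simplified sketch over just the operation kinds; the real method also carries the VplsData payload and returns early for single-element queues:

import java.util.ArrayDeque;
import java.util.Deque;

class VplsCollapseSketch {
    enum Op { ADD, REMOVE, UPDATE }

    // Collapses a queue of operations to the single equivalent operation (or null).
    static Op collapse(Deque<Op> ops) {
        if (ops.isEmpty()) return null;
        Op first = ops.peekFirst(), last = ops.peekLast();
        if (first == Op.REMOVE) {
            return last == Op.REMOVE ? Op.REMOVE : Op.UPDATE; // cases 1-3
        } else if (first == Op.ADD) {
            return last == Op.REMOVE ? null : Op.ADD;         // cases 4-6
        } else {
            return last == Op.REMOVE ? Op.REMOVE : Op.UPDATE; // cases 7-9
        }
    }

    public static void main(String[] args) {
        Deque<Op> ops = new ArrayDeque<>();
        ops.add(Op.ADD);
        ops.add(Op.REMOVE);
        System.out.println(collapse(ops)); // null: ADD followed by REMOVE cancels out
    }
}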
|
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
protobuf.setName("System");
setAttribute(protobuf, "Server ID", server.getId());
setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel());
setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode());
setAttribute(protobuf, "Container", containerSupport.isRunningInContainer());
setAttribute(protobuf, "High Availability", true);
setAttribute(protobuf, "External Users and Groups Provisioning",
commonSystemInformation.getManagedInstanceProviderName());
setAttribute(protobuf, "External User Authentication",
commonSystemInformation.getExternalUserAuthentication());
addIfNotEmpty(protobuf, "Accepted external identity providers",
commonSystemInformation.getEnabledIdentityProviders());
addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up",
commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders());
setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication());
return protobuf.build();
}
|
@Test
public void toProtobuf_whenExternalUserAuthentication_shouldWriteIt() {
when(commonSystemInformation.getExternalUserAuthentication()).thenReturn("LDAP");
ProtobufSystemInfo.Section protobuf = underTest.toProtobuf();
assertThatAttributeIs(protobuf, "External User Authentication", "LDAP");
}
|
public Future<KafkaVersionChange> reconcile() {
return getVersionFromController()
.compose(i -> getPods())
.compose(this::detectToAndFromVersions)
.compose(i -> prepareVersionChange());
}
|
@Test
public void testNoopWithoutVersionFromSts(VertxTestContext context) {
String kafkaVersion = VERSIONS.defaultVersion().version();
String interBrokerProtocolVersion = VERSIONS.defaultVersion().protocolVersion();
String logMessageFormatVersion = VERSIONS.defaultVersion().messageVersion();
VersionChangeCreator vcc = mockVersionChangeCreator(
mockKafka(null, null, null),
mockNewCluster(
mockSts(kafkaVersion),
null,
mockUniformPods(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion)
)
);
Checkpoint async = context.checkpoint();
vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> {
assertThat(c.from(), is(VERSIONS.defaultVersion()));
assertThat(c.to(), is(VERSIONS.defaultVersion()));
assertThat(c.interBrokerProtocolVersion(), is(VERSIONS.defaultVersion().protocolVersion()));
assertThat(c.logMessageFormatVersion(), is(VERSIONS.defaultVersion().messageVersion()));
assertThat(c.metadataVersion(), is(VERSIONS.defaultVersion().metadataVersion()));
async.flag();
})));
}
|
@Nonnull
public static RuntimeException rethrow(@Nonnull final Throwable t) {
com.hazelcast.internal.util.ExceptionUtil.rethrowIfError(t);
throw peeledAndUnchecked(t);
}
|
@Test
public void when_throwableIsExecutionExceptionWithNullCause_then_returnHazelcastException() {
ExecutionException exception = new ExecutionException((Throwable) null); // cast disambiguates the (String) vs (Throwable) overloads
assertThrows(JetException.class, () -> {
throw rethrow(exception);
});
}
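The test pins down the edge case: an ExecutionException with a null cause cannot be unwrapped, so it gets wrapped instead. A simplified sketch of the peel-and-rethrow idiom, with RuntimeException standing in for the library's exception wrappers:

import java.util.concurrent.ExecutionException;

class RethrowSketch {
    // The declared return type lets callers write `throw rethrow(t)` so the
    // compiler sees an exit point, even though this method never returns normally.
    static RuntimeException rethrow(Throwable t) {
        if (t instanceof Error) {
            throw (Error) t;                   // errors propagate untouched
        }
        Throwable peeled = (t instanceof ExecutionException && t.getCause() != null)
                ? t.getCause()                 // unwrap the real failure
                : t;                           // null cause: keep the wrapper itself
        throw (peeled instanceof RuntimeException)
                ? (RuntimeException) peeled
                : new RuntimeException(peeled);
    }

    public static void main(String[] args) {
        try {
            throw rethrow(new ExecutionException((Throwable) null));
        } catch (RuntimeException e) {
            System.out.println(e.getCause() instanceof ExecutionException); // true
        }
    }
}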
|