focal_method | test_case
---|---|
@Override
protected void write(final MySQLPacketPayload payload) {
payload.writeInt1(protocolVersion);
payload.writeStringNul(serverVersion);
payload.writeInt4(connectionId);
payload.writeStringNul(new String(authPluginData.getAuthenticationPluginDataPart1()));
payload.writeInt2(capabilityFlagsLower);
payload.writeInt1(characterSet);
payload.writeInt2(statusFlag.getValue());
payload.writeInt2(capabilityFlagsUpper);
payload.writeInt1(isClientPluginAuth() ? authPluginData.getAuthenticationPluginData().length + 1 : 0);
payload.writeReserved(10);
writeAuthPluginDataPart2(payload);
writeAuthPluginName(payload);
}
|
@Test
void assertWrite() {
MySQLAuthenticationPluginData authPluginData = new MySQLAuthenticationPluginData(part1, part2);
new MySQLHandshakePacket(1000, false, authPluginData).write(payload);
verify(payload).writeInt1(MySQLConstants.PROTOCOL_VERSION);
verify(payload).writeStringNul(DatabaseProtocolServerInfo.getDefaultProtocolVersion(TypedSPILoader.getService(DatabaseType.class, "MySQL")));
verify(payload).writeInt4(1000);
verify(payload).writeStringNul(new String(authPluginData.getAuthenticationPluginDataPart1()));
verify(payload).writeInt2(MySQLCapabilityFlag.calculateHandshakeCapabilityFlagsLower());
verify(payload).writeInt1(MySQLConstants.DEFAULT_CHARSET.getId());
verify(payload).writeInt2(MySQLStatusFlag.SERVER_STATUS_AUTOCOMMIT.getValue());
verify(payload).writeInt2(MySQLCapabilityFlag.calculateHandshakeCapabilityFlagsUpper());
verify(payload).writeInt1(authPluginData.getAuthenticationPluginData().length + 1);
verify(payload).writeReserved(10);
verify(payload).writeStringNul(new String(authPluginData.getAuthenticationPluginDataPart2()));
}
|
public String getString(HazelcastProperty property) {
String value = properties.getProperty(property.getName());
if (value != null) {
return value;
}
value = property.getSystemProperty();
if (value != null) {
return value;
}
HazelcastProperty parent = property.getParent();
if (parent != null) {
return getString(parent);
}
String deprecatedName = property.getDeprecatedName();
if (deprecatedName != null) {
value = get(deprecatedName);
if (value == null) {
value = System.getProperty(deprecatedName);
}
if (value != null) {
// we don't have a logger available here because the Logging service is constructed after the Properties are created.
System.err.print("Don't use deprecated '" + deprecatedName + "' "
+ "but use '" + property.getName() + "' instead. "
+ "The former name will be removed in the next Hazelcast release.");
return value;
}
}
Function<HazelcastProperties, ?> function = property.getFunction();
if (function != null) {
return "" + function.apply(this);
}
return property.getDefaultValue();
}
|
@Test
public void getString_whenDeprecatedNameUsed() {
Properties props = new Properties();
props.setProperty("oldname", "10");
HazelcastProperties properties = new HazelcastProperties(props);
HazelcastProperty property = new HazelcastProperty("newname")
.setDeprecatedName("oldname");
String value = properties.getString(property);
assertEquals("10", value);
}
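The lookup order above is: explicit Properties entry, then JVM system property, then the parent property, then the deprecated name, then the property's function, and finally the default value. A minimal sketch of the system-property fallback, on the assumption that HazelcastProperty.getSystemProperty() reads System.getProperty(getName()):
@Test
public void getString_whenOnlySystemPropertySet() {
    // Hypothetical test: no entry in the Properties object, so the lookup
    // should fall through to the JVM system property with the same name.
    System.setProperty("newname", "20");
    try {
        HazelcastProperties properties = new HazelcastProperties(new Properties());
        HazelcastProperty property = new HazelcastProperty("newname");
        assertEquals("20", properties.getString(property));
    } finally {
        System.clearProperty("newname");
    }
}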
|
@Override
public boolean askForNotificationPostPermission(@NonNull Activity activity) {
return PermissionRequestHelper.check(
activity, PermissionRequestHelper.NOTIFICATION_PERMISSION_REQUEST_CODE);
}
|
@Test
@Config(sdk = Build.VERSION_CODES.TIRAMISU)
public void testReturnsTrueIfAlreadyHasPermission() {
var appShadow = Shadows.shadowOf(RuntimeEnvironment.getApplication());
appShadow.grantPermissions(Manifest.permission.POST_NOTIFICATIONS);
try (var scenario = ActivityScenario.launch(TestFragmentActivity.class)) {
scenario
.moveToState(Lifecycle.State.RESUMED)
.onActivity(
activity -> {
Assert.assertTrue(mUnderTest.askForNotificationPostPermission(activity));
});
}
}
|
public static int getDigit(final int index, final byte value)
{
if (value < 0x30 || value > 0x39)
{
throw new AsciiNumberFormatException("'" + ((char)value) + "' is not a valid digit @ " + index);
}
return value - 0x30;
}
|
@Test
void shouldThrowExceptionWhenDecodingCharNonNumericValue()
{
assertThrows(AsciiNumberFormatException.class, () -> AsciiEncoding.getDigit(0, 'a'));
}
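For contrast with the failure case, a hedged happy-path check: bytes '0' (0x30) through '9' (0x39) decode by subtracting 0x30.
@Test
void shouldDecodeValidDigit()
{
    // '7' is 0x37 in ASCII, so the decoded digit is 0x37 - 0x30 = 7.
    assertEquals(7, AsciiEncoding.getDigit(0, (byte) '7'));
}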
|
@Operation(summary = "queryDataSourceListPaging", description = "QUERY_DATA_SOURCE_LIST_PAGING_NOTES")
@Parameters({
@Parameter(name = "searchVal", description = "SEARCH_VAL", schema = @Schema(implementation = String.class)),
@Parameter(name = "pageNo", description = "PAGE_NO", required = true, schema = @Schema(implementation = int.class, example = "1")),
@Parameter(name = "pageSize", description = "PAGE_SIZE", required = true, schema = @Schema(implementation = int.class, example = "20"))
})
@GetMapping()
@ResponseStatus(HttpStatus.OK)
@ApiException(QUERY_DATASOURCE_ERROR)
public Result<Object> queryDataSourceListPaging(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageNo") Integer pageNo,
@RequestParam("pageSize") Integer pageSize) {
checkPageParams(pageNo, pageSize);
searchVal = ParameterUtils.handleEscapes(searchVal);
PageInfo<DataSource> pageInfo =
dataSourceService.queryDataSourceListPaging(loginUser, searchVal, pageNo, pageSize);
return Result.success(pageInfo);
}
|
@Test
public void testQueryDataSourceListPaging() throws Exception {
MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>();
paramsMap.add("searchVal", "mysql");
paramsMap.add("pageNo", "1");
paramsMap.add("pageSize", "1");
MvcResult mvcResult = mockMvc.perform(get("/datasources")
.header("sessionId", sessionId)
.params(paramsMap))
.andExpect(status().isOk())
.andExpect(content().contentType(MediaType.APPLICATION_JSON))
.andReturn();
Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class);
Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue());
logger.info(mvcResult.getResponse().getContentAsString());
}
|
@UdafFactory(description = "Compute sample standard deviation of column with type Integer.",
aggregateSchema = "STRUCT<SUM integer, COUNT bigint, M2 double>")
public static TableUdaf<Integer, Struct, Double> stdDevInt() {
return getStdDevImplementation(
0,
STRUCT_INT,
(agg, newValue) -> newValue + agg.getInt32(SUM),
(agg, newValue) ->
Double.valueOf(newValue * (agg.getInt64(COUNT) + 1) - (agg.getInt32(SUM) + newValue)),
(agg1, agg2) ->
agg1.getInt32(SUM).doubleValue() / agg1.getInt64(COUNT).doubleValue()
- agg2.getInt32(SUM).doubleValue() / agg2.getInt64(COUNT).doubleValue(),
(agg1, agg2) -> agg1.getInt32(SUM) + agg2.getInt32(SUM),
(agg, valueToRemove) -> agg.getInt32(SUM) - valueToRemove);
}
|
@Test
public void shouldMergeInts() {
final TableUdaf<Integer, Struct, Double> udaf = stdDevInt();
Struct left = udaf.initialize();
final Integer[] leftValues = new Integer[] {5, 8, 10};
for (final Integer thisValue : leftValues) {
left = udaf.aggregate(thisValue, left);
}
Struct right = udaf.initialize();
final Integer[] rightValues = new Integer[] {6, 7, 9};
for (final Integer thisValue : rightValues) {
right = udaf.aggregate(thisValue, right);
}
final Struct merged = udaf.merge(left, right);
assertThat(merged.getInt64(COUNT), equalTo(6L));
assertThat(merged.getInt32(SUM), equalTo(45));
assertThat(merged.getFloat64(M2), equalTo(17.5));
final double standardDev = udaf.map(merged);
assertThat(standardDev, equalTo(1.8708286933869707));
}
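Why the merged M2 is 17.5: assuming the standard pairwise-merge (Chan et al.) formula for combining sums of squared deviations, which the merge lambdas above implement piecewise, the worked check is:
M_{2,L} = (5 - \tfrac{23}{3})^2 + (8 - \tfrac{23}{3})^2 + (10 - \tfrac{23}{3})^2 = \tfrac{114}{9}, \qquad M_{2,R} = \tfrac{42}{9}
\delta = \tfrac{23}{3} - \tfrac{22}{3} = \tfrac{1}{3}, \qquad M_2 = \tfrac{114}{9} + \tfrac{42}{9} + \delta^2 \cdot \tfrac{3 \cdot 3}{6} = 17.5
s = \sqrt{M_2 / (n - 1)} = \sqrt{17.5 / 5} \approx 1.8708286933869707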
|
@Override
public Set<ConstraintCheckResult> checkConstraints(Collection<Constraint> requestedConstraints) {
final ImmutableSet.Builder<ConstraintCheckResult> fulfilledConstraints = ImmutableSet.builder();
for (Constraint constraint : requestedConstraints) {
if (constraint instanceof PluginVersionConstraint) {
final PluginVersionConstraint versionConstraint = (PluginVersionConstraint) constraint;
final Requirement requiredVersion = versionConstraint.version();
boolean result = false;
for (Semver pluginVersion : pluginVersions) {
if (requiredVersion.isSatisfiedBy(pluginVersion)) {
result = true;
}
}
ConstraintCheckResult constraintCheckResult = ConstraintCheckResult.create(versionConstraint, result);
fulfilledConstraints.add(constraintCheckResult);
}
}
return fulfilledConstraints.build();
}
|
@Test
public void checkConstraintsFails() {
final TestPluginMetaData pluginMetaData = new TestPluginMetaData();
final PluginVersionConstraintChecker constraintChecker = new PluginVersionConstraintChecker(Collections.singleton(pluginMetaData));
final GraylogVersionConstraint graylogVersionConstraint = GraylogVersionConstraint.builder()
.version("^2.0.0")
.build();
final PluginVersionConstraint pluginVersionConstraint = PluginVersionConstraint.builder()
.pluginId("unique-id")
.version("^2.0.0")
.build();
final ImmutableSet<Constraint> requiredConstraints = ImmutableSet.of(graylogVersionConstraint, pluginVersionConstraint);
assertThat(constraintChecker.checkConstraints(requiredConstraints).stream().allMatch(c -> !c.fulfilled())).isTrue();
}
|
public List<MavenArtifact> searchSha1(String sha1) throws IOException, TooManyRequestsException {
if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
throw new IllegalArgumentException("Invalid SHA1 format");
}
if (cache != null) {
final List<MavenArtifact> cached = cache.get(sha1);
if (cached != null) {
LOGGER.debug("cache hit for Central: " + sha1);
if (cached.isEmpty()) {
throw new FileNotFoundException("Artifact not found in Central");
}
return cached;
}
}
final List<MavenArtifact> result = new ArrayList<>();
final URL url = new URL(String.format(query, rootURL, sha1));
LOGGER.trace("Searching Central url {}", url);
// Determine if we need to use a proxy. The rules:
// 1) If the proxy is set, AND the setting is set to true, use the proxy
// 2) Otherwise, don't use the proxy (either the proxy isn't configured,
// or proxy is specifically set to false)
final URLConnectionFactory factory = new URLConnectionFactory(settings);
final HttpURLConnection conn = factory.createHttpURLConnection(url, useProxy);
conn.setDoOutput(true);
// JSON would be more elegant, but there's not currently a dependency
// on JSON, so we don't want to add one just for this
conn.addRequestProperty("Accept", "application/xml");
conn.connect();
if (conn.getResponseCode() == 200) {
boolean missing = false;
try {
final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder();
final Document doc = builder.parse(conn.getInputStream());
final XPath xpath = XPathFactory.newInstance().newXPath();
final String numFound = xpath.evaluate("/response/result/@numFound", doc);
if ("0".equals(numFound)) {
missing = true;
} else {
final NodeList docs = (NodeList) xpath.evaluate("/response/result/doc", doc, XPathConstants.NODESET);
for (int i = 0; i < docs.getLength(); i++) {
final String g = xpath.evaluate("./str[@name='g']", docs.item(i));
LOGGER.trace("GroupId: {}", g);
final String a = xpath.evaluate("./str[@name='a']", docs.item(i));
LOGGER.trace("ArtifactId: {}", a);
final String v = xpath.evaluate("./str[@name='v']", docs.item(i));
final NodeList attributes = (NodeList) xpath.evaluate("./arr[@name='ec']/str", docs.item(i), XPathConstants.NODESET);
boolean pomAvailable = false;
boolean jarAvailable = false;
for (int x = 0; x < attributes.getLength(); x++) {
final String tmp = xpath.evaluate(".", attributes.item(x));
if (".pom".equals(tmp)) {
pomAvailable = true;
} else if (".jar".equals(tmp)) {
jarAvailable = true;
}
}
final String centralContentUrl = settings.getString(Settings.KEYS.CENTRAL_CONTENT_URL);
String artifactUrl = null;
String pomUrl = null;
if (jarAvailable) {
//org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.jar
artifactUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
+ v + '/' + a + '-' + v + ".jar";
}
if (pomAvailable) {
//org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
pomUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
+ v + '/' + a + '-' + v + ".pom";
}
result.add(new MavenArtifact(g, a, v, artifactUrl, pomUrl));
}
}
} catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) {
// Anything else is jacked up XML stuff that we really can't recover from well
final String errorMessage = "Failed to parse MavenCentral XML Response: " + e.getMessage();
throw new IOException(errorMessage, e);
}
if (missing) {
if (cache != null) {
cache.put(sha1, result);
}
throw new FileNotFoundException("Artifact not found in Central");
}
} else if (conn.getResponseCode() == 429) {
final String errorMessage = "Too many requests sent to MavenCentral; additional requests are being rejected.";
throw new TooManyRequestsException(errorMessage);
} else {
final String errorMessage = "Could not connect to MavenCentral (" + conn.getResponseCode() + "): " + conn.getResponseMessage();
throw new IOException(errorMessage);
}
if (cache != null) {
cache.put(sha1, result);
}
return result;
}
|
@Test(expected = IllegalArgumentException.class)
public void testNullSha1() throws Exception {
searcher.searchSha1(null);
}
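The same guard rejects any string that is not exactly 40 hex characters, so a malformed hash can be exercised the same way (a sketch reusing the searcher fixture above):
@Test(expected = IllegalArgumentException.class)
public void testMalformedSha1() throws Exception {
    // 39 characters, so it fails the ^[0-9A-Fa-f]{40}$ check.
    searcher.searchSha1("abcdef0123456789abcdef0123456789abcdef0");
}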
|
@Override
public void collect(long elapsedTime, StatementContext ctx) {
final Timer timer = getTimer(ctx);
timer.update(elapsedTime, TimeUnit.NANOSECONDS);
}
|
@Test
public void updatesTimerForSqlObjects() throws Exception {
final StatementNameStrategy strategy = new SmartNameStrategy();
final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry,
strategy);
final StatementContext ctx = mock(StatementContext.class);
doReturn("SELECT 1").when(ctx).getRawSql();
doReturn(getClass()).when(ctx).getSqlObjectType();
doReturn(getClass().getMethod("updatesTimerForSqlObjects")).when(ctx).getSqlObjectMethod();
collector.collect(TimeUnit.SECONDS.toNanos(1), ctx);
final String name = strategy.getStatementName(ctx);
final Timer timer = registry.timer(name);
assertThat(name)
.isEqualTo(name(getClass(), "updatesTimerForSqlObjects"));
assertThat(timer.getSnapshot().getMax())
.isEqualTo(1000000000);
}
|
@Override
public Collection<String> getActualTableNames() {
return actualTableNames;
}
|
@Test
void assertGetActualTableMapper() {
assertThat(new LinkedList<>(ruleAttribute.getActualTableNames()), is(Collections.singletonList("foo_tbl_0")));
}
|
@Override
public void dropDb(String dbName, boolean isForceDrop) throws MetaNotFoundException {
if (listTableNames(dbName).size() != 0) {
throw new StarRocksConnectorException("Database %s not empty", dbName);
}
hmsOps.dropDb(dbName, isForceDrop);
}
|
@Test
public void dropDbTest() {
ExceptionChecker.expectThrowsWithMsg(StarRocksConnectorException.class,
"Database d1 not empty",
() -> hiveMetadata.dropDb("d1", true));
ExceptionChecker.expectThrowsWithMsg(MetaNotFoundException.class,
"Failed to access database empty_db",
() -> hiveMetadata.dropDb("empty_db", true));
}
|
public static void releaseLock(FileLock lock) throws IOException {
String lockPath = LOCK_MAP.remove(lock);
if (lockPath == null) {
throw new LockException("Cannot release unobtained lock");
}
lock.release();
lock.channel().close();
boolean removed = LOCK_HELD.remove(lockPath);
if (!removed) {
throw new LockException("Lock path was not marked as held: " + lockPath);
}
}
|
@Test
public void LockReleaseLock() throws IOException {
FileLockFactory.releaseLock(lock);
}
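Because releaseLock removes the lock from LOCK_MAP before releasing it, releasing the same lock twice finds no mapping and throws. A hedged sketch against the same fixture (assumes JUnit 4.13+ for assertThrows):
@Test
public void releaseLockTwiceThrows() throws IOException {
    FileLockFactory.releaseLock(lock);
    // The first call removed the LOCK_MAP entry, so the second must fail.
    assertThrows(LockException.class, () -> FileLockFactory.releaseLock(lock));
}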
|
public MyNewIssuesNotification newMyNewIssuesNotification(Map<String, UserDto> assigneesByUuid) {
verifyAssigneesByUuid(assigneesByUuid);
return new MyNewIssuesNotification(new DetailsSupplierImpl(assigneesByUuid));
}
|
@Test
public void newMyNewIssuesNotification_DetailsSupplier_getComponentNameByUuid_returns_name_of_project_in_TreeRootHolder() {
treeRootHolder.setRoot(ReportComponent.builder(PROJECT, 1).setUuid("rootUuid").setName("root").build());
MyNewIssuesNotification underTest = this.underTest.newMyNewIssuesNotification(emptyMap());
DetailsSupplier detailsSupplier = readDetailsSupplier(underTest);
assertThat(detailsSupplier.getComponentNameByUuid("rootUuid")).contains("root");
assertThat(detailsSupplier.getComponentNameByUuid("foo")).isEmpty();
}
|
@Override
public void close() throws IOException {
this.inStream.close();
}
|
@Test
void testClose() throws Exception {
final AtomicBoolean closeCalled = new AtomicBoolean(false);
InputStream mockedInputStream =
new InputStream() {
@Override
public int read() {
return 0;
}
@Override
public void close() throws IOException {
closeCalled.set(true);
super.close();
}
};
InputStreamFSInputWrapper wrapper = new InputStreamFSInputWrapper(mockedInputStream);
wrapper.close();
assertThat(closeCalled).isTrue();
}
|
@Deprecated
public static int findCounterIdByRecording(final CountersReader countersReader, final long recordingId)
{
return findCounterIdByRecording(countersReader, recordingId, Aeron.NULL_VALUE);
}
|
@Test
void shouldFindByRecordingIdAndArchiveId()
{
final long recordingId = 42;
final long archiveId = 19;
final int sourceIdentityLength = 10;
final CountersReader countersReader = mock(CountersReader.class);
when(countersReader.maxCounterId()).thenReturn(5);
when(countersReader.getCounterState(anyInt())).thenReturn(RECORD_ALLOCATED);
when(countersReader.getCounterTypeId(0)).thenReturn(0);
when(countersReader.getCounterTypeId(2)).thenReturn(0);
when(countersReader.getCounterTypeId(1)).thenReturn(RECORDING_POSITION_TYPE_ID);
when(countersReader.getCounterTypeId(3)).thenReturn(RECORDING_POSITION_TYPE_ID);
final AtomicBuffer metaBuffer = mock(AtomicBuffer.class);
when(countersReader.metaDataBuffer()).thenReturn(metaBuffer);
when(metaBuffer.getLong(METADATA_LENGTH + KEY_OFFSET + RECORDING_ID_OFFSET)).thenReturn(recordingId);
final int keyOffset = 3 * METADATA_LENGTH + KEY_OFFSET;
when(metaBuffer.getLong(keyOffset + RECORDING_ID_OFFSET)).thenReturn(recordingId);
when(metaBuffer.getInt(keyOffset + SOURCE_IDENTITY_LENGTH_OFFSET)).thenReturn(sourceIdentityLength);
when(metaBuffer.getLong(keyOffset + SOURCE_IDENTITY_OFFSET + sourceIdentityLength)).thenReturn(archiveId);
assertEquals(3, RecordingPos.findCounterIdByRecording(countersReader, recordingId, archiveId));
assertEquals(
NULL_RECORDING_ID,
RecordingPos.findCounterIdByRecording(countersReader, recordingId, Long.MIN_VALUE));
assertEquals(1, RecordingPos.findCounterIdByRecording(countersReader, recordingId, NULL_VALUE));
}
|
@Udf(description = "Converts a string representation of a date in the given format"
+ " into the number of days since 1970-01-01 00:00:00 UTC/GMT.")
public int stringToDate(
@UdfParameter(
description = "The string representation of a date.") final String formattedDate,
@UdfParameter(
description = "The format pattern should be in the format expected by"
+ " java.time.format.DateTimeFormatter.") final String formatPattern) {
// NB: We do not perform a null check here, preferring to throw an exception, as
// there is no sentinel value for a "null" Date.
try {
final DateTimeFormatter formatter = formatters.get(formatPattern);
return ((int)LocalDate.parse(formattedDate, formatter).toEpochDay());
} catch (final ExecutionException | RuntimeException e) {
throw new KsqlFunctionException("Failed to parse date '" + formattedDate
+ "' with formatter '" + formatPattern
+ "': " + e.getMessage(), e);
}
}
|
@Test
public void shouldThrowOnNullDateFormat() {
final Exception e = assertThrows(
KsqlFunctionException.class,
() -> udf.stringToDate("2021-12-01", null)
);
// Then:
assertThat(e.getMessage(), containsString("Failed to parse date '2021-12-01' with formatter 'null'"));
}
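On the non-error path the UDF returns LocalDate.toEpochDay() as an int. A hedged companion check, assuming the formatters cache resolves patterns via DateTimeFormatter.ofPattern:
@Test
public void shouldConvertStringToDate() {
    // 2021-12-01 is 18962 days after 1970-01-01.
    assertThat(udf.stringToDate("2021-12-01", "yyyy-MM-dd"), is(18962));
}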
|
@Override
public Image call() throws LayerPropertyNotFoundException {
try (ProgressEventDispatcher ignored =
progressEventDispatcherFactory.create("building image format", 1);
TimerEventDispatcher ignored2 =
new TimerEventDispatcher(buildContext.getEventHandlers(), DESCRIPTION)) {
// Constructs the image.
Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
// Base image layers
baseImageLayers.forEach(imageBuilder::addLayer);
// Passthrough config and count non-empty history entries
int nonEmptyLayerCount = 0;
for (HistoryEntry historyObject : baseImage.getHistory()) {
imageBuilder.addHistory(historyObject);
if (!historyObject.hasCorrespondingLayer()) {
nonEmptyLayerCount++;
}
}
imageBuilder
.setArchitecture(baseImage.getArchitecture())
.setOs(baseImage.getOs())
.addEnvironment(baseImage.getEnvironment())
.addLabels(baseImage.getLabels())
.setHealthCheck(baseImage.getHealthCheck())
.addExposedPorts(baseImage.getExposedPorts())
.addVolumes(baseImage.getVolumes())
.setUser(baseImage.getUser())
.setWorkingDirectory(baseImage.getWorkingDirectory());
ContainerConfiguration containerConfiguration = buildContext.getContainerConfiguration();
// Add history elements for non-empty layers that don't have one yet
Instant layerCreationTime = containerConfiguration.getCreationTime();
for (int count = 0; count < baseImageLayers.size() - nonEmptyLayerCount; count++) {
imageBuilder.addHistory(
HistoryEntry.builder()
.setCreationTimestamp(layerCreationTime)
.setComment("auto-generated by Jib")
.build());
}
// Add built layers/configuration
for (PreparedLayer applicationLayer : applicationLayers) {
imageBuilder
.addLayer(applicationLayer)
.addHistory(
HistoryEntry.builder()
.setCreationTimestamp(layerCreationTime)
.setAuthor("Jib")
.setCreatedBy(buildContext.getToolName() + ":" + buildContext.getToolVersion())
.setComment(applicationLayer.getName())
.build());
}
imageBuilder
.addEnvironment(containerConfiguration.getEnvironmentMap())
.setCreated(containerConfiguration.getCreationTime())
.setEntrypoint(computeEntrypoint(baseImage, containerConfiguration))
.setProgramArguments(computeProgramArguments(baseImage, containerConfiguration))
.addExposedPorts(containerConfiguration.getExposedPorts())
.addVolumes(containerConfiguration.getVolumes())
.addLabels(containerConfiguration.getLabels());
if (containerConfiguration.getUser() != null) {
imageBuilder.setUser(containerConfiguration.getUser());
}
if (containerConfiguration.getWorkingDirectory() != null) {
imageBuilder.setWorkingDirectory(containerConfiguration.getWorkingDirectory().toString());
}
// Gets the container configuration content descriptor.
return imageBuilder.build();
}
}
|
@Test
public void test_inheritedUser() {
Mockito.when(mockContainerConfiguration.getUser()).thenReturn(null);
Image image =
new BuildImageStep(
mockBuildContext,
mockProgressEventDispatcherFactory,
baseImage,
baseImageLayers,
applicationLayers)
.call();
Assert.assertEquals("base:user", image.getUser());
}
|
static ArgumentParser argParser() {
ArgumentParser parser = ArgumentParsers
.newArgumentParser("producer-performance")
.defaultHelp(true)
.description("This tool is used to verify the producer performance. To enable transactions, " +
"you can specify a transaction id or set a transaction duration using --transaction-duration-ms. " +
"There are three ways to specify the transaction id: set transaction.id=<id> via --producer-props, " +
"set transaction.id=<id> in the config file via --producer.config, or use --transaction-id <id>.");
MutuallyExclusiveGroup payloadOptions = parser
.addMutuallyExclusiveGroup()
.required(true)
.description("either --record-size or --payload-file must be specified but not both.");
parser.addArgument("--topic")
.action(store())
.required(true)
.type(String.class)
.metavar("TOPIC")
.help("produce messages to this topic");
parser.addArgument("--num-records")
.action(store())
.required(true)
.type(Long.class)
.metavar("NUM-RECORDS")
.dest("numRecords")
.help("number of messages to produce");
payloadOptions.addArgument("--record-size")
.action(store())
.required(false)
.type(Integer.class)
.metavar("RECORD-SIZE")
.dest("recordSize")
.help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file " +
"or --payload-monotonic.");
payloadOptions.addArgument("--payload-file")
.action(store())
.required(false)
.type(String.class)
.metavar("PAYLOAD-FILE")
.dest("payloadFile")
.help("file to read the message payloads from. This works only for UTF-8 encoded text files. " +
"Payloads will be read from this file and a payload will be randomly selected when sending messages. " +
"Note that you must provide exactly one of --record-size or --payload-file or --payload-monotonic.");
payloadOptions.addArgument("--payload-monotonic")
.action(storeTrue())
.type(Boolean.class)
.metavar("PAYLOAD-MONOTONIC")
.dest("payloadMonotonic")
.help("payload is monotonically increasing integer. Note that you must provide exactly one of --record-size " +
"or --payload-file or --payload-monotonic.");
parser.addArgument("--payload-delimiter")
.action(store())
.required(false)
.type(String.class)
.metavar("PAYLOAD-DELIMITER")
.dest("payloadDelimiter")
.setDefault("\\n")
.help("provides delimiter to be used when --payload-file is provided. " +
"Defaults to new line. " +
"Note that this parameter will be ignored if --payload-file is not provided.");
parser.addArgument("--throughput")
.action(store())
.required(true)
.type(Double.class)
.metavar("THROUGHPUT")
.help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling.");
parser.addArgument("--producer-props")
.nargs("+")
.required(false)
.metavar("PROP-NAME=PROP-VALUE")
.type(String.class)
.dest("producerConfig")
.help("kafka producer related configuration properties like bootstrap.servers,client.id etc. " +
"These configs take precedence over those passed via --producer.config.");
parser.addArgument("--producer.config")
.action(store())
.required(false)
.type(String.class)
.metavar("CONFIG-FILE")
.dest("producerConfigFile")
.help("producer config properties file.");
parser.addArgument("--print-metrics")
.action(storeTrue())
.type(Boolean.class)
.metavar("PRINT-METRICS")
.dest("printMetrics")
.help("print out metrics at the end of the test.");
parser.addArgument("--transactional-id")
.action(store())
.required(false)
.type(String.class)
.metavar("TRANSACTIONAL-ID")
.dest("transactionalId")
.help("The transactional id to use. This config takes precedence over the transactional.id " +
"specified via --producer.config or --producer-props. Note that if the transactional id " +
"is not specified while --transaction-duration-ms is provided, the default value for the " +
"transactional id will be performance-producer- followed by a random uuid.");
parser.addArgument("--transaction-duration-ms")
.action(store())
.required(false)
.type(Long.class)
.metavar("TRANSACTION-DURATION")
.dest("transactionDurationMs")
.help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. " +
"The value should be greater than 0. If the transactional id is specified via --producer-props, " +
"--producer.config, or --transactional-id but --transaction-duration-ms is not specified, " +
"the default value will be 3000.");
return parser;
}
|
@Test
public void testEnableTransactionByTransactionId() throws IOException, ArgumentParserException {
File producerConfigFile = createTempFile("transactional.id=foobar");
ArgumentParser parser = ProducerPerformance.argParser();
String[] args = new String[]{
"--topic", "Hello-Kafka",
"--num-records", "5",
"--throughput", "100",
"--record-size", "100",
"--producer.config", producerConfigFile.getAbsolutePath(),
"--producer-props", "bootstrap.servers=localhost:9000"};
ProducerPerformance.ConfigPostProcessor configs = new ProducerPerformance.ConfigPostProcessor(parser, args);
assertTrue(configs.transactionsEnabled);
assertEquals(ProducerPerformance.DEFAULT_TRANSACTION_DURATION_MS, configs.transactionDurationMs);
assertEquals("foobar", configs.producerProps.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG));
args = new String[]{
"--topic", "Hello-Kafka",
"--num-records", "5",
"--throughput", "100",
"--record-size", "100",
"--producer.config", producerConfigFile.getAbsolutePath(),
"--producer-props", "bootstrap.servers=localhost:9000", "transactional.id=hello_kafka"};
configs = new ProducerPerformance.ConfigPostProcessor(parser, args);
assertTrue(configs.transactionsEnabled);
assertEquals(ProducerPerformance.DEFAULT_TRANSACTION_DURATION_MS, configs.transactionDurationMs);
assertEquals("hello_kafka", configs.producerProps.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG));
args = new String[]{
"--topic", "Hello-Kafka",
"--num-records", "5",
"--throughput", "100",
"--record-size", "100",
"--transactional-id", "kafka_hello",
"--producer.config", producerConfigFile.getAbsolutePath(),
"--producer-props", "bootstrap.servers=localhost:9000", "transactional.id=hello_kafka"};
configs = new ProducerPerformance.ConfigPostProcessor(parser, args);
assertTrue(configs.transactionsEnabled);
assertEquals(ProducerPerformance.DEFAULT_TRANSACTION_DURATION_MS, configs.transactionDurationMs);
assertEquals("kafka_hello", configs.producerProps.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG));
Utils.delete(producerConfigFile);
}
|
@SuppressWarnings("unchecked")
@Override
public Result execute(Query query, Target target) {
Query adjustedQuery = adjustQuery(query);
switch (target.mode()) {
case ALL_NODES:
adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getAllPartitionIds()).build();
return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
case LOCAL_NODE:
adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getLocalPartitionIds()).build();
return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.LOCAL_NODE);
case PARTITION_OWNER:
int solePartition = target.partitions().solePartition();
adjustedQuery = Query.of(adjustedQuery).partitionIdSet(target.partitions()).build();
if (solePartition >= 0) {
return runOnGivenPartition(adjustedQuery, solePartition);
} else {
return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
}
default:
throw new IllegalArgumentException("Illegal target " + target);
}
}
|
@Test
public void runQueryOnAllPartitions() {
Predicate<Object, Object> predicate = Predicates.equal("this", value);
Query query = Query.of().mapName(map.getName()).predicate(predicate).iterationType(KEY).build();
QueryResult result = queryEngine.execute(query, Target.ALL_NODES);
assertEquals(1, result.size());
assertEquals(key, toObject(result.iterator().next().getKey()));
}
|
@Override
public FinishApplicationMasterResponse finishApplicationMaster(
FinishApplicationMasterRequest request)
throws YarnException, IOException {
try {
return this.rmClient.finishApplicationMaster(request);
} catch (ApplicationMasterNotRegisteredException e) {
LOG.warn("Out of sync with RM " + rmId
+ " for " + this.appId + ", hence resyncing.");
// re register with RM
reRegisterApplicationMaster(this.amRegistrationRequest);
return finishApplicationMaster(request);
}
}
|
@Test
public void testConcurrentReregister() throws YarnException, IOException {
// Set RM restart and failover flag
this.mockAMS.setFailoverFlag();
this.mockAMS.setThrowAlreadyRegister();
relayer.finishApplicationMaster(null);
}
|
@ScalarFunction(value = "noisy_empty_approx_set_sfm", deterministic = false)
@Description("an SfmSketch object representing an empty set")
@SqlType(SfmSketchType.NAME)
public static Slice emptyApproxSet(@SqlType(StandardTypes.DOUBLE) double epsilon,
@SqlType(StandardTypes.BIGINT) long numberOfBuckets,
@SqlType(StandardTypes.BIGINT) long precision)
{
SfmSketchAggregationUtils.validateSketchParameters(epsilon, (int) numberOfBuckets, (int) precision);
SfmSketch sketch = SfmSketch.create((int) numberOfBuckets, (int) precision);
sketch.enablePrivacy(epsilon);
return sketch.serialize();
}
|
@Test
public void testEmptyApproxSet()
{
// with no privacy (epsilon = infinity), an empty approx set should return 0 cardinality
assertFunction("cardinality(noisy_empty_approx_set_sfm(infinity()))", BIGINT, 0L);
assertFunction("cardinality(noisy_empty_approx_set_sfm(infinity(), 4096))", BIGINT, 0L);
assertFunction("cardinality(noisy_empty_approx_set_sfm(infinity(), 4096, 24))", BIGINT, 0L);
}
|
private ScalarOperator reduceDatetimeToDateCast(BinaryPredicateOperator operator) {
ScalarOperator castChild = operator.getChild(0).getChild(0);
ConstantOperator child2 = (ConstantOperator) operator.getChild(1);
if (child2.isNull()) {
return operator;
}
LocalDateTime originalDate = child2.getDate().truncatedTo(ChronoUnit.DAYS);
LocalDateTime targetDate;
BinaryType binaryType = operator.getBinaryType();
int offset;
ScalarOperator resultBinaryPredicateOperator;
ConstantOperator newDatetime;
switch (binaryType) {
case GE:
// when the BinaryType is >= , cast date to equivalent datetime type
// E.g. cast(id_datetime as date) >= 2021-12-28
// optimized to id_datetime >= 2021-12-28 00:00:00.0
offset = 0;
targetDate = originalDate.plusDays(offset);
newDatetime = ConstantOperator.createDatetime(targetDate);
resultBinaryPredicateOperator = BinaryPredicateOperator.ge(castChild, newDatetime);
break;
case GT:
// when the BinaryType is > , cast date to equivalent datetime type of next day
// E.g. cast(id_datetime as date) > 2021-12-28
// optimized to id_datetime >= 2021-12-29 00:00:00.0
offset = 1;
targetDate = originalDate.plusDays(offset);
newDatetime = ConstantOperator.createDatetime(targetDate);
resultBinaryPredicateOperator = BinaryPredicateOperator.ge(castChild, newDatetime);
break;
case LE:
// when the BinaryType is <= , cast date to equivalent datetime type of next day
// E.g. cast(id_datetime as date) <= 2021-12-28
// optimized to id_datetime < 2021-12-29 00:00:00.0
offset = 1;
targetDate = originalDate.plusDays(offset);
newDatetime = ConstantOperator.createDatetime(targetDate);
resultBinaryPredicateOperator = BinaryPredicateOperator.lt(castChild, newDatetime);
break;
case LT:
// when the BinaryType is < , cast date to equivalent datetime type
// E.g. cast(id_datetime as date) < 2021-12-28
// optimized to id_datetime < 2021-12-28 00:00:00.0
offset = 0;
targetDate = originalDate.plusDays(offset);
newDatetime = ConstantOperator.createDatetime(targetDate);
resultBinaryPredicateOperator = BinaryPredicateOperator.lt(castChild, newDatetime);
break;
case EQ:
// when the BinaryType is = , replace it with compound operator
// E.g. cast(id_datetime as date) = 2021-12-28
// optimized to id_datetime >= 2021-12-28 and id_datetime < 2021-12-29
ConstantOperator beginDatetime = ConstantOperator.createDatetime(originalDate.plusDays(0));
ConstantOperator endDatetime = ConstantOperator.createDatetime(originalDate.plusDays(1));
resultBinaryPredicateOperator = CompoundPredicateOperator
.and(BinaryPredicateOperator.ge(castChild, beginDatetime),
BinaryPredicateOperator.lt(castChild, endDatetime));
break;
default:
resultBinaryPredicateOperator = operator;
break;
}
return resultBinaryPredicateOperator;
}
|
@Test
public void testReduceDatetimeToDateCast() {
DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
ReduceCastRule reduceCastRule = new ReduceCastRule();
{
CastOperator castOperator =
new CastOperator(Type.DATE, new ColumnRefOperator(0, Type.DATETIME, "id_datetime", false));
ConstantOperator constantOperator =
ConstantOperator.createDate(LocalDate.parse("2021-12-28").atTime(0, 0, 0, 0));
BinaryPredicateOperator beforeOptimize =
new BinaryPredicateOperator(BinaryType.GE, castOperator, constantOperator);
ScalarOperator afterOptimize = reduceCastRule.apply(
beforeOptimize,
null);
Assert.assertTrue(afterOptimize instanceof BinaryPredicateOperator);
Assert.assertEquals(BinaryType.GE,
((BinaryPredicateOperator) afterOptimize).getBinaryType());
Assert.assertTrue(afterOptimize.getChild(0) instanceof ColumnRefOperator);
Assert.assertTrue(afterOptimize.getChild(1) instanceof ConstantOperator);
Assert.assertEquals("2021-12-28 00:00:00",
((ConstantOperator) afterOptimize.getChild(1)).getDatetime().format(formatter));
}
{
CastOperator castOperator =
new CastOperator(Type.DATE, new ColumnRefOperator(0, Type.DATETIME, "id_datetime", false));
ConstantOperator constantOperator =
ConstantOperator.createDate(LocalDate.parse("2021-12-28").atTime(0, 0, 0, 0));
BinaryPredicateOperator beforeOptimize =
new BinaryPredicateOperator(BinaryType.GT, castOperator, constantOperator);
ScalarOperator afterOptimize = reduceCastRule.apply(
beforeOptimize,
null);
Assert.assertTrue(afterOptimize instanceof BinaryPredicateOperator);
Assert.assertEquals(BinaryType.GE,
((BinaryPredicateOperator) afterOptimize).getBinaryType());
Assert.assertTrue(afterOptimize.getChild(0) instanceof ColumnRefOperator);
Assert.assertTrue(afterOptimize.getChild(1) instanceof ConstantOperator);
Assert.assertEquals("2021-12-29 00:00:00",
((ConstantOperator) afterOptimize.getChild(1)).getDatetime().format(formatter));
}
{
CastOperator castOperator =
new CastOperator(Type.DATE, new ColumnRefOperator(0, Type.DATETIME, "id_datetime", false));
ConstantOperator constantOperator =
ConstantOperator.createDate(LocalDate.parse("2021-12-28").atTime(0, 0, 0, 0));
BinaryPredicateOperator beforeOptimize =
new BinaryPredicateOperator(BinaryType.LE, castOperator, constantOperator);
ScalarOperator afterOptimize = reduceCastRule.apply(
beforeOptimize,
null);
Assert.assertTrue(afterOptimize instanceof BinaryPredicateOperator);
Assert.assertEquals(BinaryType.LT,
((BinaryPredicateOperator) afterOptimize).getBinaryType());
Assert.assertTrue(afterOptimize.getChild(0) instanceof ColumnRefOperator);
Assert.assertTrue(afterOptimize.getChild(1) instanceof ConstantOperator);
Assert.assertEquals("2021-12-29 00:00:00",
((ConstantOperator) afterOptimize.getChild(1)).getDatetime().format(formatter));
}
{
CastOperator castOperator =
new CastOperator(Type.DATE, new ColumnRefOperator(0, Type.DATETIME, "id_datetime", false));
ConstantOperator constantOperator =
ConstantOperator.createDate(LocalDate.parse("2021-12-28").atTime(0, 0, 0, 0));
BinaryPredicateOperator beforeOptimize =
new BinaryPredicateOperator(BinaryType.LT, castOperator, constantOperator);
ScalarOperator afterOptimize = reduceCastRule.apply(
beforeOptimize,
null);
Assert.assertTrue(afterOptimize instanceof BinaryPredicateOperator);
Assert.assertEquals(BinaryType.LT,
((BinaryPredicateOperator) afterOptimize).getBinaryType());
Assert.assertTrue(afterOptimize.getChild(0) instanceof ColumnRefOperator);
Assert.assertTrue(afterOptimize.getChild(1) instanceof ConstantOperator);
Assert.assertEquals("2021-12-28 00:00:00",
((ConstantOperator) afterOptimize.getChild(1)).getDatetime().format(formatter));
}
{
CastOperator castOperator =
new CastOperator(Type.DATE, new ColumnRefOperator(0, Type.DATETIME, "id_datetime", false));
ConstantOperator constantOperator =
ConstantOperator.createDate(LocalDate.parse("2021-12-28").atTime(0, 0, 0, 0));
BinaryPredicateOperator beforeOptimize =
new BinaryPredicateOperator(BinaryType.EQ, castOperator, constantOperator);
ScalarOperator afterOptimize = reduceCastRule.apply(
beforeOptimize,
null);
Assert.assertTrue(afterOptimize instanceof CompoundPredicateOperator);
Assert.assertTrue(((CompoundPredicateOperator) afterOptimize).isAnd());
ScalarOperator left = afterOptimize.getChild(0);
ScalarOperator right = afterOptimize.getChild(1);
Assert.assertTrue(left instanceof BinaryPredicateOperator);
Assert.assertTrue(right instanceof BinaryPredicateOperator);
Assert.assertEquals(BinaryType.GE,
((BinaryPredicateOperator) left).getBinaryType());
Assert.assertTrue(left.getChild(0) instanceof ColumnRefOperator);
Assert.assertTrue(left.getChild(1) instanceof ConstantOperator);
Assert.assertEquals("2021-12-28 00:00:00",
((ConstantOperator) left.getChild(1)).getDatetime().format(formatter));
Assert.assertEquals(BinaryType.LT,
((BinaryPredicateOperator) right).getBinaryType());
Assert.assertTrue(right.getChild(0) instanceof ColumnRefOperator);
Assert.assertTrue(right.getChild(1) instanceof ConstantOperator);
Assert.assertEquals("2021-12-29 00:00:00",
((ConstantOperator) right.getChild(1)).getDatetime().format(formatter));
}
{
CastOperator castOperator =
new CastOperator(Type.DATE, new ColumnRefOperator(0, Type.DATETIME, "id_datetime", false));
ConstantOperator constantOperator = ConstantOperator.createNull(Type.DATE);
BinaryPredicateOperator beforeOptimize = BinaryPredicateOperator.lt(castOperator, constantOperator);
ScalarOperator afterOptimize = reduceCastRule.apply(beforeOptimize, null);
Assert.assertSame(beforeOptimize, afterOptimize);
}
}
|
public ConsumerStatsManager getConsumerStatsManager() {
return this.defaultMQPushConsumerImpl.getConsumerStatsManager();
}
|
@Test
public void testGetConsumerStatsManager() {
ConsumerStatsManager actual = popService.getConsumerStatsManager();
assertNotNull(actual);
assertEquals(consumerStatsManager, actual);
}
|
static void start(Keys key, String value, StringBuilder b) {
b.append(key.name()).append(AuditConstants.KEY_VAL_SEPARATOR).append(value);
}
|
@Test
public void testRouterAuditLoggerWithIP() throws Exception {
Configuration conf = new Configuration();
RPC.setProtocolEngine(conf, TestRpcBase.TestRpcService.class, ProtobufRpcEngine2.class);
// Create server side implementation
MyTestRouterRPCServer serverImpl = new MyTestRouterRPCServer();
BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
.newReflectiveBlockingService(serverImpl);
// start the IPC server
Server server = new RPC.Builder(conf)
.setProtocol(TestRpcBase.TestRpcService.class)
.setInstance(service).setBindAddress("0.0.0.0")
.setPort(0).setNumHandlers(5).setVerbose(true).build();
server.start();
InetSocketAddress address = NetUtils.getConnectAddress(server);
// Make a client connection and test the audit log
TestRpcBase.TestRpcService proxy = null;
try {
proxy = RPC.getProxy(TestRpcBase.TestRpcService.class,
TestRPC.TestProtocol.versionID, address, conf);
// Start the testcase
TestProtos.EmptyRequestProto pingRequest =
TestProtos.EmptyRequestProto.newBuilder().build();
proxy.ping(null, pingRequest);
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
|
@VisibleForTesting
static String formatBonusPercentage(float bonus)
{
final int bonusValue = Math.round(10_000 * bonus);
final float bonusPercent = bonusValue / 100f;
final int bonusPercentInt = (int) bonusPercent;
if (bonusPercent == bonusPercentInt)
{
return String.valueOf(bonusPercentInt);
}
else
{
return String.valueOf(bonusPercent);
}
}
|
@Test
public void testFormatBonusPercentage()
{
assertEquals("33.33", formatBonusPercentage(0.33333333f));
assertEquals("50", formatBonusPercentage(0.5f));
assertEquals("102.5", formatBonusPercentage(1.025f));
assertEquals("105", formatBonusPercentage(1.05f));
assertEquals("110", formatBonusPercentage(1.1f));
assertEquals("115", formatBonusPercentage(1.15f));
assertEquals("120", formatBonusPercentage(1.2f));
assertEquals("150", formatBonusPercentage(1.5f));
assertEquals("250", formatBonusPercentage(2.5f));
assertEquals("300", formatBonusPercentage(3f));
assertEquals("350", formatBonusPercentage(3.5f));
assertEquals("400", formatBonusPercentage(4f));
assertEquals("700", formatBonusPercentage(7f));
}
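Tracing the first two assertions through the rounding path:
Math.round(10_000 * 0.33333333f) = 3333  →  3333 / 100f = 33.33  →  not an integer  →  "33.33"
Math.round(10_000 * 0.5f) = 5000  →  5000 / 100f = 50.0  →  equals its int cast  →  "50"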
|
@JsonProperty
public boolean isReportOnStop() {
return reportOnStop;
}
|
@Test
void reportOnStopCanBeTrue() throws Exception {
config = factory.build(new ResourceConfigurationSourceProvider(), "yaml/metrics-report-on-stop.yml");
assertThat(config.isReportOnStop()).isTrue();
}
|
@Override
public String execute(CommandContext commandContext, String[] args) {
StringBuilder buf = new StringBuilder();
String port = null;
boolean detail = false;
if (args.length > 0) {
for (String part : args) {
if ("-l".equals(part)) {
detail = true;
} else {
if (!StringUtils.isNumber(part)) {
return "Illegal port " + part + ", must be integer.";
}
port = part;
}
}
}
if (StringUtils.isEmpty(port)) {
for (ProtocolServer server : dubboProtocol.getServers()) {
if (buf.length() > 0) {
buf.append("\r\n");
}
if (detail) {
buf.append(server.getUrl().getProtocol())
.append("://")
.append(server.getUrl().getAddress());
} else {
buf.append(server.getUrl().getPort());
}
}
} else {
int p = Integer.parseInt(port);
ProtocolServer protocolServer = null;
for (ProtocolServer s : dubboProtocol.getServers()) {
if (p == s.getUrl().getPort()) {
protocolServer = s;
break;
}
}
if (protocolServer != null) {
ExchangeServer server = (ExchangeServer) protocolServer.getRemotingServer();
Collection<ExchangeChannel> channels = server.getExchangeChannels();
for (ExchangeChannel c : channels) {
if (buf.length() > 0) {
buf.append("\r\n");
}
if (detail) {
buf.append(c.getRemoteAddress()).append(" -> ").append(c.getLocalAddress());
} else {
buf.append(c.getRemoteAddress());
}
}
} else {
buf.append("No such port ").append(port);
}
}
return buf.toString();
}
|
@Test
void testNoPort() throws RemotingException {
String result = port.execute(mockCommandContext, new String[] {"-l", "20880"});
assertEquals("No such port 20880", result);
}
|
protected boolean isWrapperClass(Class<?> clazz) {
Constructor<?>[] constructors = clazz.getConstructors();
for (Constructor<?> constructor : constructors) {
if (constructor.getParameterTypes().length == 1 && constructor.getParameterTypes()[0] == type) {
return true;
}
}
return false;
}
|
@Test
void isWrapperClass() {
assertFalse(getExtensionLoader(Demo.class).isWrapperClass(DemoImpl.class));
assertTrue(getExtensionLoader(Demo.class).isWrapperClass(DemoWrapper.class));
assertTrue(getExtensionLoader(Demo.class).isWrapperClass(DemoWrapper2.class));
}
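A wrapper, in this check, is any extension class exposing a public single-argument constructor whose parameter type is the SPI type itself. A self-contained, hypothetical illustration of the shape being detected:
interface Demo { }

// Detected as a wrapper: the public one-arg constructor's parameter type equals Demo.
class DemoWrapper implements Demo {
    private final Demo delegate;

    public DemoWrapper(Demo delegate) {
        this.delegate = delegate;
    }
}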
|
public void cleanupBeforeRelaunch(Container container)
throws IOException, InterruptedException {
if (container.getLocalizedResources() != null) {
Map<Path, Path> symLinks = resolveSymLinks(
container.getLocalizedResources(), container.getUser());
for (Map.Entry<Path, Path> symLink : symLinks.entrySet()) {
LOG.debug("{} deleting {}", container.getContainerId(),
symLink.getValue());
deleteAsUser(new DeletionAsUserContext.Builder()
.setUser(container.getUser())
.setSubDir(symLink.getValue())
.build());
}
}
}
|
@Test
public void testCleanupBeforeLaunch() throws Exception {
Container container = mock(Container.class);
java.nio.file.Path linkName = Paths.get("target/linkName");
java.nio.file.Path target = Paths.get("target");
// deletes the link if it already exists because of previous test failures
FileUtils.deleteQuietly(linkName.toFile());
Files.createSymbolicLink(linkName.toAbsolutePath(),
target.toAbsolutePath());
Map<Path, List<String>> localResources = new HashMap<>();
localResources.put(new Path(target.toFile().getAbsolutePath()),
Lists.newArrayList(linkName.toFile().getAbsolutePath()));
when(container.getLocalizedResources())
.thenReturn(localResources);
when(container.getUser()).thenReturn(System.getProperty("user.name"));
containerExecutor.cleanupBeforeRelaunch(container);
Assert.assertTrue(!Files.exists(linkName));
}
|
public List<ZAddressRange<Integer>> zOrderSearchCurveIntegers(List<ZValueRange> ranges)
{
List<ZAddressRange<Long>> addressRanges = zOrderSearchCurve(ranges);
List<ZAddressRange<Integer>> integerAddressRanges = new ArrayList<>();
for (ZAddressRange<Long> addressRange : addressRanges) {
checkArgument(
(addressRange.getMinimumAddress() <= Integer.MAX_VALUE) && (addressRange.getMaximumAddress() <= Integer.MAX_VALUE),
format("The address range [%d, %d] contains addresses greater than integers.", addressRange.getMinimumAddress(), addressRange.getMaximumAddress()));
integerAddressRanges.add(new ZAddressRange<>(addressRange.getMinimumAddress().intValue(), addressRange.getMaximumAddress().intValue()));
}
return integerAddressRanges;
}
|
@Test
public void testZOrderSearchCurveMultipleRanges()
{
List<Integer> bitPositions = ImmutableList.of(3);
ZOrder zOrder = new ZOrder(bitPositions);
List<ZValueRange> ranges = ImmutableList.of(new ZValueRange(ImmutableList.of(Optional.empty(), Optional.of(6)), ImmutableList.of(Optional.of(-7), Optional.empty())));
List<ZAddressRange<Integer>> addresses = zOrder.zOrderSearchCurveIntegers(ranges);
assertEquals(addresses, ImmutableList.of(new ZAddressRange<>(0L, 1L), new ZAddressRange<>(14L, 15L)));
bitPositions = ImmutableList.of(3, 1, 2);
zOrder = new ZOrder(bitPositions);
ranges = ImmutableList.of(
new ZValueRange(ImmutableList.of(Optional.empty(), Optional.of(6)), ImmutableList.of(Optional.of(-7), Optional.empty())),
new ZValueRange(ImmutableList.of(Optional.of(0)), ImmutableList.of(Optional.empty())),
new ZValueRange(ImmutableList.of(Optional.of(1)), ImmutableList.of(Optional.of(1))));
addresses = zOrder.zOrderSearchCurveIntegers(ranges);
assertEquals(addresses, ImmutableList.of(
new ZAddressRange<>(194L, 195L), new ZAddressRange<>(210L, 211L),
new ZAddressRange<>(486L, 487L), new ZAddressRange<>(502L, 503L)));
}
|
@SneakyThrows
@Override
public Integer call() throws Exception {
super.call();
PicocliRunner.call(App.class, "flow", "namespace", "--help");
return 0;
}
|
@Test
void runWithNoParam() {
ByteArrayOutputStream out = new ByteArrayOutputStream();
System.setOut(new PrintStream(out));
try (ApplicationContext ctx = ApplicationContext.builder().deduceEnvironment(false).start()) {
String[] args = {};
Integer call = PicocliRunner.call(FlowNamespaceCommand.class, ctx, args);
assertThat(call, is(0));
assertThat(out.toString(), containsString("Usage: kestra flow namespace"));
}
}
|
public Predicate convert(ScalarOperator operator) {
if (operator == null) {
return null;
}
return operator.accept(this, null);
}
|
@Test
public void testGreaterEq() {
ConstantOperator value = ConstantOperator.createInt(5);
ScalarOperator op = new BinaryPredicateOperator(BinaryType.GE, F0, value);
Predicate result = CONVERTER.convert(op);
Assert.assertTrue(result instanceof LeafPredicate);
LeafPredicate leafPredicate = (LeafPredicate) result;
Assert.assertTrue(leafPredicate.function() instanceof GreaterOrEqual);
Assert.assertEquals(5, leafPredicate.literals().get(0));
}
|
@Override
public Result search(Query query, Execution execution) {
Result mergedResults = execution.search(query);
var targets = getTargets(query.getModel().getSources(), query.properties());
warnIfUnresolvedSearchChains(extractErrors(targets), mergedResults.hits());
var prunedTargets = pruneTargetsWithoutDocumentTypes(query.getModel().getRestrict(), extractSpecs(targets));
var regularTargetHandlers = resolveSearchChains(prunedTargets, execution.searchChainRegistry());
query.errors().addAll(regularTargetHandlers.errors());
Set<Target> targetHandlers = new LinkedHashSet<>(regularTargetHandlers.data());
targetHandlers.addAll(getAdditionalTargets(query, execution, targetSelector));
traceTargets(query, targetHandlers);
if (targetHandlers.isEmpty())
return mergedResults;
else if (targetHandlers.size() > 1)
search(query, execution, targetHandlers, mergedResults);
else if (shouldExecuteTargetLongerThanThread(query, targetHandlers.iterator().next()))
search(query, execution, targetHandlers, mergedResults); // one target, but search in separate thread
else
search(query, execution, first(targetHandlers), mergedResults); // search in this thread
return mergedResults;
}
|
@Test
void require_that_hits_are_not_automatically_filled() {
Result result = federationToSingleAddHitSearcher().search();
assertNotFilled(firstHitInFirstGroup(result));
}
|
public abstract long getFirstDataPageOffset();
|
@Test
public void testConversionSmall() {
long small = 1;
ColumnChunkMetaData md = newMD(small);
assertTrue(md instanceof IntColumnChunkMetaData);
assertEquals(small, md.getFirstDataPageOffset());
}
|
@Override
public boolean onActivity() {
return false;
}
|
@Test
public void testOnActivity() {
assertFalse(strategy.onActivity(), "onActivity() should always return false.");
}
|
@Override public String[] getReferencedObjectDescriptions() {
return new String[] {
BaseMessages.getString( PKG, "BaseStreamStepMeta.ReferencedObject.SubTrans.Description" ) };
}
|
@Test
public void testReferencedObjectHasDescription() {
BaseStreamStepMeta meta = new StuffStreamMeta();
assertEquals( 1, meta.getReferencedObjectDescriptions().length );
assertTrue( meta.getReferencedObjectDescriptions()[ 0 ] != null );
testRoundTrip( meta );
}
|
@Override
public Map<String, Optional<HivePartitionDataInfo>> getPartitionDataInfos() {
Map<String, Optional<HivePartitionDataInfo>> partitionDataInfos = Maps.newHashMap();
List<String> partitionNameToFetch = partitionNames;
if (partitionLimit >= 0 && partitionLimit < partitionNames.size()) {
partitionNameToFetch = partitionNames.subList(partitionNames.size() - partitionLimit, partitionNames.size());
}
List<PartitionInfo> partitions =
GlobalStateMgr.getCurrentState().getMetadataMgr().getRemotePartitions(table, partitionNameToFetch);
for (int i = 0; i < partitionNameToFetch.size(); i++) {
PartitionInfo partitionInfo = partitions.get(i);
HivePartitionDataInfo hivePartitionDataInfo = new HivePartitionDataInfo(partitionInfo.getModifiedTime(), 1);
partitionDataInfos.put(partitionNameToFetch.get(i), Optional.of(hivePartitionDataInfo));
}
return partitionDataInfos;
}
|
@Test
public void testGetPartitionDataInfos(@Mocked MetadataMgr metadataMgr) {
List<PartitionInfo> partitionInfoList = createRemotePartitions(4);
new Expectations() {
{
GlobalStateMgr.getCurrentState().getMetadataMgr();
result = metadataMgr;
minTimes = 0;
metadataMgr.getRemotePartitions((Table) any, (List<String>) any);
result = partitionInfoList;
minTimes = 0;
}
};
String location = "hdfs://path_to_file/lineorder_part";
HiveTable hiveTable = createHiveTable(location);
List<String> partitionNames = Lists.newArrayList(
"date=20240501", "date=20240502", "date=20240503", "date=20240504");
TableUpdateArbitrator.UpdateContext updateContext =
new TableUpdateArbitrator.UpdateContext(hiveTable, -1, partitionNames);
TableUpdateArbitrator arbitrator = TableUpdateArbitrator.create(updateContext);
Assert.assertTrue(arbitrator instanceof DirectoryBasedUpdateArbitrator);
Map<String, Optional<HivePartitionDataInfo>> hivePartitionDataInfo = arbitrator.getPartitionDataInfos();
Assert.assertEquals(4, hivePartitionDataInfo.size());
Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240501"));
Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240502"));
Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240503"));
Assert.assertTrue(hivePartitionDataInfo.containsKey("date=20240504"));
}
|
@Override
public Map<String, List<String>> getDockerCommandWithArguments() {
return super.getDockerCommandWithArguments();
}
|
@Test
public void testSetClientConfigDir() {
dockerRunCommand.setClientConfigDir(CLIENT_CONFIG_PATH);
assertEquals(CLIENT_CONFIG_PATH, StringUtils.join(",",
dockerRunCommand.getDockerCommandWithArguments().get("docker-config")));
}
|
public static List<InetSocketAddress> getMasterRpcAddresses(AlluxioConfiguration conf) {
// First check whether rpc addresses are explicitly configured.
if (conf.isSet(PropertyKey.MASTER_RPC_ADDRESSES)) {
return parseInetSocketAddresses(conf.getList(PropertyKey.MASTER_RPC_ADDRESSES));
}
// Fall back on server-side journal configuration.
int rpcPort = NetworkAddressUtils.getPort(NetworkAddressUtils.ServiceType.MASTER_RPC, conf);
return overridePort(getEmbeddedJournalAddresses(conf, ServiceType.MASTER_RAFT), rpcPort);
}
|
@Test
public void getMasterRpcAddressesFallback() {
AlluxioConfiguration conf =
createConf(ImmutableMap.of(
PropertyKey.MASTER_EMBEDDED_JOURNAL_ADDRESSES, "host1:99,host2:100",
PropertyKey.MASTER_RPC_PORT, 50));
assertEquals(
Arrays.asList(InetSocketAddress.createUnresolved("host1", 50),
InetSocketAddress.createUnresolved("host2", 50)),
ConfigurationUtils.getMasterRpcAddresses(conf));
}
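The explicit branch can be pinned down the same way: when MASTER_RPC_ADDRESSES is set, the embedded-journal addresses and the port override are ignored. A sketch reusing the createConf helper, assuming parseInetSocketAddresses keeps the configured ports and leaves hosts unresolved as in the fallback test:
@Test
public void getMasterRpcAddressesExplicit() {
    AlluxioConfiguration conf = createConf(ImmutableMap.of(
        PropertyKey.MASTER_RPC_ADDRESSES, "host1:99,host2:100"));
    assertEquals(
        Arrays.asList(InetSocketAddress.createUnresolved("host1", 99),
            InetSocketAddress.createUnresolved("host2", 100)),
        ConfigurationUtils.getMasterRpcAddresses(conf));
}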
|
@Override
public Map<String, Object> batchInsertOrUpdate(List<ConfigAllInfo> configInfoList, String srcUser, String srcIp,
Map<String, Object> configAdvanceInfo, SameConfigPolicy policy) throws NacosException {
int succCount = 0;
int skipCount = 0;
List<Map<String, String>> failData = null;
List<Map<String, String>> skipData = null;
for (int i = 0; i < configInfoList.size(); i++) {
ConfigAllInfo configInfo = configInfoList.get(i);
try {
ParamUtils.checkParam(configInfo.getDataId(), configInfo.getGroup(), "datumId",
configInfo.getContent());
} catch (NacosException e) {
LogUtil.DEFAULT_LOG.error("data verification failed", e);
throw e;
}
ConfigInfo configInfo2Save = new ConfigInfo(configInfo.getDataId(), configInfo.getGroup(),
configInfo.getTenant(), configInfo.getAppName(), configInfo.getContent());
configInfo2Save.setEncryptedDataKey(
configInfo.getEncryptedDataKey() == null ? StringUtils.EMPTY : configInfo.getEncryptedDataKey());
String type = configInfo.getType();
if (StringUtils.isBlank(type)) {
// simple judgment of file type based on suffix
if (configInfo.getDataId().contains(SPOT)) {
String extName = configInfo.getDataId().substring(configInfo.getDataId().lastIndexOf(SPOT) + 1);
FileTypeEnum fileTypeEnum = FileTypeEnum.getFileTypeEnumByFileExtensionOrFileType(extName);
type = fileTypeEnum.getFileType();
} else {
type = FileTypeEnum.getFileTypeEnumByFileExtensionOrFileType(null).getFileType();
}
}
if (configAdvanceInfo == null) {
configAdvanceInfo = new HashMap<>(16);
}
configAdvanceInfo.put("type", type);
configAdvanceInfo.put("desc", configInfo.getDesc());
boolean success;
try {
ConfigOperateResult configOperateResult = addConfigInfo(srcIp, srcUser, configInfo2Save,
configAdvanceInfo);
success = configOperateResult.isSuccess();
} catch (DataIntegrityViolationException ive) {
success = false;
}
if (success) {
succCount++;
} else {
// uniqueness constraint conflict, or adding the config info failed.
if (SameConfigPolicy.ABORT.equals(policy)) {
failData = new ArrayList<>();
skipData = new ArrayList<>();
Map<String, String> faileditem = new HashMap<>(2);
faileditem.put("dataId", configInfo2Save.getDataId());
faileditem.put("group", configInfo2Save.getGroup());
failData.add(faileditem);
for (int j = (i + 1); j < configInfoList.size(); j++) {
ConfigInfo skipConfigInfo = configInfoList.get(j);
Map<String, String> skipitem = new HashMap<>(2);
skipitem.put("dataId", skipConfigInfo.getDataId());
skipitem.put("group", skipConfigInfo.getGroup());
skipData.add(skipitem);
skipCount++;
}
break;
} else if (SameConfigPolicy.SKIP.equals(policy)) {
skipCount++;
if (skipData == null) {
skipData = new ArrayList<>();
}
Map<String, String> skipitem = new HashMap<>(2);
skipitem.put("dataId", configInfo2Save.getDataId());
skipitem.put("group", configInfo2Save.getGroup());
skipData.add(skipitem);
} else if (SameConfigPolicy.OVERWRITE.equals(policy)) {
succCount++;
updateConfigInfo(configInfo2Save, srcIp, srcUser, configAdvanceInfo);
}
}
}
Map<String, Object> result = new HashMap<>(4);
result.put("succCount", succCount);
result.put("skipCount", skipCount);
if (failData != null && !failData.isEmpty()) {
result.put("failData", failData);
}
if (skipData != null && !skipData.isEmpty()) {
result.put("skipData", skipData);
}
return result;
}
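A minimal, self-contained sketch of the SameConfigPolicy branching above, for illustration only; the enum values and result keys mirror the method, but the class, method name, and forced-conflict flag are hypothetical:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SameConfigPolicySketch {
    enum SameConfigPolicy { ABORT, SKIP, OVERWRITE }

    // Summarizes a batch import in which every insert hits a uniqueness conflict,
    // to show how each policy shapes the succCount/skipCount/failData result.
    static Map<String, Object> importAll(List<String> ids, SameConfigPolicy policy) {
        int succCount = 0;
        int skipCount = 0;
        List<String> failData = new ArrayList<>();
        for (int i = 0; i < ids.size(); i++) {
            boolean inserted = false; // hypothetical: every row conflicts
            if (inserted) {
                succCount++;
                continue;
            }
            if (policy == SameConfigPolicy.ABORT) {
                failData.add(ids.get(i));        // record the conflicting row
                skipCount += ids.size() - i - 1; // everything after it is skipped
                break;
            } else if (policy == SameConfigPolicy.SKIP) {
                skipCount++;                     // leave the existing config untouched
            } else {
                succCount++;                     // OVERWRITE: update in place
            }
        }
        Map<String, Object> result = new HashMap<>(4);
        result.put("succCount", succCount);
        result.put("skipCount", skipCount);
        if (!failData.isEmpty()) {
            result.put("failData", failData);
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(importAll(List.of("a", "b", "c"), SameConfigPolicy.ABORT));
        // e.g. {failData=[a], succCount=0, skipCount=2}
    }
}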
|
@Test
void testBatchInsertOrUpdateSkip() throws NacosException {
List<ConfigAllInfo> configInfoList = new ArrayList<>();
//inserted directly
configInfoList.add(createMockConfigAllInfo(0));
//existing config, skipped under the SKIP policy
configInfoList.add(createMockConfigAllInfo(1));
//inserted directly
configInfoList.add(createMockConfigAllInfo(2));
String srcUser = "srcUser1324";
String srcIp = "srcIp1243";
Map<String, Object> configAdvanceInfo = new HashMap<>();
//mock transactionTemplate and replace
TransactionTemplate transactionTemplateCurrent = Mockito.mock(TransactionTemplate.class);
ReflectionTestUtils.setField(externalConfigInfoPersistService, "tjt", transactionTemplateCurrent);
//mock add: config 1 succeeds, config 2 fails and is skipped, config 3 succeeds
Mockito.when(transactionTemplateCurrent.execute(any()))
.thenReturn(new ConfigOperateResult(true), new ConfigOperateResult(false), new ConfigOperateResult(true));
Map<String, Object> stringObjectMap = externalConfigInfoPersistService.batchInsertOrUpdate(configInfoList, srcUser, srcIp,
configAdvanceInfo, SameConfigPolicy.SKIP);
assertEquals(2, stringObjectMap.get("succCount"));
assertEquals(1, stringObjectMap.get("skipCount"));
assertEquals(configInfoList.get(1).getDataId(), ((List<Map<String, String>>) stringObjectMap.get("skipData")).get(0).get("dataId"));
}
|
@Override
public List<ProductCategoryDO> getCategoryList(ProductCategoryListReqVO listReqVO) {
return productCategoryMapper.selectList(listReqVO);
}
|
@Test
public void testGetCategoryList() {
// mock data
ProductCategoryDO dbCategory = randomPojo(ProductCategoryDO.class, o -> { // expected to be returned by the query
o.setName("奥特曼");
o.setStatus(CommonStatusEnum.ENABLE.getStatus());
o.setParentId(PARENT_ID_NULL);
});
productCategoryMapper.insert(dbCategory);
// name mismatch case
productCategoryMapper.insert(cloneIgnoreId(dbCategory, o -> o.setName("奥特块")));
// status mismatch case
productCategoryMapper.insert(cloneIgnoreId(dbCategory, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
// parentId mismatch case
productCategoryMapper.insert(cloneIgnoreId(dbCategory, o -> o.setParentId(3333L)));
// prepare parameters
ProductCategoryListReqVO reqVO = new ProductCategoryListReqVO();
reqVO.setName("特曼");
reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
reqVO.setParentId(PARENT_ID_NULL);
// invoke
List<ProductCategoryDO> list = productCategoryService.getCategoryList(reqVO);
List<ProductCategoryDO> all = productCategoryService.getCategoryList(new ProductCategoryListReqVO());
// assert
assertEquals(1, list.size());
assertEquals(4, all.size());
assertPojoEquals(dbCategory, list.get(0));
}
|
public static <T> TreeSet<Point<T>> subset(TimeWindow subsetWindow, NavigableSet<Point<T>> points) {
checkNotNull(subsetWindow);
checkNotNull(points);
//if the input collection is empty the output collection will be empty too
if (points.isEmpty()) {
return newTreeSet();
}
Point<T> midPoint = Point.<T>builder()
.time(subsetWindow.instantWithin(.5))
.latLong(0.0, 0.0)
.build();
/*
* Find exactly one point in the actual Track; ideally this point will be in the middle of
* the time window.
*/
Point<T> aPointInTrack = points.floor(midPoint);
if (aPointInTrack == null) {
aPointInTrack = points.ceiling(midPoint);
}
TreeSet<Point<T>> outputSubset = newTreeSet();
//given a starting point... walk backwards in time until you pass startTime.
NavigableSet<Point<T>> headset = points.headSet(aPointInTrack, true);
Iterator<Point<T>> iter = headset.descendingIterator();
while (iter.hasNext()) {
Point<T> pt = iter.next();
if (subsetWindow.contains(pt.time())) {
outputSubset.add(pt);
}
if (pt.time().isBefore(subsetWindow.start())) {
break;
}
}
//given a starting point... walk forwards in time until you pass endTime.
NavigableSet<Point<T>> tailSet = points.tailSet(aPointInTrack, true);
iter = tailSet.iterator();
while (iter.hasNext()) {
Point<T> pt = iter.next();
if (subsetWindow.contains(pt.time())) {
outputSubset.add(pt);
}
if (pt.time().isAfter(subsetWindow.end())) {
break;
}
}
return outputSubset;
}
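A self-contained analogue of the bidirectional scan above, using plain Instants since TimeWindow and Point are project types; only the technique is mirrored, not the API:

import java.time.Instant;
import java.util.NavigableSet;
import java.util.TreeSet;

public class WindowSubsetSketch {
    // Collect every instant inside [start, end] by picking a seed element near the
    // middle of the window and scanning outward in both directions, as subset(...) does.
    static TreeSet<Instant> subset(Instant start, Instant end, NavigableSet<Instant> points) {
        TreeSet<Instant> out = new TreeSet<>();
        if (points.isEmpty()) {
            return out;
        }
        Instant mid = start.plusMillis((end.toEpochMilli() - start.toEpochMilli()) / 2);
        Instant seed = points.floor(mid);
        if (seed == null) {
            seed = points.ceiling(mid);
        }
        // walk backwards in time until we pass the window start
        for (Instant t : points.headSet(seed, true).descendingSet()) {
            if (!t.isBefore(start) && !t.isAfter(end)) {
                out.add(t);
            }
            if (t.isBefore(start)) {
                break;
            }
        }
        // walk forwards in time until we pass the window end
        for (Instant t : points.tailSet(seed, true)) {
            if (!t.isBefore(start) && !t.isAfter(end)) {
                out.add(t);
            }
            if (t.isAfter(end)) {
                break;
            }
        }
        return out;
    }

    public static void main(String[] args) {
        NavigableSet<Instant> points = new TreeSet<>();
        for (int i = 0; i < 10; i++) {
            points.add(Instant.ofEpochSecond(i));
        }
        System.out.println(subset(Instant.ofEpochSecond(3), Instant.ofEpochSecond(6), points));
        // prints the four instants at seconds 3, 4, 5, 6
    }
}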
|
@Test
public void subset_reflectsEndTime() {
Track<NopHit> t1 = createTrackFromFile(getResourceFile("Track1.txt"));
//this is the time of 21st point in the track
Instant endTime = parseNopTime("07/08/2017", "14:10:45.534");
TimeWindow extractionWindow = TimeWindow.of(EPOCH, endTime);
TreeSet<Point<NopHit>> subset = subset(extractionWindow, t1.points());
assertThat(subset, hasSize(21));
assertThat(subset.last().time(), is(endTime));
}
|
@Override
public MenuButton deserializeResponse(String answer) throws TelegramApiRequestException {
return deserializeResponse(answer, MenuButton.class);
}
|
@Test
public void testGetChatMenuButtonDeserializeValidResponse() {
String responseText = "{\"ok\":true,\"result\":{\"type\": \"default\"}}";
GetChatMenuButton getChatMenuButton = GetChatMenuButton
.builder()
.chatId("12345")
.build();
try {
MenuButton result = getChatMenuButton.deserializeResponse(responseText);
assertNotNull(result);
assertTrue(result instanceof MenuButtonDefault);
} catch (TelegramApiRequestException e) {
fail(e.getMessage());
}
}
|
@Override
public Page<RoleInfo> getRoles(int pageNo, int pageSize) {
AuthPaginationHelper<RoleInfo> helper = createPaginationHelper();
String sqlCountRows = "SELECT count(*) FROM (SELECT DISTINCT role FROM roles) roles WHERE ";
String sqlFetchRows = "SELECT role,username FROM roles WHERE ";
String where = " 1=1 ";
Page<RoleInfo> pageInfo = helper.fetchPage(sqlCountRows + where, sqlFetchRows + where,
new ArrayList<String>().toArray(), pageNo, pageSize, ROLE_INFO_ROW_MAPPER);
if (pageInfo == null) {
pageInfo = new Page<>();
pageInfo.setTotalCount(0);
pageInfo.setPageItems(new ArrayList<>());
}
return pageInfo;
}
|
@Test
void testGetRoles() {
Page<RoleInfo> roles = embeddedRolePersistService.getRoles(1, 10);
assertNotNull(roles);
}
|
public String resolve(String ensName) {
if (Strings.isBlank(ensName) || (ensName.trim().length() == 1 && ensName.contains("."))) {
return null;
}
try {
if (isValidEnsName(ensName, addressLength)) {
OffchainResolverContract resolver = obtainOffchainResolver(ensName);
boolean supportWildcard =
resolver.supportsInterface(EnsUtils.ENSIP_10_INTERFACE_ID).send();
byte[] nameHash = NameHash.nameHashAsBytes(ensName);
String resolvedName;
if (supportWildcard) {
String dnsEncoded = NameHash.dnsEncode(ensName);
String addrFunction = resolver.addr(nameHash).encodeFunctionCall();
String lookupDataHex =
resolver.resolve(
Numeric.hexStringToByteArray(dnsEncoded),
Numeric.hexStringToByteArray(addrFunction))
.send();
resolvedName = resolveOffchain(lookupDataHex, resolver, LOOKUP_LIMIT);
} else {
try {
resolvedName = resolver.addr(nameHash).send();
} catch (Exception e) {
throw new RuntimeException("Unable to execute Ethereum request: ", e);
}
}
if (!WalletUtils.isValidAddress(resolvedName)) {
throw new EnsResolutionException(
"Unable to resolve address for name: " + ensName);
} else {
return resolvedName;
}
} else {
return ensName;
}
} catch (Exception e) {
throw new EnsResolutionException(e);
}
}
|
@Test
public void testResolve() throws Exception {
configureSyncing(false);
configureLatestBlock(System.currentTimeMillis() / 1000); // block timestamp is in seconds
NetVersion netVersion = new NetVersion();
netVersion.setResult(Long.toString(ChainIdLong.MAINNET));
String resolverAddress =
"0x0000000000000000000000004c641fb9bad9b60ef180c31f56051ce826d21a9a";
String contractAddress =
"0x00000000000000000000000019e03255f667bdfd50a32722df860b1eeaf4d635";
EthCall resolverAddressResponse = new EthCall();
resolverAddressResponse.setResult(resolverAddress);
EthCall contractAddressResponse = new EthCall();
contractAddressResponse.setResult(contractAddress);
when(web3jService.send(any(Request.class), eq(NetVersion.class))).thenReturn(netVersion);
// stub sequential EthCall responses: the first call resolves the resolver address,
// the second resolves the contract address (two separate when() calls would
// override each other in Mockito, leaving only the last stub active)
when(web3jService.send(any(Request.class), eq(EthCall.class)))
.thenReturn(resolverAddressResponse)
.thenReturn(contractAddressResponse);
assertEquals(
ensResolver.resolve("web3j.eth"), ("0x19e03255f667bdfd50a32722df860b1eeaf4d635"));
}
|
public static Sensor droppedRecordsSensor(final String threadId,
final String taskId,
final StreamsMetricsImpl streamsMetrics) {
return invocationRateAndTotalSensor(
threadId,
taskId,
DROPPED_RECORDS,
DROPPED_RECORDS_RATE_DESCRIPTION,
DROPPED_RECORDS_TOTAL_DESCRIPTION,
RecordingLevel.INFO,
streamsMetrics
);
}
|
@Test
public void shouldGetDroppedRecordsSensor() {
final String operation = "dropped-records";
final String totalDescription = "The total number of dropped records";
final String rateDescription = "The average number of dropped records per second";
when(streamsMetrics.taskLevelSensor(THREAD_ID, TASK_ID, operation, RecordingLevel.INFO)).thenReturn(expectedSensor);
when(streamsMetrics.taskLevelTagMap(THREAD_ID, TASK_ID)).thenReturn(tagMap);
try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
final Sensor sensor = TaskMetrics.droppedRecordsSensor(THREAD_ID, TASK_ID, streamsMetrics);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addInvocationRateToSensor(
expectedSensor,
TASK_LEVEL_GROUP,
tagMap,
operation,
rateDescription
)
);
streamsMetricsStaticMock.verify(
() -> StreamsMetricsImpl.addSumMetricToSensor(
expectedSensor,
TASK_LEVEL_GROUP,
tagMap,
operation,
true,
totalDescription
)
);
assertThat(sensor, is(expectedSensor));
}
}
|
public synchronized long run(JobConfig jobConfig)
throws JobDoesNotExistException, ResourceExhaustedException {
long jobId = getNewJobId();
run(jobConfig, jobId);
return jobId;
}
|
@Test
public void flowControl() throws Exception {
try (MockedStatic<PlanCoordinator> mockStaticPlanCoordinator = mockPlanCoordinator()) {
TestPlanConfig jobConfig = new TestPlanConfig("/test");
for (long i = 0; i < TEST_JOB_MASTER_JOB_CAPACITY; i++) {
mJobMaster.run(jobConfig);
}
try {
mJobMaster.run(jobConfig);
Assert.fail("should not be able to run more jobs than job master capacity");
} catch (ResourceExhaustedException e) {
Assert.assertEquals(ExceptionMessage.JOB_MASTER_FULL_CAPACITY
.getMessage(Configuration.get(PropertyKey.JOB_MASTER_JOB_CAPACITY)),
e.getMessage());
}
}
}
|
@Override
public void publishLong(MetricDescriptor descriptor, long value) {
publishNumber(descriptor, value, LONG);
}
|
@Test
public void when_double_rendering_values_are_reported() {
MetricDescriptor descriptor1 = newDescriptor()
.withMetric("c")
.withTag("tag1", "a")
.withTag("tag2", "b");
jmxPublisher.publishLong(descriptor1, 1L);
MetricDescriptor descriptor2 = newDescriptor()
.withMetric("c")
.withTag("tag1", "a")
.withTag("tag2", "b");
AssertionError assertionError = assertThrows(AssertionError.class, () -> jmxPublisher.publishLong(descriptor2, 2L));
assertTrue(assertionError.getMessage().contains("[metric=c,tag1=a,tag2=b,excludedTargets={}]"));
assertTrue(assertionError.getMessage().contains("Present value: 1, new value: 2"));
}
|
@Override
public HttpServletRequest readRequest(HttpApiV2ProxyRequest request, SecurityContext securityContext, Context lambdaContext, ContainerConfig config) throws InvalidRequestEventException {
if (request.getRequestContext() == null || request.getRequestContext().getHttp().getMethod() == null || request.getRequestContext().getHttp().getMethod().equals("")) {
throw new InvalidRequestEventException(INVALID_REQUEST_ERROR);
}
// clean out the request path based on the container config
request.setRawPath(stripBasePath(request.getRawPath(), config));
AwsHttpApiV2ProxyHttpServletRequest servletRequest = new AwsHttpApiV2ProxyHttpServletRequest(request, lambdaContext, securityContext, config);
servletRequest.setAttribute(HTTP_API_CONTEXT_PROPERTY, request.getRequestContext());
servletRequest.setAttribute(HTTP_API_STAGE_VARS_PROPERTY, request.getStageVariables());
servletRequest.setAttribute(HTTP_API_EVENT_PROPERTY, request);
servletRequest.setAttribute(LAMBDA_CONTEXT_PROPERTY, lambdaContext);
servletRequest.setAttribute(JAX_SECURITY_CONTEXT_PROPERTY, securityContext);
return servletRequest;
}
|
@Test
void baseRequest_read_populatesSuccessfully() {
HttpApiV2ProxyRequest req = new AwsProxyRequestBuilder("/hello", "GET")
.referer("localhost")
.userAgent("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.61 Safari/537.36")
.queryString("param1", "value1")
.header("custom", "value")
.cookie("cookey", "cooval")
.apiId("test").toHttpApiV2Request();
AwsHttpApiV2HttpServletRequestReader reader = new AwsHttpApiV2HttpServletRequestReader();
try {
HttpServletRequest servletRequest = reader.readRequest(req, null, null, LambdaContainerHandler.getContainerConfig());
assertEquals("/hello", servletRequest.getPathInfo());
assertEquals("value1", servletRequest.getParameter("param1"));
assertEquals("value", servletRequest.getHeader("CUSTOM"));
assertEquals("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.5005.61 Safari/537.36", servletRequest.getHeader(HttpHeaders.USER_AGENT));
assertNotNull(servletRequest.getCookies());
assertEquals(1, servletRequest.getCookies().length);
assertEquals("cookey", servletRequest.getCookies()[0].getName());
assertEquals("cooval", servletRequest.getCookies()[0].getValue());
assertNotNull(servletRequest.getAttribute(AwsHttpApiV2HttpServletRequestReader.HTTP_API_CONTEXT_PROPERTY));
assertEquals("test",
((HttpApiV2ProxyRequestContext)servletRequest.getAttribute(AwsHttpApiV2HttpServletRequestReader.HTTP_API_CONTEXT_PROPERTY)).getApiId());
} catch (InvalidRequestEventException e) {
e.printStackTrace();
fail("Could not read request");
}
}
|
public final void moveToEnd(E element) {
if (element.prev() == INVALID_INDEX || element.next() == INVALID_INDEX) {
throw new RuntimeException("Element " + element + " is not in the collection.");
}
Element prevElement = indexToElement(head, elements, element.prev());
Element nextElement = indexToElement(head, elements, element.next());
int slot = prevElement.next();
prevElement.setNext(element.next());
nextElement.setPrev(element.prev());
addToListTail(head, elements, slot);
}
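Semantically moveToEnd is an unlink-and-append. A plain LinkedList equivalent (minus the O(1) slot/index bookkeeping the implicit collection maintains):

import java.util.LinkedList;
import java.util.List;

public class MoveToEndSketch {
    public static void main(String[] args) {
        LinkedList<Integer> list = new LinkedList<>(List.of(1, 2, 3));
        // unlink the element, failing loudly if it is absent, then re-attach at the tail
        if (!list.remove(Integer.valueOf(1))) {
            throw new RuntimeException("Element 1 is not in the collection.");
        }
        list.addLast(1);
        System.out.println(list); // [2, 3, 1] -- matches expectTraversal(..., 2, 3, 1)
    }
}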
|
@Test
public void testMoveToEnd() {
ImplicitLinkedHashCollection<TestElement> coll = new ImplicitLinkedHashCollection<>();
TestElement e1 = new TestElement(1, 1);
TestElement e2 = new TestElement(2, 2);
TestElement e3 = new TestElement(3, 3);
assertTrue(coll.add(e1));
assertTrue(coll.add(e2));
assertTrue(coll.add(e3));
coll.moveToEnd(e1);
expectTraversal(coll.iterator(), 2, 3, 1);
assertThrows(RuntimeException.class, () -> coll.moveToEnd(new TestElement(4, 4)));
}
|
protected GelfMessage toGELFMessage(final Message message) {
final DateTime timestamp;
final Object fieldTimeStamp = message.getField(Message.FIELD_TIMESTAMP);
if (fieldTimeStamp instanceof DateTime) {
timestamp = (DateTime) fieldTimeStamp;
} else {
timestamp = Tools.nowUTC();
}
final GelfMessageLevel messageLevel = extractLevel(message.getField(Message.FIELD_LEVEL));
final String fullMessage = (String) message.getField(Message.FIELD_FULL_MESSAGE);
final String forwarder = GelfOutput.class.getCanonicalName();
final GelfMessageBuilder builder = new GelfMessageBuilder(message.getMessage(), message.getSource())
.timestamp(timestamp.getMillis() / 1000.0d)
.additionalField("_forwarder", forwarder)
.additionalFields(message.getFields());
if (messageLevel != null) {
builder.level(messageLevel);
}
if (fullMessage != null) {
builder.fullMessage(fullMessage);
}
return builder.build();
}
|
@Test
public void testToGELFMessageWithInvalidNumericLevel() throws Exception {
final GelfTransport transport = mock(GelfTransport.class);
final GelfOutput gelfOutput = new GelfOutput(transport);
final DateTime now = DateTime.now(DateTimeZone.UTC);
final Message message = messageFactory.createMessage("Test", "Source", now);
message.addField("level", -1L);
final GelfMessage gelfMessage = gelfOutput.toGELFMessage(message);
assertEquals(GelfMessageLevel.ALERT, gelfMessage.getLevel());
}
|
public void processVerstrekkingAanAfnemer(VerstrekkingAanAfnemer verstrekkingAanAfnemer){
if (logger.isDebugEnabled())
logger.debug("Processing verstrekkingAanAfnemer: {}", marshallElement(verstrekkingAanAfnemer));
Afnemersbericht afnemersbericht = afnemersberichtRepository.findByOnzeReferentie(verstrekkingAanAfnemer.getReferentieId());
if(mismatch(verstrekkingAanAfnemer, afnemersbericht)){
digidXClient.remoteLogBericht(Log.NO_RELATION_TO_SENT_MESSAGE, verstrekkingAanAfnemer, afnemersbericht);
return;
}
switch (verstrekkingAanAfnemer.getGebeurtenissoort().getNaam()) {
case "Null" -> {
logger.info("Start processing Null message");
dglResponseService.processNullMessage(verstrekkingAanAfnemer.getGebeurtenisinhoud().getNull(), afnemersbericht);
digidXClient.remoteLogWithoutRelatingToAccount(Log.MESSAGE_PROCESSED, "Null");
}
case "Ag01" -> {
logger.info("Start processing Ag01 message");
dglResponseService.processAg01(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAg01(), afnemersbericht);
digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
}
case "Ag31" -> {
logger.info("Start processing Ag31 message");
dglResponseService.processAg31(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAg31(), afnemersbericht);
digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
}
case "Af01" -> {
logger.info("Start processing Af01 message");
dglResponseService.processAf01(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAf01(), afnemersbericht);
digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
}
case "Af11" -> {
logger.info("Start processing Af11 message");
dglResponseService.processAf11(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAf11(), afnemersbericht);
digidXClient.remoteLogWithoutRelatingToAccount(Log.MESSAGE_PROCESSED, "Af11");
}
case "Gv01" -> {
logger.info("Start processing Gv01 message");
Gv01 gv01 = verstrekkingAanAfnemer.getGebeurtenisinhoud().getGv01();
dglResponseService.processGv01(gv01);
String bsn = CategorieUtil.findBsnOudeWaarde(gv01.getCategorie());
if (bsn == null) {
bsn = CategorieUtil.findBsn(gv01.getCategorie());
}
digidXClient.remoteLogSpontaneVerstrekking(Log.MESSAGE_PROCESSED, "Gv01", gv01.getANummer(), bsn);
}
case "Ng01" -> {
logger.info("Start processing Ng01 message");
Ng01 ng01 = verstrekkingAanAfnemer.getGebeurtenisinhoud().getNg01();
dglResponseService.processNg01(ng01);
digidXClient.remoteLogSpontaneVerstrekking(Log.MESSAGE_PROCESSED, "Ng01", CategorieUtil.findANummer(ng01.getCategorie()), "");
}
case "Wa11" -> {
logger.info("Start processing Wa11 message");
dglResponseService.processWa11(verstrekkingAanAfnemer.getGebeurtenisinhoud().getWa11());
}
}
}
|
@Test
public void testProcessAf11(){
String testANummer = "SSSSSSSSSS";
Af11 testAf11 = TestDglMessagesUtil.createTestAf11(testANummer);
VerstrekkingInhoudType inhoudType = new VerstrekkingInhoudType();
inhoudType.setAf11(testAf11);
GeversioneerdType type = new GeversioneerdType();
type.setNaam("Af11");
when(verstrekkingAanAfnemer.getReferentieId()).thenReturn("referentieId");
when(afnemersberichtRepository.findByOnzeReferentie("referentieId")).thenReturn(afnemersbericht);
when(verstrekkingAanAfnemer.getGebeurtenissoort()).thenReturn(type);
when(verstrekkingAanAfnemer.getGebeurtenisinhoud()).thenReturn(inhoudType);
when(afnemersbericht.getANummer()).thenReturn(testANummer);
classUnderTest.processVerstrekkingAanAfnemer(verstrekkingAanAfnemer);
verify(dglResponseService, times(1)).processAf11(testAf11, afnemersbericht);
}
|
@Override
public boolean accept(RequestedField field) {
return Message.FIELD_GL2_SOURCE_NODE.equals(field.name()) && acceptsDecorator(field.decorator());
}
|
@Test
void accept() {
Assertions.assertThat(decorator.accept(RequestedField.parse(Message.FIELD_GL2_SOURCE_NODE))).isTrue();
Assertions.assertThat(decorator.accept(RequestedField.parse(Message.FIELD_STREAMS))).isFalse();
}
|
@Override
public int hashCode() {
return Objects.hash(change, changedIssues);
}
|
@Test
public void hashcode_is_based_on_change_and_issues() {
AnalysisChange analysisChange = new AnalysisChange(new Random().nextLong());
ChangedIssue changedIssue = IssuesChangesNotificationBuilderTesting.newChangedIssue("doo", IssuesChangesNotificationBuilderTesting.newProject("prj"),
newRandomNotAHotspotRule("rul"));
ChangesOnMyIssuesNotification underTest = new ChangesOnMyIssuesNotification(analysisChange, ImmutableSet.of(changedIssue));
assertThat(underTest.hashCode())
.isEqualTo(new ChangesOnMyIssuesNotification(analysisChange, ImmutableSet.of(changedIssue)).hashCode())
.isNotEqualTo(mock(Notification.class).hashCode())
.isNotEqualTo(new ChangesOnMyIssuesNotification(new AnalysisChange(analysisChange.getDate() + 10), ImmutableSet.of(changedIssue)).hashCode())
.isNotEqualTo(new ChangesOnMyIssuesNotification(analysisChange, ImmutableSet.of())).hashCode();
}
|
public void addPet(Pet body) throws RestClientException {
addPetWithHttpInfo(body);
}
|
@Test
public void addPetTest() {
Pet body = null;
api.addPet(body);
// TODO: test validations
}
|
@Override
public <R extends MessageResponse<?>> void chatStream(Prompt<R> prompt, StreamResponseListener<R> listener, ChatOptions options) {
LlmClient llmClient = new SseClient();
Map<String, String> headers = new HashMap<>();
headers.put("Content-Type", "application/json");
headers.put("Authorization", "Bearer " + getConfig().getApiKey());
String payload = OpenAiLLmUtil.promptToPayload(prompt, config, options, true);
String endpoint = config.getEndpoint();
LlmClientListener clientListener = new BaseLlmClientListener(this, llmClient, listener, prompt, streamMessageParser, functionMessageParser);
llmClient.start(endpoint + "/v1/chat/completions", headers, payload, clientListener, config);
}
|
@Test
public void testChatOllama() {
OpenAiLlmConfig config = new OpenAiLlmConfig();
config.setEndpoint("http://localhost:11434");
config.setModel("llama3");
// config.setDebug(true);
Llm llm = new OpenAiLlm(config);
llm.chatStream("who are you", new StreamResponseListener<AiMessageResponse>() {
@Override
public void onMessage(ChatContext context, AiMessageResponse response) {
System.out.println(response.getMessage().getContent());
}
});
try {
Thread.sleep(20000);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
|
@Override
public void verify(X509Certificate certificate, Date date) {
logger.debug("Verifying {} issued by {}", certificate.getSubjectX500Principal(),
certificate.getIssuerX500Principal());
// Create trustAnchors
final Set<TrustAnchor> trustAnchors = getTrusted().stream().map(
c -> new TrustAnchor(c, null)
).collect(Collectors.toSet());
if (trustAnchors.isEmpty()) {
throw new VerificationException("No trust anchors available");
}
// Create the selector that specifies the starting certificate
final X509CertSelector selector = new X509CertSelector();
selector.setCertificate(certificate);
// Configure the PKIX certificate builder algorithm parameters
try {
final PKIXBuilderParameters pkixParams = new PKIXBuilderParameters(trustAnchors, selector);
// Set assume date
if (date != null) {
pkixParams.setDate(date);
}
// Add cert store with certificate to check
pkixParams.addCertStore(CertStore.getInstance(
"Collection", new CollectionCertStoreParameters(ImmutableList.of(certificate)), "BC"));
// Add cert store with intermediates
pkixParams.addCertStore(CertStore.getInstance(
"Collection", new CollectionCertStoreParameters(getIntermediates()), "BC"));
// Add cert store with CRLs
pkixParams.addCertStore(CertStore.getInstance(
"Collection", new CollectionCertStoreParameters(getCRLs()), "BC"));
// Toggle to check revocation list
pkixParams.setRevocationEnabled(checkRevocation());
// Build and verify the certification chain
final CertPathBuilder builder = CertPathBuilder.getInstance("PKIX", "BC");
builder.build(pkixParams);
} catch (CertPathBuilderException e) {
throw new VerificationException(
String.format("Invalid certificate %s issued by %s",
certificate.getSubjectX500Principal(), certificate.getIssuerX500Principal()
), e
);
} catch (GeneralSecurityException e) {
throw new CryptoException(
String.format("Could not verify certificate %s issued by %s",
certificate.getSubjectX500Principal(), certificate.getIssuerX500Principal()
), e
);
}
}
|
@Test
public void shouldThrowExceptionIfCertificateIsExpired() {
thrown.expect(VerificationException.class);
thrown.expectMessage("PPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPPP");
createCertificateService(
new String[] { "root.crt" }, new String[] { "intermediate.crt"},
new String[0], false
).verify(readCert("expired.crt"), new Date(1521638102000L));
}
|
@Override
public void accept(Point newPoint) {
//ensure this method is never called by multiple threads at the same time.
parallelismDetector.run(
() -> doAccept(newPoint)
);
}
|
@Test
public void testTrackClosure_atTimeLimit() {
TestConsumer consumer = new TestConsumer();
Duration TIME_LIMIT = Duration.ofSeconds(5);
TrackMaker maker = new TrackMaker(TIME_LIMIT, consumer);
assertTrue(
consumer.numCallsToAccept == 0,
"The consumer has not been access yet"
);
maker.accept(newPoint("track1", Instant.EPOCH));
maker.accept(newPoint("differentTrack", Instant.EPOCH.plus(TIME_LIMIT)));
assertTrue(
consumer.numCallsToAccept == 0,
"The track should not be closed quite yet, we are at the TIME_LIMIT, not over it"
);
}
|
public void remove(String key) {
if (key == null) {
return;
}
Map<String, String> oldMap = copyOnThreadLocal.get();
if (oldMap == null) return;
Integer lastOp = getAndSetLastOperation(WRITE_OPERATION);
if (wasLastOpReadOrNull(lastOp)) {
Map<String, String> newMap = duplicateAndInsertNewMap(oldMap);
newMap.remove(key);
} else {
oldMap.remove(key);
}
}
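The lastOperation bookkeeping implements copy-on-write-after-read: if another thread may still hold the current map (the last operation was a read), the writer clones before mutating. A hedged, self-contained sketch of that intent; the real helpers (duplicateAndInsertNewMap and friends) are logback internals:

import java.util.HashMap;
import java.util.Map;

public class CopyOnWriteMdcSketch {
    static final ThreadLocal<Map<String, String>> copyOnThreadLocal = new ThreadLocal<>();

    // Analogue of remove(): mutate a fresh copy when a reader may hold the current map.
    static void remove(String key, boolean lastOpWasRead) {
        Map<String, String> oldMap = copyOnThreadLocal.get();
        if (oldMap == null) {
            return;
        }
        if (lastOpWasRead) {
            Map<String, String> newMap = new HashMap<>(oldMap); // readers keep the old snapshot
            newMap.remove(key);
            copyOnThreadLocal.set(newMap);
        } else {
            oldMap.remove(key); // no reader observed this map since the last write
        }
    }

    public static void main(String[] args) {
        copyOnThreadLocal.set(new HashMap<>(Map.of("k", "v")));
        Map<String, String> snapshotHeldByReader = copyOnThreadLocal.get();
        remove("k", true);
        System.out.println(snapshotHeldByReader);    // {k=v} -- the reader's snapshot is untouched
        System.out.println(copyOnThreadLocal.get()); // {}
    }
}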
|
@Test
public void removeForNullKeyTest() {
mdcAdapter.remove(null);
}
|
public static void main(String[] args) throws Exception {
Path tikaConfigPath = Paths.get(args[0]);
PipesIterator pipesIterator = PipesIterator.build(tikaConfigPath);
long start = System.currentTimeMillis();
try (AsyncProcessor processor = new AsyncProcessor(tikaConfigPath, pipesIterator)) {
for (FetchEmitTuple t : pipesIterator) {
boolean offered = processor.offer(t, TIMEOUT_MS);
if (!offered) {
throw new TimeoutException("timed out waiting to add a fetch emit tuple");
}
}
processor.finished();
while (true) {
if (processor.checkActive()) {
Thread.sleep(500);
} else {
break;
}
}
long elapsed = System.currentTimeMillis() - start;
LOG.info("Successfully finished processing {} files in {} ms", processor.getTotalProcessed(), elapsed);
}
}
|
@Test
public void testCrash() throws Exception {
Path config = getPath("/configs/tika-config-broken.xml");
assertThrows(TikaConfigException.class, () -> TikaAsyncCLI.main(new String[]{config.toAbsolutePath().toString()}));
}
|
@Override
public CompletableFuture<ConsumeMessageDirectlyResult> consumeMessageDirectly(String address,
ConsumeMessageDirectlyResultRequestHeader requestHeader, long timeoutMillis) {
CompletableFuture<ConsumeMessageDirectlyResult> future = new CompletableFuture<>();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.CONSUME_MESSAGE_DIRECTLY, requestHeader);
remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
if (response.getCode() == ResponseCode.SUCCESS) {
ConsumeMessageDirectlyResult info = ConsumeMessageDirectlyResult.decode(response.getBody(), ConsumeMessageDirectlyResult.class);
future.complete(info);
} else {
log.warn("consumeMessageDirectly getResponseCommand failed, {} {}", response.getCode(), response.getRemark());
future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
}
});
return future;
}
|
@Test
public void assertConsumeMessageDirectlyWithError() {
setResponseError();
ConsumeMessageDirectlyResultRequestHeader requestHeader = mock(ConsumeMessageDirectlyResultRequestHeader.class);
CompletableFuture<ConsumeMessageDirectlyResult> actual = mqClientAdminImpl.consumeMessageDirectly(defaultBrokerAddr, requestHeader, defaultTimeout);
Throwable thrown = assertThrows(ExecutionException.class, actual::get);
assertTrue(thrown.getCause() instanceof MQClientException);
MQClientException mqException = (MQClientException) thrown.getCause();
assertEquals(ResponseCode.SYSTEM_ERROR, mqException.getResponseCode());
assertTrue(mqException.getMessage().contains("CODE: 1 DESC: null"));
}
|
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowReadwriteSplittingRulesStatement sqlStatement, final ContextManager contextManager) {
Collection<LocalDataQueryResultRow> result = new LinkedList<>();
Map<String, Map<String, String>> exportableDataSourceMap = getExportableDataSourceMap(rule);
ReadwriteSplittingRuleConfiguration ruleConfig = rule.getConfiguration();
ruleConfig.getDataSourceGroups().forEach(each -> {
LocalDataQueryResultRow dataItem = buildDataItem(exportableDataSourceMap, each, getLoadBalancers(ruleConfig));
if (null == sqlStatement.getRuleName() || sqlStatement.getRuleName().equalsIgnoreCase(each.getName())) {
result.add(dataItem);
}
});
return result;
}
|
@Test
void assertGetRowDataWithSpecifiedRuleName() throws SQLException {
engine = setUp(new ShowReadwriteSplittingRulesStatement("readwrite_ds", null), createRuleConfiguration());
engine.executeQuery();
Collection<LocalDataQueryResultRow> actual = engine.getRows();
assertThat(actual.size(), is(1));
Iterator<LocalDataQueryResultRow> iterator = actual.iterator();
LocalDataQueryResultRow row = iterator.next();
assertThat(row.getCell(1), is("readwrite_ds"));
assertThat(row.getCell(2), is("ds_primary"));
assertThat(row.getCell(3), is("ds_slave_0,ds_slave_1"));
assertThat(row.getCell(4), is("DYNAMIC"));
assertThat(row.getCell(5), is("random"));
assertThat(row.getCell(6), is("{\"read_weight\":\"2:1\"}"));
}
|
@Override
public RetryStrategy getNextRetryStrategy() {
int nextRemainingRetries = remainingRetries - 1;
Preconditions.checkState(
nextRemainingRetries >= 0, "The number of remaining retries must not be negative");
long nextRetryDelayMillis =
Math.min(currentRetryDelay.plus(increment).toMillis(), maxRetryDelay.toMillis());
return new IncrementalDelayRetryStrategy(
nextRemainingRetries,
Duration.ofMillis(nextRetryDelayMillis),
increment,
maxRetryDelay);
}
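A quick standalone sketch of the schedule this produces; the 5ms initial delay, 5ms increment, and 20ms cap below are illustrative values, not defaults:

import java.time.Duration;

public class IncrementalDelaySketch {
    public static void main(String[] args) {
        Duration delay = Duration.ofMillis(5);
        Duration increment = Duration.ofMillis(5);
        Duration max = Duration.ofMillis(20);
        for (int retry = 1; retry <= 6; retry++) {
            System.out.println("retry " + retry + ": " + delay.toMillis() + "ms");
            // the next delay grows by the increment but never exceeds the cap
            delay = Duration.ofMillis(Math.min(delay.plus(increment).toMillis(), max.toMillis()));
        }
        // prints 5, 10, 15, 20, 20, 20
    }
}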
|
@Test
void testRetryFailure() {
assertThatThrownBy(
() ->
new IncrementalDelayRetryStrategy(
0,
Duration.ofMillis(20L),
Duration.ofMillis(5L),
Duration.ofMillis(20L))
.getNextRetryStrategy())
.isInstanceOf(IllegalStateException.class);
}
|
@Override
public Optional<KsqlConstants.PersistentQueryType> getPersistentQueryType() {
if (!queryPlan.isPresent()) {
return Optional.empty();
}
// CREATE_AS and CREATE_SOURCE commands contain a DDL command and a Query plan.
if (ddlCommand.isPresent()) {
if (ddlCommand.get() instanceof CreateTableCommand
&& ((CreateTableCommand) ddlCommand.get()).getIsSource()) {
return Optional.of(KsqlConstants.PersistentQueryType.CREATE_SOURCE);
} else {
return Optional.of(KsqlConstants.PersistentQueryType.CREATE_AS);
}
} else {
// INSERT INTO persistent queries are the only query types that exist without a
// DDL command linked to the plan.
return Optional.of(KsqlConstants.PersistentQueryType.INSERT);
}
}
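For quick reference, the mapping this implements:

// queryPlan absent                                  -> Optional.empty()
// queryPlan + CreateTableCommand with isSource=true -> CREATE_SOURCE
// queryPlan + any other DDL command                 -> CREATE_AS
// queryPlan present, no DDL command                 -> INSERT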
|
@Test
public void shouldReturnCreateSourcePersistentQueryTypeOnCreateSourceTable() {
// Given:
final CreateTableCommand ddlCommand = Mockito.mock(CreateTableCommand.class);
when(ddlCommand.getIsSource()).thenReturn(true);
final KsqlPlanV1 plan = new KsqlPlanV1(
"stmt",
Optional.of(ddlCommand),
Optional.of(queryPlan1));
// When/Then:
assertThat(plan.getPersistentQueryType(),
is(Optional.of(KsqlConstants.PersistentQueryType.CREATE_SOURCE)));
}
|
public LeaderAndIsr newLeader(int leader) {
return newLeaderAndIsrWithBrokerEpoch(leader, isrWithBrokerEpoch);
}
|
@Test
public void testNewLeader() {
LeaderAndIsr leaderAndIsr = new LeaderAndIsr(2, Arrays.asList(1, 2, 3));
assertEquals(2, leaderAndIsr.leader());
assertEquals(Arrays.asList(1, 2, 3), leaderAndIsr.isr());
LeaderAndIsr newLeaderAndIsr = leaderAndIsr.newLeader(3);
assertEquals(3, newLeaderAndIsr.leader());
assertEquals(Arrays.asList(1, 2, 3), newLeaderAndIsr.isr());
}
|
static Collection<String> getMandatoryJvmOptions(int javaMajorVersion){
return Arrays.stream(MANDATORY_JVM_OPTIONS)
.map(option -> jvmOptionFromLine(javaMajorVersion, option))
.flatMap(Optional::stream)
.collect(Collectors.toUnmodifiableList());
}
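A hedged sketch of the version filtering this relies on. It assumes option lines may carry a min[-max]: prefix (e.g. 17-:) that jvmOptionFromLine resolves against the running major version; the parsing below is illustrative, not the actual parser:

import java.util.List;
import java.util.Optional;

public class JvmOptionFilterSketch {
    // Assumed line format: "min[-max]:option"; unprefixed lines apply to every version.
    static Optional<String> optionFor(int javaMajor, String line) {
        if (!Character.isDigit(line.charAt(0))) {
            return Optional.of(line);
        }
        int colon = line.indexOf(':');
        String[] range = line.substring(0, colon).split("-", -1);
        int min = Integer.parseInt(range[0]);
        int max = range.length == 1 ? min
                : range[1].isEmpty() ? Integer.MAX_VALUE : Integer.parseInt(range[1]);
        return javaMajor >= min && javaMajor <= max
                ? Optional.of(line.substring(colon + 1)) : Optional.empty();
    }

    public static void main(String[] args) {
        List<String> lines = List.of(
                "-Djruby.regexp.interruptible=true",
                "17-:--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED");
        for (int version : new int[] {11, 17}) {
            System.out.println(version + " -> " + lines.stream()
                    .map(l -> optionFor(version, l)).flatMap(Optional::stream).toList());
        }
        // 11 -> [-Djruby.regexp.interruptible=true]
        // 17 -> both options
    }
}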
|
@Test
public void testMandatoryJvmOptionApplicableJvmPresent() throws IOException{
assertTrue("Contains add-exports value for Java 17",
JvmOptionsParser.getMandatoryJvmOptions(17).contains("--add-exports=jdk.compiler/com.sun.tools.javac.api=ALL-UNNAMED"));
}
|
@Override
public boolean putIfAbsent(K key, V value) {
begin();
V oldValue = transactionalMap.putIfAbsent(key, value);
commit();
return oldValue == null;
}
|
@Test
public void testPutIfAbsent() {
map.put(42, "oldValue");
assertTrue(adapter.putIfAbsent(23, "newValue"));
assertFalse(adapter.putIfAbsent(42, "newValue"));
assertEquals("newValue", map.get(23));
assertEquals("oldValue", map.get(42));
}
|
@Override
public void inc() {
inc(1L);
}
|
@Test
public void testCounterSanity() {
long expected = 1000L;
for (int i = 0; i < expected; i++) {
counter.inc();
}
assertSanity(expected);
}
|
@Override
public SeriesSpec apply(final Metric metric) {
metricValidator.validate(metric);
return switch (metric.functionName()) {
case Average.NAME -> Average.builder().field(metric.fieldName()).build();
case Cardinality.NAME -> Cardinality.builder().field(metric.fieldName()).build();
case Count.NAME -> Count.builder().field(metric.fieldName()).build();
case Latest.NAME -> Latest.builder().field(metric.fieldName()).build();
case Max.NAME -> Max.builder().field(metric.fieldName()).build();
case Min.NAME -> Min.builder().field(metric.fieldName()).build();
case Percentage.NAME -> Percentage.builder()
.field(metric.fieldName())
.strategy(metric.configuration() != null ? ((PercentageConfiguration) metric.configuration()).strategy() : null)
.build();
case Percentile.NAME -> Percentile.builder()
.field(metric.fieldName())
.percentile(((PercentileConfiguration) metric.configuration()).percentile())
.build();
case StdDev.NAME -> StdDev.builder().field(metric.fieldName()).build();
case Sum.NAME -> Sum.builder().field(metric.fieldName()).build();
case SumOfSquares.NAME -> SumOfSquares.builder().field(metric.fieldName()).build();
case Variance.NAME -> Variance.builder().field(metric.fieldName()).build();
default -> Count.builder().field(metric.fieldName()).build(); //TODO: do we want to have a default at all?
};
}
|
@Test
void constructsProperSeriesSpec() {
final Metric metric = new Metric("avg", "took_ms");
final SeriesSpec result = toTest.apply(metric);
assertThat(result)
.isNotNull()
.isInstanceOf(Average.class)
.satisfies(a -> assertEquals("took_ms", ((Average) a).field()))
.satisfies(a -> assertEquals(Average.NAME, a.type()));
}
|
@Override
public ResponseHeader execute() throws SQLException {
check(sqlStatement, connectionSession.getConnectionContext().getGrantee());
if (isDropCurrentDatabase(sqlStatement.getDatabaseName())) {
checkSupportedDropCurrentDatabase(connectionSession);
connectionSession.setCurrentDatabaseName(null);
}
if (ProxyContext.getInstance().databaseExists(sqlStatement.getDatabaseName())) {
ProxyContext.getInstance().getContextManager().getPersistServiceFacade().getMetaDataManagerPersistService().dropDatabase(sqlStatement.getDatabaseName());
}
return new UpdateResponseHeader(sqlStatement);
}
|
@Test
void assertExecuteDropNotExistDatabaseWithIfExists() {
when(sqlStatement.getDatabaseName()).thenReturn("test_not_exist_db");
when(sqlStatement.isIfExists()).thenReturn(true);
assertDoesNotThrow(() -> handler.execute());
}
|
@Override
public synchronized Throwable fillInStackTrace() {
// do nothing
return null;
}
|
@Test
void testFillInStackTrace() {
SkipCallbackWrapperException skipCallbackWrapperException = new SkipCallbackWrapperException(new Throwable("error"));
assertNull(skipCallbackWrapperException.fillInStackTrace());
}
|
@OnlyForTest
protected static ThreadPoolExecutor getExecutor(String groupId) {
return GROUP_THREAD_POOLS.getOrDefault(groupId, GlobalThreadPoolHolder.INSTANCE);
}
|
@Test
public void testGlobalExecutor() {
ThreadPoolExecutor executor1 = ThreadPoolsFactory.getExecutor(GROUP_ID_001);
ThreadPoolExecutor executor2 = ThreadPoolsFactory.getExecutor(GROUP_ID_002);
Assert.assertEquals(executor1, executor2);
}
|
private void bfs(int v, int[] cc, int id) {
cc[v] = id;
Queue<Integer> queue = new LinkedList<>();
queue.offer(v);
int n = graph.length;
while (!queue.isEmpty()) {
int t = queue.poll();
for (int i = 0; i < n; i++) {
if (graph[t][i] != 0.0 && cc[i] == -1) {
queue.offer(i);
cc[i] = id;
}
}
}
}
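For reference, a self-contained version of the same flood-fill over an adjacency matrix, with the surrounding Graph bookkeeping inlined:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.List;
import java.util.Queue;

public class ConnectedComponentsSketch {
    // Label each vertex with the id of its connected component via BFS.
    static int[] components(double[][] graph) {
        int n = graph.length;
        int[] cc = new int[n];
        Arrays.fill(cc, -1);
        int id = 0;
        for (int v = 0; v < n; v++) {
            if (cc[v] != -1) {
                continue;
            }
            cc[v] = id;
            Queue<Integer> queue = new ArrayDeque<>(List.of(v));
            while (!queue.isEmpty()) {
                int t = queue.poll();
                for (int i = 0; i < n; i++) {
                    if (graph[t][i] != 0.0 && cc[i] == -1) {
                        cc[i] = id;
                        queue.offer(i);
                    }
                }
            }
            id++;
        }
        return cc;
    }

    public static void main(String[] args) {
        double[][] g = {
                {0, 1, 0, 0},
                {1, 0, 0, 0},
                {0, 0, 0, 1},
                {0, 0, 1, 0},
        };
        System.out.println(Arrays.toString(components(g))); // [0, 0, 1, 1]
    }
}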
|
@Test
public void testBfs() {
System.out.println("bfs sort");
int[] ts = {0, 8, 1, 2, 7, 3, 6, 5, 4, 9, 10, 11, 12};
Graph graph = new AdjacencyMatrix(13, true);
graph.addEdge(8, 7);
graph.addEdge(7, 6);
graph.addEdge(0, 1);
graph.addEdge(0, 2);
graph.addEdge(0, 3);
graph.addEdge(0, 5);
graph.addEdge(0, 6);
graph.addEdge(2, 3);
graph.addEdge(3, 4);
graph.addEdge(3, 5);
graph.addEdge(6, 4);
graph.addEdge(6, 9);
graph.addEdge(4, 9);
graph.addEdge(9, 10);
graph.addEdge(9, 11);
graph.addEdge(9, 12);
graph.addEdge(11, 12);
assertArrayEquals(ts, graph.sortbfs());
}
|
public void setAbandon(boolean abandon) {
this.abandon = abandon;
}
|
@Test
void testSetAbandon() {
assertFalse(connection.isAbandon());
connection.setAbandon(true);
assertTrue(connection.isAbandon());
}
|
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final DelayedHttpEntityCallable<Node> command = new DelayedHttpEntityCallable<Node>(file) {
@Override
public Node call(final HttpEntity entity) throws BackgroundException {
try {
final HttpEntityEnclosingRequestBase request;
if(status.isExists()) {
request = new HttpPut(String.format("%s/api/v1/nodes/%s/revisions", session.getClient().getBasePath(), fileid.getFileId(file)));
}
else {
request = new HttpPost(String.format("%s/api/v1/deepBoxes/%s/boxes/%s/files/%s",
session.getClient().getBasePath(),
fileid.getDeepBoxNodeId(file),
fileid.getBoxNodeId(file),
fileid.getFileId(file.getParent())));
}
final Checksum checksum = status.getChecksum();
if(Checksum.NONE != checksum) {
switch(checksum.algorithm) {
case sha1:
request.addHeader(HttpHeaders.CONTENT_MD5, checksum.hash);
}
}
final MultipartEntityBuilder multipart = MultipartEntityBuilder.create();
multipart.setMode(HttpMultipartMode.BROWSER_COMPATIBLE);
multipart.setCharset(StandardCharsets.UTF_8);
final ByteArrayOutputStream out = new ByteArrayOutputStream();
entity.writeTo(out);
if(status.isExists()) {
multipart.addBinaryBody("file", out.toByteArray(),
null == status.getMime() ? ContentType.APPLICATION_OCTET_STREAM : ContentType.create(status.getMime()), file.getName());
request.setEntity(multipart.build());
return session.getClient().getClient().execute(request, new AbstractResponseHandler<Node>() {
@Override
public Node handleEntity(final HttpEntity entity) throws IOException {
final ObjectReader reader = new JSON().getContext(null).reader(Node.class);
return reader.readValue(entity.getContent());
}
});
}
else {
multipart.addBinaryBody("files", out.toByteArray(),
null == status.getMime() ? ContentType.APPLICATION_OCTET_STREAM : ContentType.create(status.getMime()), file.getName());
request.setEntity(multipart.build());
return session.getClient().getClient().execute(request, new AbstractResponseHandler<Node>() {
@Override
public Node handleEntity(final HttpEntity entity) throws IOException {
final ObjectReader reader = new JSON().getContext(null).readerForArrayOf(Node.class);
final Node[] node = reader.readValue(entity.getContent());
return node[0];
}
});
}
}
catch(HttpResponseException e) {
throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
}
@Override
public long getContentLength() {
return -1L;
}
};
return this.write(file, status, command);
}
|
@Test
public void testOverwrite() throws Exception {
final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
final Path documents = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents/", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path file = new Path(documents, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
new DeepboxTouchFeature(session, nodeid).touch(file, new TransferStatus());
assertTrue(new DefaultFindFeature(session).find(file));
try {
final byte[] content = RandomUtils.nextBytes(2047);
final HttpResponseOutputStream<Node> out = new DeepboxWriteFeature(session, nodeid).write(file,
new TransferStatus().exists(true), new DisabledConnectionCallback());
final ByteArrayInputStream in = new ByteArrayInputStream(content);
final TransferStatus progress = new TransferStatus();
final BytecountStreamListener count = new BytecountStreamListener();
new StreamCopier(progress, progress).withListener(count).transfer(in, out);
assertEquals(content.length, count.getSent());
in.close();
out.close();
assertTrue(new DefaultFindFeature(session).find(file));
assertTrue(new DeepboxFindFeature(session, nodeid).find(file));
assertEquals(content.length, new DeepboxAttributesFinderFeature(session, nodeid).find(file).getSize());
}
finally {
new DeepboxDeleteFeature(session, nodeid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
}
|
static Collection<String> getMandatoryJvmOptions(int javaMajorVersion){
return Arrays.stream(MANDATORY_JVM_OPTIONS)
.map(option -> jvmOptionFromLine(javaMajorVersion, option))
.flatMap(Optional::stream)
.collect(Collectors.toUnmodifiableList());
}
|
@Test
public void testAlwaysMandatoryJvmPresent() {
assertTrue("Contains regexp interruptible for Java 11",
JvmOptionsParser.getMandatoryJvmOptions(11).contains("-Djruby.regexp.interruptible=true"));
assertTrue("Contains regexp interruptible for Java 17",
JvmOptionsParser.getMandatoryJvmOptions(17).contains("-Djruby.regexp.interruptible=true"));
}
|
public Exchange createExchange(Message message, Session session) {
Exchange exchange = createExchange(getExchangePattern());
exchange.setIn(new SjmsMessage(exchange, message, session, getBinding()));
return exchange;
}
|
@Test
public void testInOutExchangePattern() {
try {
Endpoint sjms = context.getEndpoint("sjms:queue:test.SjmsEndpointTest?exchangePattern=" + ExchangePattern.InOut);
assertNotNull(sjms);
assertEquals(ExchangePattern.InOut, sjms.createExchange().getPattern());
} catch (Exception e) {
fail("Exception thrown: " + e.getLocalizedMessage());
}
}
|
@Override
public int countWords(Note note) {
return countChars(note);
}
|
@Test
public void getWords() {
Note note = getNote(1L, "这是中文测试", "這是中文測試\n これは日本語のテストです");
assertEquals(24, new IdeogramsWordCounter().countWords(note));
note.setTitle("这");
assertEquals(19, new IdeogramsWordCounter().countWords(note));
}
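Both assertions appear consistent with countChars counting non-whitespace characters across title and content: 6 title ideograms + 18 content ideograms = 24, then 1 + 18 = 19 once the title shrinks to a single character.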
|
public BucketId getBucketIdForType(ClientParameters.SelectionType type, String id) throws BucketStatsException {
switch (type) {
case DOCUMENT:
return bucketIdFactory.getBucketId(new DocumentId(id));
case BUCKET:
// The internal parser of BucketId is used since Java's Long.decode cannot handle unsigned longs.
return new BucketId(String.format("BucketId(%s)", id));
case GID:
return convertGidToBucketId(id);
case USER:
case GROUP:
try {
BucketSet bucketList = selector.getBucketList(createDocumentSelection(type, id));
if (bucketList.size() != 1) {
String message = String.format("Document selection must map to only one location. " +
"Specified selection matches %d locations.", bucketList.size());
throw new BucketStatsException(message);
}
return bucketList.iterator().next();
} catch (ParseException e) {
throw new BucketStatsException(String.format("Invalid id: %s (%s).", id, e.getMessage()), e);
}
default:
throw new RuntimeException("Unreachable code");
}
}
|
@Test
void testGetBucketId() throws BucketStatsException {
BucketStatsRetriever retriever = createRetriever();
assertEquals("BucketId(0x80000000000004d2)",
retriever.getBucketIdForType(ClientParameters.SelectionType.USER, "1234").toString());
assertEquals("BucketId(0x800000003a7455d7)",
retriever.getBucketIdForType(ClientParameters.SelectionType.GROUP, "mygroup").toString());
assertEquals("BucketId(0x800000003a7455d7)",
retriever.getBucketIdForType(ClientParameters.SelectionType.BUCKET, "0x800000003a7455d7").toString());
assertEquals("BucketId(0xeb018ac5e5732db3)",
retriever.getBucketIdForType(ClientParameters.SelectionType.DOCUMENT, "id:ns:type::another").toString());
assertEquals("BucketId(0xeadd5fe811a2012c)",
retriever.getBucketIdForType(ClientParameters.SelectionType.GID, "0x2c01a21163cb7d0ce85fddd6").toString());
}
|
public void tick() {
// The main loop does two primary things: 1) drive the group membership protocol, responding to rebalance events
// as they occur, and 2) handle external requests targeted at the leader. All the "real" work of the herder is
// performed in this thread, which keeps synchronization straightforward at the cost of some operations possibly
// blocking up this thread (especially those in callbacks due to rebalance events).
try {
// if we failed to read to end of log before, we need to make sure the issue was resolved before joining group
// Joining and immediately leaving for failure to read configs is exceedingly impolite
if (!canReadConfigs) {
if (readConfigToEnd(workerSyncTimeoutMs)) {
canReadConfigs = true;
} else {
return; // Safe to return and tick immediately because readConfigToEnd will do the backoff for us
}
}
log.debug("Ensuring group membership is still active");
String stageDescription = "ensuring membership in the cluster";
member.ensureActive(() -> new TickThreadStage(stageDescription));
completeTickThreadStage();
// Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin
if (!handleRebalanceCompleted()) return;
} catch (WakeupException e) {
// May be due to a request from another thread, or might be stopping. If the latter, we need to check the
// flag immediately. If the former, we need to re-run the ensureActive call since we can't handle requests
// unless we're in the group.
log.trace("Woken up while ensure group membership is still active");
return;
}
if (fencedFromConfigTopic) {
if (isLeader()) {
// We were accidentally fenced out, possibly by a zombie leader
try {
log.debug("Reclaiming write privileges for config topic after being fenced out");
try (TickThreadStage stage = new TickThreadStage("reclaiming write privileges for the config topic")) {
configBackingStore.claimWritePrivileges();
}
fencedFromConfigTopic = false;
log.debug("Successfully reclaimed write privileges for config topic after being fenced out");
} catch (Exception e) {
log.warn("Unable to claim write privileges for config topic. Will backoff and possibly retry if still the leader", e);
backoff(CONFIG_TOPIC_WRITE_PRIVILEGES_BACKOFF_MS);
return;
}
} else {
log.trace("Relinquished write privileges for config topic after being fenced out, since worker is no longer the leader of the cluster");
// We were meant to be fenced out because we fell out of the group and a new leader was elected
fencedFromConfigTopic = false;
}
}
long now = time.milliseconds();
if (checkForKeyRotation(now)) {
log.debug("Distributing new session key");
keyExpiration = Long.MAX_VALUE;
try {
SessionKey newSessionKey = new SessionKey(keyGenerator.generateKey(), now);
writeToConfigTopicAsLeader(
"writing a new session key to the config topic",
() -> configBackingStore.putSessionKey(newSessionKey)
);
} catch (Exception e) {
log.info("Failed to write new session key to config topic; forcing a read to the end of the config topic before possibly retrying", e);
canReadConfigs = false;
return;
}
}
// Process any external requests
// TODO: Some of these can be performed concurrently or even optimized away entirely.
// For example, if three different connectors are slated to be restarted, it's fine to
// restart all three at the same time instead.
// Another example: if multiple configurations are submitted for the same connector,
// the only one that actually has to be written to the config topic is the
// most recent one.
Long scheduledTick = null;
while (true) {
final DistributedHerderRequest next = peekWithoutException();
if (next == null) {
break;
} else if (now >= next.at) {
currentRequest = requests.pollFirst();
} else {
scheduledTick = next.at;
break;
}
runRequest(next.action(), next.callback());
}
// Process all pending connector restart requests
processRestartRequests();
if (scheduledRebalance < Long.MAX_VALUE) {
scheduledTick = scheduledTick != null ? Math.min(scheduledTick, scheduledRebalance) : scheduledRebalance;
rebalanceResolved = false;
log.debug("Scheduled rebalance at: {} (now: {} scheduledTick: {}) ",
scheduledRebalance, now, scheduledTick);
}
if (isLeader() && internalRequestValidationEnabled() && keyExpiration < Long.MAX_VALUE) {
scheduledTick = scheduledTick != null ? Math.min(scheduledTick, keyExpiration) : keyExpiration;
log.debug("Scheduled next key rotation at: {} (now: {} scheduledTick: {}) ",
keyExpiration, now, scheduledTick);
}
// Process any configuration updates
AtomicReference<Set<String>> connectorConfigUpdatesCopy = new AtomicReference<>();
AtomicReference<Set<String>> connectorTargetStateChangesCopy = new AtomicReference<>();
AtomicReference<Set<ConnectorTaskId>> taskConfigUpdatesCopy = new AtomicReference<>();
boolean shouldReturn;
if (member.currentProtocolVersion() == CONNECT_PROTOCOL_V0) {
shouldReturn = updateConfigsWithEager(connectorConfigUpdatesCopy,
connectorTargetStateChangesCopy);
// With eager protocol we should return immediately if needsReconfigRebalance has
// been set to retain the old workflow
if (shouldReturn) {
return;
}
if (connectorConfigUpdatesCopy.get() != null) {
processConnectorConfigUpdates(connectorConfigUpdatesCopy.get());
}
if (connectorTargetStateChangesCopy.get() != null) {
processTargetStateChanges(connectorTargetStateChangesCopy.get());
}
} else {
shouldReturn = updateConfigsWithIncrementalCooperative(connectorConfigUpdatesCopy,
connectorTargetStateChangesCopy, taskConfigUpdatesCopy);
if (connectorConfigUpdatesCopy.get() != null) {
processConnectorConfigUpdates(connectorConfigUpdatesCopy.get());
}
if (connectorTargetStateChangesCopy.get() != null) {
processTargetStateChanges(connectorTargetStateChangesCopy.get());
}
if (taskConfigUpdatesCopy.get() != null) {
processTaskConfigUpdatesWithIncrementalCooperative(taskConfigUpdatesCopy.get());
}
if (shouldReturn) {
return;
}
}
// Let the group take any actions it needs to
try {
long nextRequestTimeoutMs = scheduledTick != null ? Math.max(scheduledTick - time.milliseconds(), 0L) : Long.MAX_VALUE;
log.trace("Polling for group activity; will wait for {}ms or until poll is interrupted by "
+ "either config backing store updates or a new external request",
nextRequestTimeoutMs);
String pollDurationDescription = scheduledTick != null ? "for up to " + nextRequestTimeoutMs + "ms or " : "";
String stageDescription = "polling the group coordinator " + pollDurationDescription + "until interrupted";
member.poll(nextRequestTimeoutMs, () -> new TickThreadStage(stageDescription));
completeTickThreadStage();
// Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin
handleRebalanceCompleted();
} catch (WakeupException e) { // FIXME should not be WakeupException
log.trace("Woken up while polling for group activity");
// Ignore. Just indicates we need to check the exit flag, for requested actions, etc.
}
}
|
@Test
public void testTaskConfigAdded() {
// Task config always requires rebalance
when(member.memberId()).thenReturn("member");
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0);
when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE);
// join
expectRebalance(-1, Collections.emptyList(), Collections.emptyList());
expectMemberPoll();
herder.tick(); // join
// Checks for config updates and starts rebalance
when(configBackingStore.snapshot()).thenReturn(SNAPSHOT);
// Rebalance will be triggered when the new config is detected
doNothing().when(member).requestRejoin();
configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK0, TASK1, TASK2)); // read updated config
herder.tick(); // apply config
// Performs rebalance and gets new assignment
expectRebalance(Collections.emptyList(), Collections.emptyList(),
ConnectProtocol.Assignment.NO_ERROR, 1, Collections.emptyList(),
singletonList(TASK0));
when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true);
herder.tick(); // do rebalance
verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore);
}
|
public static AppsInfo mergeAppsInfo(ArrayList<AppInfo> appsInfo,
boolean returnPartialResult) {
AppsInfo allApps = new AppsInfo();
Map<String, AppInfo> federationAM = new HashMap<>();
Map<String, AppInfo> federationUAMSum = new HashMap<>();
for (AppInfo a : appsInfo) {
// Check if this AppInfo is an AM
if (a.getAMHostHttpAddress() != null) {
// Insert in the list of AM
federationAM.put(a.getAppId(), a);
// Check if any UAMs were found before
if (federationUAMSum.containsKey(a.getAppId())) {
// Merge the current AM with the found UAM
mergeAMWithUAM(a, federationUAMSum.get(a.getAppId()));
// Remove the sum of the UAMs
federationUAMSum.remove(a.getAppId());
}
// This AppInfo is a UAM
} else {
if (federationAM.containsKey(a.getAppId())) {
// Merge the current UAM with its own AM
mergeAMWithUAM(federationAM.get(a.getAppId()), a);
} else if (federationUAMSum.containsKey(a.getAppId())) {
// Merge the current UAM into the running UAM sum and update the map
federationUAMSum.put(a.getAppId(),
mergeUAMWithUAM(federationUAMSum.get(a.getAppId()), a));
} else {
// Insert in the list of UAM
federationUAMSum.put(a.getAppId(), a);
}
}
}
// Keep remaining UAMs only if they are not federation-internal (or partial results are allowed)
for (AppInfo a : federationUAMSum.values()) {
if (returnPartialResult || (a.getName() != null
&& !(a.getName().startsWith(UnmanagedApplicationManager.APP_NAME)
|| a.getName().startsWith(PARTIAL_REPORT)))) {
federationAM.put(a.getAppId(), a);
}
}
allApps.addAll(new ArrayList<>(federationAM.values()));
return allApps;
}
|
@Test
public void testMergeAppsRunning() {
AppsInfo apps = new AppsInfo();
String amHost = "http://i_am_the_AM2:1234";
AppInfo am = new AppInfo();
am.setAppId(APPID2.toString());
am.setAMHostHttpAddress(amHost);
am.setState(YarnApplicationState.RUNNING);
int value = 1000;
setAppInfoRunning(am, value);
apps.add(am);
AppInfo uam1 = new AppInfo();
uam1.setAppId(APPID2.toString());
uam1.setState(YarnApplicationState.RUNNING);
apps.add(uam1);
setAppInfoRunning(uam1, value);
AppInfo uam2 = new AppInfo();
uam2.setAppId(APPID2.toString());
uam2.setState(YarnApplicationState.RUNNING);
apps.add(uam2);
setAppInfoRunning(uam2, value);
// in this case the result does not change even if partial results are enabled
AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
Assert.assertNotNull(result);
Assert.assertEquals(1, result.getApps().size());
AppInfo app = result.getApps().get(0);
Assert.assertEquals(APPID2.toString(), app.getAppId());
Assert.assertEquals(amHost, app.getAMHostHttpAddress());
Assert.assertEquals(value * 3, app.getAllocatedMB());
Assert.assertEquals(value * 3, app.getAllocatedVCores());
Assert.assertEquals(value * 3, app.getReservedMB());
Assert.assertEquals(value * 3, app.getReservedVCores());
Assert.assertEquals(value * 3, app.getRunningContainers());
Assert.assertEquals(value * 3, app.getMemorySeconds());
Assert.assertEquals(value * 3, app.getVcoreSeconds());
Assert.assertEquals(3, app.getResourceRequests().size());
}
|
@Override
public void addVisualizedAutoTrackActivities(List<Class<?>> activitiesList) {
// No-op in this implementation: registered activities are ignored, which is why the
// test below expects isHeatMapActivity to stay false after adding them.
}
|
@Test
public void addVisualizedAutoTrackActivities() {
List<Class<?>> activities = new ArrayList<>();
activities.add(EmptyActivity.class);
activities.add(ListActivity.class);
mSensorsAPI.addVisualizedAutoTrackActivities(activities);
Assert.assertFalse(mSensorsAPI.isHeatMapActivity(EmptyActivity.class));
}
|
@Override
public void delete(final String key) {
try (
Connection connection = dataSource.getConnection();
PreparedStatement preparedStatement = connection.prepareStatement(repositorySQL.getDeleteSQL())) {
preparedStatement.setString(1, key + "%");
preparedStatement.executeUpdate();
} catch (final SQLException ex) {
log.error("Delete {} data by key: {} failed", getType(), key, ex);
}
}
|
@Test
void assertDeleteFailure() throws SQLException {
String key = "key";
when(mockJdbcConnection.prepareStatement(repositorySQL.getDeleteSQL())).thenReturn(mockPreparedStatementForPersist);
repository.delete(key);
verify(mockPreparedStatement, times(0)).executeUpdate();
}
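// A hedged companion sketch (not in the original suite), reusing the mocks above: on the
// happy path, the statement returned by prepareStatement should receive the "key%" pattern
// and exactly one executeUpdate.
@Test
void assertDeleteIssuesSingleUpdate() throws SQLException {
when(mockJdbcConnection.prepareStatement(repositorySQL.getDeleteSQL())).thenReturn(mockPreparedStatementForPersist);
repository.delete("key");
verify(mockPreparedStatementForPersist).setString(1, "key%");
verify(mockPreparedStatementForPersist).executeUpdate();
}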
|
abstract void execute(Admin admin, Namespace ns, PrintStream out) throws Exception;
|
@Test
public void testFindHangingLookupTopicPartitionsForTopic() throws Exception {
String topic = "foo";
String[] args = new String[]{
"--bootstrap-server",
"localhost:9092",
"find-hanging",
"--topic",
topic
};
Node node0 = new Node(0, "localhost", 9092);
Node node1 = new Node(1, "localhost", 9093);
Node node5 = new Node(5, "localhost", 9097);
TopicPartitionInfo partition0 = new TopicPartitionInfo(
0,
node0,
Arrays.asList(node0, node1),
Arrays.asList(node0, node1)
);
TopicPartitionInfo partition1 = new TopicPartitionInfo(
1,
node1,
Arrays.asList(node1, node5),
Arrays.asList(node1, node5)
);
TopicDescription description = new TopicDescription(
topic,
false,
Arrays.asList(partition0, partition1)
);
expectDescribeTopics(singletonMap(topic, description));
DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
Mockito.when(result.all()).thenReturn(completedFuture(emptyMap()));
Mockito.when(admin.describeProducers(
Arrays.asList(new TopicPartition(topic, 0), new TopicPartition(topic, 1)),
new DescribeProducersOptions()
)).thenReturn(result);
execute(args);
assertNormalExit();
assertNoHangingTransactions();
}
|
public String text() {
return text;
}
|
@Test
public void textInfo() {
badge = NodeBadge.text(Status.INFO, TXT);
checkFields(badge, Status.INFO, false, TXT, null);
}
|
public static LocalDate toLocalDate(int value) {
// Decodes a yyMMdd or yyyyMMdd integer; a two-digit year is pinned to the 2000s.
final int day = value % 100;
value /= 100;
final int month = value % 100;
value /= 100;
final int year = value < 1000 ? value + 2000 : value;
return LocalDate.of(year, month, day);
}
|
@Test
public void shouldConvertIntWithFourDigitYearToLocalDate() {
final LocalDate ld = LocalDate.of(2013, 8, 30);
assertEquals(ld, DateUtils.toLocalDate(20130830));
}
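// A hedged companion check, assuming the DateUtils.toLocalDate above: a six-digit
// yyMMdd value has a year below 1000, which the method pins to the 2000s.
@Test
public void shouldConvertIntWithTwoDigitYearToLocalDate() {
assertEquals(LocalDate.of(2013, 8, 30), DateUtils.toLocalDate(130830));
}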
|
public static String validSchemaName(String identifier)
{
return validIdentifier(identifier);
}
|
@Test
public void testValidSchemaName()
{
assertEquals("foo", validSchemaName("foo"));
assertEquals("\"select\"", validSchemaName("select"));
}
|
public User userDTOToUser(AdminUserDTO userDTO) {
if (userDTO == null) {
return null;
} else {
User user = new User();
user.setId(userDTO.getId());
user.setLogin(userDTO.getLogin());
user.setFirstName(userDTO.getFirstName());
user.setLastName(userDTO.getLastName());
user.setEmail(userDTO.getEmail());
user.setImageUrl(userDTO.getImageUrl());
user.setActivated(userDTO.isActivated());
user.setLangKey(userDTO.getLangKey());
Set<Authority> authorities = this.authoritiesFromStrings(userDTO.getAuthorities());
user.setAuthorities(authorities);
return user;
}
}
|
@Test
void userDTOToUserMapWithAuthoritiesStringShouldReturnUserWithAuthorities() {
Set<String> authoritiesAsString = new HashSet<>();
authoritiesAsString.add("ADMIN");
userDto.setAuthorities(authoritiesAsString);
User user = userMapper.userDTOToUser(userDto);
assertThat(user).isNotNull();
assertThat(user.getAuthorities()).isNotNull();
assertThat(user.getAuthorities()).isNotEmpty();
assertThat(user.getAuthorities().iterator().next().getName()).isEqualTo("ADMIN");
}
|
@Override
public boolean isValid() {
// Validate type/devices
type();
devices();
return super.isValid()
&& hasOnlyFields(ALLOWED, NAME, LATITUDE, LONGITUDE, UI_TYPE,
RACK_ADDRESS, OWNER, TYPE, DEVICES, LOC_IN_PEERS);
}
|
@Test(expected = InvalidFieldException.class)
public void sampleInvalidConfig() {
ObjectNode node = new TmpJson()
.props(NAME, TYPE, "foo")
.arrays(DEVICES)
.node();
cfg = new BasicRegionConfig();
cfg.init(regionId(R1), BASIC, node, mapper, delegate);
cfg.isValid();
}
|
public static SpringBeanUtils getInstance() {
return INSTANCE;
}
|
@Test
public void testGetInstance() {
final SpringBeanUtils result = SpringBeanUtils.getInstance();
assertNotNull(result);
}
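// A minimal companion check, assuming the static INSTANCE behind getInstance() and that
// JUnit's assertSame is imported alongside assertNotNull: repeated calls should hand
// back the same singleton.
@Test
public void testGetInstanceReturnsSameInstance() {
assertSame(SpringBeanUtils.getInstance(), SpringBeanUtils.getInstance());
}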
|
@Override
public final void isEqualTo(@Nullable Object other) {
if (Objects.equal(actual, other)) {
return;
}
// Fail but with a more descriptive message:
if (actual == null || !(other instanceof Map)) {
super.isEqualTo(other);
return;
}
containsEntriesInAnyOrder((Map<?, ?>) other, /* allowUnexpected= */ false);
}
|
@Test
public void isEqualToActualNullOtherMap() {
expectFailureWhenTestingThat(null).isEqualTo(ImmutableMap.of());
}
|
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
return context.getSecretSourceResolver().resolve(toInterpolate);
}
|
@Test
public void resolve_JsonWithNewlineInValue() {
String input = "{ \"a\": \"hello\\nworld\", \"b\": 2 }";
environment.set("FOO", input);
String output = resolve("${json:a:${FOO}}");
assertThat(output, equalTo("hello\nworld"));
}
|
public static String toParams(Map<String, ?> paramMap) {
return toParams(paramMap, CharsetUtil.CHARSET_UTF_8);
}
|
@Test
public void toParamsTest() {
final String paramsStr = "uuuu=0&a=b&c=3Ddsssss555555";
final Map<String, List<String>> map = HttpUtil.decodeParams(paramsStr, CharsetUtil.UTF_8);
final String encodedParams = HttpUtil.toParams(map);
assertEquals(paramsStr, encodedParams);
}
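// A hedged companion check, assuming the two-argument overload that the single-argument
// toParams above delegates to: passing CHARSET_UTF_8 explicitly should be equivalent.
@Test
public void toParamsWithExplicitCharsetTest() {
final Map<String, List<String>> map = HttpUtil.decodeParams("a=b&c=d", CharsetUtil.UTF_8);
assertEquals(HttpUtil.toParams(map, CharsetUtil.CHARSET_UTF_8), HttpUtil.toParams(map));
}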
|
@Override
public Set<Pair<Integer, StructLike>> keySet() {
PartitionSet keySet = PartitionSet.create(specs);
for (Entry<Integer, Map<StructLike, V>> specIdAndPartitionMap : partitionMaps.entrySet()) {
int specId = specIdAndPartitionMap.getKey();
Map<StructLike, V> partitionMap = specIdAndPartitionMap.getValue();
for (StructLike partition : partitionMap.keySet()) {
keySet.add(specId, partition);
}
}
return Collections.unmodifiableSet(keySet);
}
|
@Test
public void testKeySet() {
PartitionMap<String> map = PartitionMap.create(SPECS);
map.put(BY_DATA_SPEC.specId(), Row.of("aaa"), "v1");
map.put(BY_DATA_SPEC.specId(), CustomRow.of("ccc"), "v2");
assertThat(map.get(BY_DATA_SPEC.specId(), CustomRow.of("aaa"))).isEqualTo("v1");
assertThat(map.get(BY_DATA_SPEC.specId(), Row.of("ccc"))).isEqualTo("v2");
}
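// The testKeySet above never actually calls keySet(); a hedged companion check,
// assuming the same SPECS and BY_DATA_SPEC fixtures: two distinct partitions should
// yield two (specId, partition) pairs in the unmodifiable key set.
@Test
public void testKeySetSize() {
PartitionMap<String> map = PartitionMap.create(SPECS);
map.put(BY_DATA_SPEC.specId(), Row.of("aaa"), "v1");
map.put(BY_DATA_SPEC.specId(), CustomRow.of("ccc"), "v2");
assertThat(map.keySet()).hasSize(2);
}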
|
public static String unQuote(String string) {
return string == null ? null : string.replaceAll("^\"|\"$", "");
}
|
@Test
public void shouldUnQuote() throws Exception {
assertThat(unQuote("\"Hello World\""), is("Hello World"));
assertThat(unQuote(null), is(nullValue()));
assertThat(unQuote("\"Hello World\" to everyone\""), is("Hello World\" to everyone"));
}
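// A hedged companion check, assuming the unQuote above: the anchors ^\" and \"$ strip
// at most one quote from each end, so nested quotes and unquoted strings survive.
@Test
public void shouldStripOnlyOneQuotePerEnd() throws Exception {
assertThat(unQuote("\"\"nested\"\""), is("\"nested\""));
assertThat(unQuote("unquoted"), is("unquoted"));
}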
|
public AccessPrivilege getAccessPrivilege(InetAddress addr) {
return getAccessPrivilege(addr.getHostAddress(),
addr.getCanonicalHostName());
}
|
@Test
public void testMultiMatchers() throws Exception {
long shortExpirationPeriod = 1 * 1000 * 1000 * 1000; // 1s
NfsExports matcher = new NfsExports(CacheSize, shortExpirationPeriod,
"192.168.0.[0-9]+;[a-z]+.b.com rw");
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname2));
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, address1));
Assert.assertEquals(AccessPrivilege.READ_ONLY,
matcher.getAccessPrivilege(address1, hostname1));
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address2, hostname1));
// address2 will hit the cache
Assert.assertEquals(AccessPrivilege.READ_WRITE,
matcher.getAccessPrivilege(address2, hostname2));
Thread.sleep(1000);
// no cache for address2 now
AccessPrivilege ap;
long startNanos = System.nanoTime();
do {
ap = matcher.getAccessPrivilege(address2, address2);
if (ap == AccessPrivilege.NONE) {
break;
}
Thread.sleep(500);
} while ((System.nanoTime() - startNanos) / NanosPerMillis < 5000);
Assert.assertEquals(AccessPrivilege.NONE, ap);
}
|
static final String generateForFunction(RuleBuilderStep step, FunctionDescriptor<?> function) {
return generateForFunction(step, function, 1);
}
|
@Test
public void generateFunctionWithNoParamGeneration() {
RuleBuilderStep step = RuleBuilderStep.builder().function("function1").build();
final FunctionDescriptor<Boolean> descriptor = FunctionUtil.testFunction(
"function1", ImmutableList.of(
string("required").build(),
integer("optional").optional().build()
), Boolean.class
).descriptor();
assertThat(ParserUtil.generateForFunction(step, descriptor)).isEqualTo(
"function1()"
);
}
|
public KsqlTarget target(final URI server) {
return target(server, Collections.emptyMap());
}
|
@Test
public void shouldRequestStatus() {
// Given:
CommandStatus commandStatus = new CommandStatus(Status.SUCCESS, "msg");
server.setResponseObject(commandStatus);
// When:
KsqlTarget target = ksqlClient.target(serverUri);
RestResponse<CommandStatus> response = target.getStatus("foo");
// Then:
assertThat(server.getHttpMethod(), is(HttpMethod.GET));
assertThat(server.getBody(), nullValue());
assertThat(server.getPath(), is("/status/foo"));
assertThat(server.getHeaders().get("Accept"), is("application/json"));
assertThat(response.get(), is(commandStatus));
}
|
@Nullable
public static String getDisplayNameFromCertificate(@Nonnull X509Certificate certificate, boolean withLocation) throws CertificateParsingException {
X500Name name = new X500Name(certificate.getSubjectX500Principal().getName());
String commonName = null, org = null, location = null, country = null;
for (RDN rdn : name.getRDNs()) {
AttributeTypeAndValue pair = rdn.getFirst();
String val = ((ASN1String) pair.getValue()).getString();
ASN1ObjectIdentifier type = pair.getType();
if (type.equals(RFC4519Style.cn))
commonName = val;
else if (type.equals(RFC4519Style.o))
org = val;
else if (type.equals(RFC4519Style.l))
location = val;
else if (type.equals(RFC4519Style.c))
country = val;
}
String altName = null;
try {
final Collection<List<?>> subjectAlternativeNames = certificate.getSubjectAlternativeNames();
if (subjectAlternativeNames != null)
for (final List<?> subjectAlternativeName : subjectAlternativeNames)
if ((Integer) subjectAlternativeName.get(0) == 1) // rfc822name
altName = (String) subjectAlternativeName.get(1);
} catch (CertificateParsingException e) {
// swallow
}
if (org != null) {
return withLocation ? Stream.of(org, location, country).filter(Objects::nonNull).collect(Collectors.joining(", ")) : org;
} else if (commonName != null) {
return commonName;
} else {
return altName;
}
}
|
@Test
public void testDisplayName() throws Exception {
CertificateFactory cf = CertificateFactory.getInstance("X.509");
X509Certificate clientCert = (X509Certificate) cf.generateCertificate(getClass().getResourceAsStream(
"startssl-client.crt"));
assertEquals("Andreas Schildbach", X509Utils.getDisplayNameFromCertificate(clientCert, false));
X509Certificate comodoCert = (X509Certificate) cf.generateCertificate(getClass().getResourceAsStream(
"comodo-smime.crt"));
assertEquals("[email protected]", X509Utils.getDisplayNameFromCertificate(comodoCert, true));
}
|
public static String toJson(Message message) {
StringWriter json = new StringWriter();
try (JsonWriter jsonWriter = JsonWriter.of(json)) {
write(message, jsonWriter);
}
return json.toString();
}
|
@Test
public void do_not_write_null_wrapper_of_map() {
TestNullableMap msg = TestNullableMap.newBuilder()
.setLabel("world")
.build();
assertThat(toJson(msg)).isEqualTo("{\"label\":\"world\"}");
}
|