focal_method | test_case |
---|---|
public String getProgram() {
return program;
}
|
@Test
public void testRpcCallCacheConstructor(){
RpcCallCache cache = new RpcCallCache("test", 100);
assertEquals("test", cache.getProgram());
}
|
@VisibleForTesting
void validateOldPassword(Long id, String oldPassword) {
AdminUserDO user = userMapper.selectById(id);
if (user == null) {
throw exception(USER_NOT_EXISTS);
}
if (!isPasswordMatch(oldPassword, user.getPassword())) {
throw exception(USER_PASSWORD_FAILED);
}
}
|
@Test
public void testValidateOldPassword_passwordFailed() {
// mock data
AdminUserDO user = randomAdminUserDO();
userMapper.insert(user);
// prepare parameters
Long id = user.getId();
String oldPassword = user.getPassword();
// call and verify the exception
assertServiceException(() -> userService.validateOldPassword(id, oldPassword),
USER_PASSWORD_FAILED);
// verify the invocation
verify(passwordEncoder, times(1)).matches(eq(oldPassword), eq(user.getPassword()));
}
|
@VisibleForTesting
ExportResult<PhotosContainerResource> exportAlbums(
TokensAndUrlAuthData authData, Optional<PaginationData> paginationData, UUID jobId)
throws IOException, InvalidTokenException, PermissionDeniedException {
Optional<String> paginationToken = Optional.empty();
if (paginationData.isPresent()) {
String token = ((StringPaginationToken) paginationData.get()).getToken();
Preconditions.checkArgument(
token.startsWith(ALBUM_TOKEN_PREFIX), "Invalid pagination token " + token);
paginationToken = Optional.of(token.substring(ALBUM_TOKEN_PREFIX.length()));
}
AlbumListResponse albumListResponse;
albumListResponse = getOrCreatePhotosInterface(authData).listAlbums(paginationToken);
PaginationData nextPageData;
String token = albumListResponse.getNextPageToken();
List<PhotoAlbum> albums = new ArrayList<>();
GoogleAlbum[] googleAlbums = albumListResponse.getAlbums();
if (Strings.isNullOrEmpty(token)) {
nextPageData = new StringPaginationToken(PHOTO_TOKEN_PREFIX);
} else {
nextPageData = new StringPaginationToken(ALBUM_TOKEN_PREFIX + token);
}
ContinuationData continuationData = new ContinuationData(nextPageData);
if (googleAlbums != null && googleAlbums.length > 0) {
for (GoogleAlbum googleAlbum : googleAlbums) {
// Add album info to list so album can be recreated later
PhotoAlbum photoAlbum = new PhotoAlbum(googleAlbum.getId(), googleAlbum.getTitle(), null);
albums.add(photoAlbum);
monitor.debug(
() ->
String.format("%s: Google Photos exporting album: %s", jobId, photoAlbum.getId()));
// Add album id to continuation data
continuationData.addContainerResource(new IdOnlyContainerResource(googleAlbum.getId()));
}
}
ResultType resultType = ResultType.CONTINUE;
PhotosContainerResource containerResource = new PhotosContainerResource(albums, null);
return new ExportResult<>(resultType, containerResource, continuationData);
}
|
@Test
public void exportAlbumFirstSet() throws IOException, InvalidTokenException, PermissionDeniedException {
setUpSingleAlbum();
when(albumListResponse.getNextPageToken()).thenReturn(ALBUM_TOKEN);
// Run test
ExportResult<PhotosContainerResource> result =
googlePhotosExporter.exportAlbums(null, Optional.empty(), uuid);
// Check results
// Verify correct methods were called
verify(photosInterface).listAlbums(Optional.empty());
verify(albumListResponse).getAlbums();
// Check pagination token
ContinuationData continuationData = result.getContinuationData();
StringPaginationToken paginationToken =
(StringPaginationToken) continuationData.getPaginationData();
assertThat(paginationToken.getToken()).isEqualTo(ALBUM_TOKEN_PREFIX + ALBUM_TOKEN);
// Check albums field of container
Collection<PhotoAlbum> actualAlbums = result.getExportedData().getAlbums();
assertThat(actualAlbums.stream().map(PhotoAlbum::getId).collect(Collectors.toList()))
.containsExactly(ALBUM_ID);
// Check photos field of container (should be empty, even though there is a photo in the
// original album)
Collection<PhotoModel> actualPhotos = result.getExportedData().getPhotos();
assertThat(actualPhotos).isEmpty();
// Should be one container in the resource list
List<ContainerResource> actualResources = continuationData.getContainerResources();
assertThat(
actualResources.stream()
.map(a -> ((IdOnlyContainerResource) a).getId())
.collect(Collectors.toList()))
.containsExactly(ALBUM_ID);
}
|
public int generate(Class<? extends CustomResource> crdClass, Writer out) throws IOException {
ObjectNode node = nf.objectNode();
Crd crd = crdClass.getAnnotation(Crd.class);
if (crd == null) {
err(crdClass + " is not annotated with @Crd");
} else {
node.put("apiVersion", "apiextensions.k8s.io/" + crdApiVersion)
.put("kind", "CustomResourceDefinition")
.putObject("metadata")
.put("name", crd.spec().names().plural() + "." + crd.spec().group());
if (!labels.isEmpty()) {
((ObjectNode) node.get("metadata"))
.putObject("labels")
.setAll(labels.entrySet().stream()
.collect(Collectors.<Map.Entry<String, String>, String, JsonNode, LinkedHashMap<String, JsonNode>>toMap(
Map.Entry::getKey,
e -> new TextNode(
e.getValue()
.replace("%group%", crd.spec().group())
.replace("%plural%", crd.spec().names().plural())
.replace("%singular%", crd.spec().names().singular())),
(x, y) -> x,
LinkedHashMap::new)));
}
node.set("spec", buildSpec(crdApiVersion, crd.spec(), crdClass));
}
mapper.writeValue(out, node);
return numErrors;
}
|
@Test
void simpleTestWithErrors() throws IOException {
CrdGenerator crdGenerator = new CrdGenerator(KubeVersion.V1_16_PLUS, ApiVersion.V1, CrdGenerator.YAML_MAPPER,
emptyMap(), crdGeneratorReporter, emptyList(), null, null,
new CrdGenerator.NoneConversionStrategy(), null);
StringWriter w = new StringWriter();
crdGenerator.generate(ExampleCrdWithErrors.class, w);
assertTrue(errors.contains("class io.strimzi.crdgenerator.ExampleCrdWithErrors is missing @JsonInclude"), errors.toString());
assertFalse(errors.contains("class io.strimzi.crdgenerator.ExampleCrdWithErrors$ObjectProperty is missing @JsonInclude"), errors.toString());
assertTrue(errors.contains("class io.strimzi.crdgenerator.ExampleCrdWithErrors is missing @JsonPropertyOrder"), errors.toString());
assertFalse(errors.contains("class io.strimzi.crdgenerator.ExampleCrdWithErrors$ObjectProperty is missing @JsonPropertyOrder"), errors.toString());
assertTrue(errors.contains("class io.strimzi.crdgenerator.ExampleCrdWithErrors$ObjectProperty has a property bar which is not in the @JsonPropertyOrder"), errors.toString());
}
|
public Message createMessage(String messageString) {
Message message;
try {
Map<String, Object> map = objectMapper.readValue(messageString, Map.class);
if (!map.containsKey("_id")) {
map.put("_id", UUID.randomUUID().toString());
}
final String messageField = "message"; // message field must be of type string
if (map.containsKey(messageField) && !(map.get(messageField) instanceof String)) {
map.put(messageField, String.valueOf(map.get(messageField)));
}
message = messageFactory.createMessage(map);
} catch (JacksonException e) {
message = messageFactory.createMessage(messageString, "127.0.0.1", DateTime.now(DateTimeZone.UTC));
if (StringUtils.startsWith(StringUtils.trim(messageString), "{")) {
message.addField("gl2_simulator_json_error",
"Cannot parse simulation message as JSON. Using as raw message field instead: " + e.getMessage());
}
}
return message;
}
|
@Test
void createMessage() {
String notAJsonMessage = "{Not a json message}";
Message result = ruleSimulator.createMessage(notAJsonMessage);
Assertions.assertEquals(result.getMessage(), notAJsonMessage);
}
|
public CodegenTableDO buildTable(TableInfo tableInfo) {
CodegenTableDO table = CodegenConvert.INSTANCE.convert(tableInfo);
initTableDefault(table);
return table;
}
|
@Test
public void testBuildTable() {
// prepare parameters
TableInfo tableInfo = mock(TableInfo.class);
// mock methods
when(tableInfo.getName()).thenReturn("system_user");
when(tableInfo.getComment()).thenReturn("用户");
// call
CodegenTableDO table = codegenBuilder.buildTable(tableInfo);
// assert
assertEquals("system_user", table.getTableName());
assertEquals("用户", table.getTableComment());
assertEquals("system", table.getModuleName());
assertEquals("user", table.getBusinessName());
assertEquals("User", table.getClassName());
assertEquals("用户", table.getClassComment());
}
|
public List<PluginInfo> getExtensionInfos() {
return new ArrayList<>(this);
}
|
@Test
public void shouldGetAllIndividualExtensionInfos() {
NotificationPluginInfo notificationPluginInfo = new NotificationPluginInfo(null, null);
PluggableTaskPluginInfo pluggableTaskPluginInfo = new PluggableTaskPluginInfo(null, null, null);
CombinedPluginInfo pluginInfo = new CombinedPluginInfo(List.of(pluggableTaskPluginInfo, notificationPluginInfo));
assertThat(pluginInfo.getExtensionInfos(), containsInAnyOrder(notificationPluginInfo, pluggableTaskPluginInfo));
}
|
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
ParseContext context) throws IOException, SAXException, TikaException {
metadata.set(Metadata.CONTENT_TYPE, XLF_CONTENT_TYPE.toString());
final XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
XMLReaderUtils.parseSAX(CloseShieldInputStream.wrap(stream),
new XLIFF12ContentHandler(xhtml, metadata), context);
}
|
@Test
public void testXLIFF12() throws Exception {
try (InputStream input = getResourceAsStream("/test-documents/testXLIFF12.xlf")) {
Metadata metadata = new Metadata();
ContentHandler handler = new BodyContentHandler();
new XLIFF12Parser().parse(input, handler, metadata, new ParseContext());
String content = handler.toString();
assertContains("Hooray", content);
assertEquals("2", metadata.get("file-count"));
assertEquals("4", metadata.get("tu-count"));
assertEquals("en", metadata.get("source-language"));
assertEquals("fr", metadata.get("target-language"));
}
}
|
@Override
public String getContextName() {
return contextName;
}
|
@Test
public void testGetContextName() {
assertEquals(contextName, defaultSnmpv3Device.getContextName());
}
|
@Override
public boolean supportsSubqueriesInQuantifieds() {
return false;
}
|
@Test
void assertSupportsSubqueriesInQuantifieds() {
assertFalse(metaData.supportsSubqueriesInQuantifieds());
}
|
public RuntimeOptionsBuilder parse(Class<?> clazz) {
RuntimeOptionsBuilder args = new RuntimeOptionsBuilder();
for (Class<?> classWithOptions = clazz; hasSuperClass(
classWithOptions); classWithOptions = classWithOptions.getSuperclass()) {
CucumberOptions options = requireNonNull(optionsProvider).getOptions(classWithOptions);
if (options != null) {
addDryRun(options, args);
addMonochrome(options, args);
addTags(classWithOptions, options, args);
addPlugins(options, args);
addPublish(options, args);
addName(options, args);
addSnippets(options, args);
addGlue(options, args);
addFeatures(options, args);
addObjectFactory(options, args);
addUuidGenerator(options, args);
}
}
addDefaultFeaturePathIfNoFeaturePathIsSpecified(args, clazz);
addDefaultGlueIfNoOverridingGlueIsSpecified(args, clazz);
return args;
}
|
@Test
void create_with_glue() {
RuntimeOptions runtimeOptions = parser().parse(ClassWithGlue.class).build();
assertThat(runtimeOptions.getGlue(),
contains(uri("classpath:/app/features/user/registration"), uri("classpath:/app/features/hooks")));
}
|
static ByteBuffer fromEncodedKey(ByteString encodedKey) {
return ByteBuffer.wrap(encodedKey.toByteArray());
}
|
@Test
@SuppressWarnings("ByteBufferBackingArray")
public void testFromEncodedKey() {
ByteString input = ByteString.copyFrom("hello world".getBytes(StandardCharsets.UTF_8));
ByteBuffer encodedKey = FlinkKeyUtils.fromEncodedKey(input);
assertThat(encodedKey.array(), is(input.toByteArray()));
}
|
@Override
public Object getConfig(String key) {
return SOURCE.get(key);
}
|
@Test
public void getConfig() {
final Object config = source.getConfig(OriginConfigDisableSource.ZK_CONFIG_CENTER_ENABLED);
Assert.assertNotNull(config);
Assert.assertTrue(config instanceof Boolean);
Assert.assertEquals(config, false);
}
|
@Override
protected Object createObject(ValueWrapper<Object> initialInstance, String className, Map<List<String>, Object> params, ClassLoader classLoader) {
return fillBean(initialInstance, className, params, classLoader);
}
|
@Test
public void createObject() {
Map<List<String>, Object> params = new HashMap<>();
params.put(List.of("firstName"), "TestName");
params.put(List.of("age"), 10);
ValueWrapper<Object> initialInstance = runnerHelper.getDirectMapping(params);
Object objectRaw = runnerHelper.createObject(initialInstance, Person.class.getCanonicalName(), params, getClass().getClassLoader());
assertThat(objectRaw).isInstanceOf(Person.class);
Person object = (Person) objectRaw;
assertThat(object.getAge()).isEqualTo(10);
assertThat(object.getFirstName()).isEqualTo("TestName");
}
|
public void isInStrictOrder() {
isInStrictOrder(Ordering.natural());
}
|
@Test
public void isInStrictOrderFailure() {
expectFailureWhenTestingThat(asList(1, 2, 2, 4)).isInStrictOrder();
assertFailureKeys(
"expected to be in strict order", "but contained", "followed by", "full contents");
assertFailureValue("but contained", "2");
assertFailureValue("followed by", "2");
assertFailureValue("full contents", "[1, 2, 2, 4]");
}
|
public static <V> Read<V> read() {
return new AutoValue_SparkReceiverIO_Read.Builder<V>().build();
}
|
@Test
public void testReadFromReceiverByteBufferData() {
ReceiverBuilder<String, ByteBufferDataReceiver> receiverBuilder =
new ReceiverBuilder<>(ByteBufferDataReceiver.class).withConstructorArgs();
SparkReceiverIO.Read<String> reader =
SparkReceiverIO.<String>read()
.withGetOffsetFn(Long::valueOf)
.withTimestampFn(Instant::parse)
.withPullFrequencySec(PULL_FREQUENCY_SEC)
.withStartPollTimeoutSec(START_POLL_TIMEOUT_SEC)
.withStartOffset(START_OFFSET)
.withSparkReceiverBuilder(receiverBuilder);
List<String> expected = new ArrayList<>();
for (int i = 0; i < ByteBufferDataReceiver.RECORDS_COUNT; i++) {
expected.add(String.valueOf(i));
}
PCollection<String> actual = pipeline.apply(reader).setCoder(StringUtf8Coder.of());
PAssert.that(actual).containsInAnyOrder(expected);
pipeline.run().waitUntilFinish(Duration.standardSeconds(15));
}
|
public static DNMappingAddress dnMappingAddress(String dn) {
return new DNMappingAddress(dn);
}
|
@Test
public void testDnMappingAddressMethod() {
String dn = "1";
MappingAddress mappingAddress = MappingAddresses.dnMappingAddress(dn);
DNMappingAddress dnMappingAddress =
checkAndConvert(mappingAddress,
MappingAddress.Type.DN,
DNMappingAddress.class);
assertThat(dnMappingAddress.name(), is(equalTo(dn)));
}
|
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
return createStreamExecutionEnvironment(
options,
MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
options.getFlinkConfDir());
}
|
@Test
public void shouldSetMaxParallelismStreaming() {
FlinkPipelineOptions options = getDefaultPipelineOptions();
options.setRunner(TestFlinkRunner.class);
options.setMaxParallelism(42);
StreamExecutionEnvironment sev =
FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
assertThat(options.getMaxParallelism(), is(42));
assertThat(sev.getMaxParallelism(), is(42));
}
|
@Override
public Object getValue(final int columnIndex, final Class<?> type) throws SQLException {
if (boolean.class == type) {
return resultSet.getBoolean(columnIndex);
}
if (byte.class == type) {
return resultSet.getByte(columnIndex);
}
if (short.class == type) {
return resultSet.getShort(columnIndex);
}
if (int.class == type) {
return resultSet.getInt(columnIndex);
}
if (long.class == type) {
return resultSet.getLong(columnIndex);
}
if (float.class == type) {
return resultSet.getFloat(columnIndex);
}
if (double.class == type) {
return resultSet.getDouble(columnIndex);
}
if (String.class == type) {
return resultSet.getString(columnIndex);
}
if (BigDecimal.class == type) {
return resultSet.getBigDecimal(columnIndex);
}
if (byte[].class == type) {
return resultSet.getBytes(columnIndex);
}
if (Date.class == type) {
return resultSet.getDate(columnIndex);
}
if (Time.class == type) {
return resultSet.getTime(columnIndex);
}
if (Timestamp.class == type) {
return resultSet.getTimestamp(columnIndex);
}
if (Blob.class == type) {
return resultSet.getBlob(columnIndex);
}
if (Clob.class == type) {
return resultSet.getClob(columnIndex);
}
if (Array.class == type) {
return resultSet.getArray(columnIndex);
}
return resultSet.getObject(columnIndex);
}
|
@Test
void assertGetValueByBigDecimal() throws SQLException {
ResultSet resultSet = mock(ResultSet.class);
when(resultSet.getBigDecimal(1)).thenReturn(new BigDecimal("0"));
assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, BigDecimal.class), is(new BigDecimal("0")));
}
|
public final long getServerId() {
return serverId;
}
|
@Test
public void getServerIdOutputZero() {
// Arrange
final LogHeader objectUnderTest = new LogHeader(0);
// Act
final long actual = objectUnderTest.getServerId();
// Assert result
Assert.assertEquals(0L, actual);
}
|
@Override
public void onCreating(AbstractJob job) {
JobDetails jobDetails = job.getJobDetails();
Optional<Job> jobAnnotation = getJobAnnotation(jobDetails);
setJobName(job, jobAnnotation);
setAmountOfRetries(job, jobAnnotation);
setLabels(job, jobAnnotation);
}
|
@Test
void testDisplayNameWithAnnotationUsingJobParametersAndMDCVariablesThatDoNotExist() {
MDC.put("key-not-used-in-annotation", "1");
Job job = anEnqueuedJob()
.withoutName()
.withJobDetails(jobDetails()
.withClassName(TestService.class)
.withMethodName("doWorkWithAnnotation")
.withJobParameter(5)
.withJobParameter("John Doe"))
.build();
defaultJobFilter.onCreating(job);
assertThat(job.getJobName()).isEqualTo("Doing some hard work for user John Doe (customerId: (customer.id is not found in MDC))");
}
|
public abstract Map<String, String> properties(final Map<String, String> defaultProperties, final long additionalRetentionMs);
|
@Test
public void shouldAugmentRetentionMsWithWindowedChangelog() {
final WindowedChangelogTopicConfig topicConfig = new WindowedChangelogTopicConfig("name", Collections.emptyMap(), 10);
assertEquals("30", topicConfig.properties(Collections.emptyMap(), 20).get(TopicConfig.RETENTION_MS_CONFIG));
}
|
static void populateJavaParserDTOAndSourcesMap(final JavaParserDTO toPopulate,
final Map<String, String> sourcesMap,
final NodeNamesDTO nodeNamesDTO,
final List<Field<?>> fields,
final boolean isRoot) {
// Set 'evaluateNode'
populateEvaluateNode(toPopulate, nodeNamesDTO, fields, isRoot);
// Set the nested nodes
populatedNestedNodes(toPopulate, sourcesMap, fields, nodeNamesDTO);
// merge generated methods in one class
// dump generated sources
sourcesMap.put(toPopulate.fullNodeClassName, toPopulate.getSource());
}
|
@Test
void populateJavaParserDTOAndSourcesMap() {
boolean isRoot = true;
Map<String, String> sourcesMap = new HashMap<>();
KiePMMLNodeFactory.NodeNamesDTO nodeNamesDTO = new KiePMMLNodeFactory.NodeNamesDTO(nodeRoot,
createNodeClassName(), null,
1.0);
KiePMMLNodeFactory.JavaParserDTO toPopulate = new KiePMMLNodeFactory.JavaParserDTO(nodeNamesDTO, PACKAGE_NAME);
KiePMMLNodeFactory.populateJavaParserDTOAndSourcesMap(toPopulate, sourcesMap, nodeNamesDTO,
getFieldsFromDataDictionaryAndDerivedFields(dataDictionary2, derivedFields2),
isRoot);
commonVerifyEvaluateNode(toPopulate, nodeNamesDTO, isRoot);
}
|
public synchronized void reload(long checkpointId) {
this.accCkp += 1;
if (this.accCkp > 1) {
// do not clean the new file assignment state for the first checkpoint,
// this #reload calling is triggered by checkpoint success event, the coordinator
// also relies on the checkpoint success event to commit the inflight instant,
// and very possibly this component receives the notification before the coordinator,
// if we do the cleaning, the records processed within the time range:
// (start checkpoint, checkpoint success(and instant committed))
// would be assigned to a fresh new data bucket which is not the right behavior.
this.newFileAssignStates.clear();
this.accCkp = 0;
}
this.smallFileAssignMap.clear();
this.writeProfile.reload(checkpointId);
}
|
@Test
public void testWriteProfileReload() throws Exception {
WriteProfile writeProfile = new WriteProfile(writeConfig, context);
List<SmallFile> smallFiles1 = writeProfile.getSmallFiles("par1");
assertTrue(smallFiles1.isEmpty(), "Should have no small files");
TestData.writeData(TestData.DATA_SET_INSERT, conf);
String instantOption = getLastCompleteInstant(writeProfile);
assertNull(instantOption);
writeProfile.reload(1);
String instant1 = getLastCompleteInstant(writeProfile);
assertNotNull(instant1);
List<SmallFile> smallFiles2 = writeProfile.getSmallFiles("par1");
assertThat("Should have 1 small file", smallFiles2.size(), is(1));
assertThat("Small file should have same timestamp as last complete instant",
smallFiles2.get(0).location.getInstantTime(), is(instant1));
TestData.writeData(TestData.DATA_SET_INSERT, conf);
List<SmallFile> smallFiles3 = writeProfile.getSmallFiles("par1");
assertThat("Should have 1 small file", smallFiles3.size(), is(1));
assertThat("Non-reloaded write profile has the same base file view as before",
smallFiles3.get(0).location.getInstantTime(), is(instant1));
writeProfile.reload(2);
String instant2 = getLastCompleteInstant(writeProfile);
assertNotEquals(instant2, instant1, "Should have new complete instant");
List<SmallFile> smallFiles4 = writeProfile.getSmallFiles("par1");
assertThat("Should have 1 small file", smallFiles4.size(), is(1));
assertThat("Small file should have same timestamp as last complete instant",
smallFiles4.get(0).location.getInstantTime(), is(instant2));
}
|
@Override
public AppTimeoutsInfo getAppTimeouts(HttpServletRequest hsr, String appId)
throws AuthorizationException {
try {
long startTime = clock.getTime();
DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByAppId(appId);
AppTimeoutsInfo appTimeoutsInfo = interceptor.getAppTimeouts(hsr, appId);
if (appTimeoutsInfo != null) {
long stopTime = clock.getTime();
RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_APP_TIMEOUTS,
TARGET_WEB_SERVICE);
routerMetrics.succeededGetAppTimeoutsRetrieved((stopTime - startTime));
return appTimeoutsInfo;
}
} catch (IllegalArgumentException e) {
routerMetrics.incrGetAppTimeoutsFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APP_TIMEOUTS,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException(e,
"Unable to get the getAppTimeouts appId: %s.", appId);
} catch (YarnException e) {
routerMetrics.incrGetAppTimeoutsFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APP_TIMEOUTS,
UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage());
RouterServerUtil.logAndThrowRunTimeException("getAppTimeouts error.", e);
}
routerMetrics.incrGetAppTimeoutsFailedRetrieved();
RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APP_TIMEOUTS,
UNKNOWN, TARGET_WEB_SERVICE, "getAppTimeouts Failed.");
throw new RuntimeException("getAppTimeouts Failed.");
}
|
@Test
public void testGetAppTimeouts() throws IOException, InterruptedException {
// Generate ApplicationId information
ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
context.setApplicationId(appId.toString());
// Generate ApplicationAttemptId information
Assert.assertNotNull(interceptor.submitApplication(context, null));
AppTimeoutsInfo appTimeoutsInfo = interceptor.getAppTimeouts(null, appId.toString());
Assert.assertNotNull(appTimeoutsInfo);
List<AppTimeoutInfo> timeouts = appTimeoutsInfo.getAppTimeouts();
Assert.assertNotNull(timeouts);
Assert.assertEquals(1, timeouts.size());
AppTimeoutInfo resultAppTimeout = timeouts.get(0);
Assert.assertNotNull(resultAppTimeout);
Assert.assertEquals(10, resultAppTimeout.getRemainingTimeInSec());
Assert.assertEquals("UNLIMITED", resultAppTimeout.getExpireTime());
Assert.assertEquals(ApplicationTimeoutType.LIFETIME, resultAppTimeout.getTimeoutType());
}
|
@Override
public BaseCombineOperator run() {
try (InvocationScope ignored = Tracing.getTracer().createScope(CombinePlanNode.class)) {
return getCombineOperator();
}
}
|
@Test
public void testPlanNodeThrowException() {
List<PlanNode> planNodes = new ArrayList<>();
for (int i = 0; i < 20; i++) {
planNodes.add(() -> {
throw new RuntimeException("Inner exception message.");
});
}
_queryContext.setEndTimeMs(System.currentTimeMillis() + Server.DEFAULT_QUERY_EXECUTOR_TIMEOUT_MS);
CombinePlanNode combinePlanNode = new CombinePlanNode(planNodes, _queryContext, _executorService, null);
try {
combinePlanNode.run();
} catch (RuntimeException e) {
Assert.assertEquals(e.getCause().getMessage(), "java.lang.RuntimeException: Inner exception message.");
return;
}
// Fail.
Assert.fail();
}
|
@Override public long get(long key) {
return super.get0(key, 0);
}
|
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void testGet_whenDisposed() {
hsa.dispose();
hsa.get(1);
}
|
private Function<KsqlConfig, Kudf> getUdfFactory(
final Method method,
final UdfDescription udfDescriptionAnnotation,
final String functionName,
final FunctionInvoker invoker,
final String sensorName
) {
return ksqlConfig -> {
final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
method.getDeclaringClass(), udfDescriptionAnnotation.name());
if (actualUdf instanceof Configurable) {
ExtensionSecurityManager.INSTANCE.pushInUdf();
try {
((Configurable) actualUdf)
.configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
} finally {
ExtensionSecurityManager.INSTANCE.popOutUdf();
}
}
final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
return metrics.<Kudf>map(m -> new UdfMetricProducer(
m.getSensor(sensorName),
theUdf,
Time.SYSTEM
)).orElse(theUdf);
};
}
|
@Test
public void shouldCreateUdfFactoryWithInternalPathWhenInternal() {
final UdfFactory substring = FUNC_REG.getUdfFactory(FunctionName.of("substring"));
assertThat(substring.getMetadata().getPath(), equalTo(KsqlScalarFunction.INTERNAL_PATH));
}
|
public static StateRequestHandler delegateBasedUponType(
EnumMap<StateKey.TypeCase, StateRequestHandler> handlers) {
return new StateKeyTypeDelegatingStateRequestHandler(handlers);
}
|
@Test
public void testDelegatingStateHandlerDelegates() throws Exception {
StateRequestHandler mockHandler = Mockito.mock(StateRequestHandler.class);
StateRequestHandler mockHandler2 = Mockito.mock(StateRequestHandler.class);
EnumMap<StateKey.TypeCase, StateRequestHandler> handlers =
new EnumMap<>(StateKey.TypeCase.class);
handlers.put(StateKey.TypeCase.TYPE_NOT_SET, mockHandler);
handlers.put(TypeCase.MULTIMAP_SIDE_INPUT, mockHandler2);
StateRequest request = StateRequest.getDefaultInstance();
StateRequest request2 =
StateRequest.newBuilder()
.setStateKey(
StateKey.newBuilder().setMultimapSideInput(MultimapSideInput.getDefaultInstance()))
.build();
StateRequestHandlers.delegateBasedUponType(handlers).handle(request);
StateRequestHandlers.delegateBasedUponType(handlers).handle(request2);
verify(mockHandler).handle(request);
verify(mockHandler2).handle(request2);
verifyNoMoreInteractions(mockHandler, mockHandler2);
}
|
public JsonNode resolve(JsonNode tree, String path, String refFragmentPathDelimiters) {
return resolve(tree, new ArrayList<>(asList(split(path, refFragmentPathDelimiters))));
}
|
@Test(expected = IllegalArgumentException.class)
public void missingPathThrowsIllegalArgumentException() {
ObjectNode root = new ObjectMapper().createObjectNode();
resolver.resolve(root, "#/a/b/c", "#/.");
}
|
@Override
public void deleteRewardActivity(Long id) {
// validate that the activity exists
RewardActivityDO dbRewardActivity = validateRewardActivityExists(id);
if (!dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) { // an activity that has not been closed cannot be deleted
throw exception(REWARD_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED);
}
// delete
rewardActivityMapper.deleteById(id);
}
|
@Test
public void testDeleteRewardActivity_notExists() {
// prepare parameters
Long id = randomLongId();
// call and assert the exception
assertServiceException(() -> rewardActivityService.deleteRewardActivity(id), REWARD_ACTIVITY_NOT_EXISTS);
}
|
@Override
public ByteBuf asReadOnly() {
return newSharedLeakAwareByteBuf(super.asReadOnly());
}
|
@Test
public void testWrapReadOnly() {
assertWrapped(newBuffer(8).asReadOnly());
}
|
public Connection connection(Connection connection) {
// It is common to implement both interfaces
if (connection instanceof XAConnection) {
return xaConnection((XAConnection) connection);
}
return TracingConnection.create(connection, this);
}
|
@Test void connection_wrapsInput() {
assertThat(jmsTracing.connection(mock(Connection.class)))
.isInstanceOf(TracingConnection.class);
}
|
@Override
public SchemaResult getValueSchema(
final Optional<String> topicName,
final Optional<Integer> schemaId,
final FormatInfo expectedFormat,
final SerdeFeatures serdeFeatures
) {
return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, false);
}
|
@Test
public void shouldReturnSchemaFromGetValueSchemaIfFound() {
// When:
final SchemaResult result = supplier.getValueSchema(Optional.of(TOPIC_NAME),
Optional.empty(), expectedFormat, SerdeFeatures.of());
// Then:
assertThat(result.schemaAndId, is(not(Optional.empty())));
assertThat(result.schemaAndId.get().id, is(SCHEMA_ID));
assertThat(result.schemaAndId.get().columns, is(ImmutableList.of(column1)));
}
|
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException, IOException {
for (Callback callBack : callbacks) {
if (callBack instanceof NameCallback nameCallback) {
// Handles username callback
nameCallback.setName(name);
} else if (callBack instanceof PasswordCallback passwordCallback) {
// Handles password callback
passwordCallback.setPassword(password.toCharArray());
} else {
throw new UnsupportedCallbackException(callBack, "Callback not supported");
}
}
}
|
@Test
public void unsupportedCallback() {
assertThatThrownBy(() -> {
new CallbackHandlerImpl("tester", "secret").handle(new Callback[] {mock(Callback.class)});
})
.isInstanceOf(UnsupportedCallbackException.class);
}
|
public static void generateNodeTableMapping(Set<NodeDetails> nodeDetails,
String filePath) throws IOException {
List<String> entries = new ArrayList<>();
for (NodeDetails nodeDetail : nodeDetails) {
if (nodeDetail.getHostname().contains("/")) {
String hostname = nodeDetail.getHostname();
int lIndex = hostname.lastIndexOf("/");
String node = hostname.substring(lIndex + 1);
String rack = hostname.substring(0, lIndex);
entries.add(node + " " + rack);
}
}
Files.write(Paths.get(filePath),
entries,
StandardCharsets.UTF_8,
StandardOpenOption.CREATE);
}
|
@Test
public void testGenerateNodeTableMapping() throws Exception {
Set<NodeDetails> nodes = SLSUtils.generateNodes(3, 3);
File tempFile = File.createTempFile("testslsutils", ".tmp");
tempFile.deleteOnExit();
String fileName = tempFile.getAbsolutePath();
SLSUtils.generateNodeTableMapping(nodes, fileName);
List<String> lines = Files.readAllLines(Paths.get(fileName));
Assert.assertEquals(3, lines.size());
for (String line : lines) {
Assert.assertTrue(line.contains("node"));
Assert.assertTrue(line.contains("/rack"));
}
}
|
@SuppressWarnings("unchecked")
public Future<Void> executeRunnable(final Runnable r) {
return (Future<Void>) executor.submit(r::run);
}
|
@Test
public void testRunnableFails() throws Exception {
ExecutorServiceFuturePool futurePool =
new ExecutorServiceFuturePool(executorService);
Future<Void> future = futurePool.executeRunnable(() -> {
throw new IllegalStateException("deliberate");
});
interceptFuture(IllegalStateException.class, "deliberate", 30,
TimeUnit.SECONDS, future);
}
|
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
protobuf.setName("Search State");
try {
setAttribute(protobuf, "State", getStateAsEnum().name());
completeNodeAttributes(protobuf);
} catch (Exception es) {
LoggerFactory.getLogger(EsStateSection.class).warn("Failed to retrieve ES attributes. There will be only a single \"state\" attribute.", es);
setAttribute(protobuf, "State", es.getCause() instanceof ElasticsearchException ? es.getCause().getMessage() : es.getMessage());
}
return protobuf.build();
}
|
@Test
public void attributes_displays_exception_message_when_cause_null_when_client_fails() {
EsClient esClientMock = mock(EsClient.class);
EsStateSection underTest = new EsStateSection(esClientMock);
when(esClientMock.clusterHealth(any())).thenThrow(new RuntimeException("RuntimeException with no cause"));
ProtobufSystemInfo.Section section = underTest.toProtobuf();
assertThatAttributeIs(section, "State", "RuntimeException with no cause");
}
|
public static ShowResultSet execute(ShowStmt statement, ConnectContext context) {
return GlobalStateMgr.getCurrentState().getShowExecutor().showExecutorVisitor.visit(statement, context);
}
|
@Test
public void testShowCreateExternalCatalogWithMask() throws AnalysisException, DdlException {
// Please add further credential-masking test logic in CredentialUtilTest
new MockUp<CatalogMgr>() {
@Mock
public Catalog getCatalogByName(String name) {
Map<String, String> properties = new HashMap<>();
properties.put("hive.metastore.uris", "thrift://hadoop:9083");
properties.put("type", "hive");
properties.put("aws.s3.access_key", "iam_user_access_key");
properties.put("aws.s3.secret_key", "iam_user_secret_key");
return new Catalog(1, "test_hive", properties, "hive_test");
}
};
ShowCreateExternalCatalogStmt stmt = new ShowCreateExternalCatalogStmt("test_hive");
ShowResultSet resultSet = ShowExecutor.execute(stmt, ctx);
Assert.assertEquals("test_hive", resultSet.getResultRows().get(0).get(0));
Assert.assertEquals("CREATE EXTERNAL CATALOG `test_hive`\n" +
"comment \"hive_test\"\n" +
"PROPERTIES (\"aws.s3.access_key\" = \"ia******ey\",\n" +
"\"aws.s3.secret_key\" = \"ia******ey\",\n" +
"\"hive.metastore.uris\" = \"thrift://hadoop:9083\",\n" +
"\"type\" = \"hive\"\n" +
")", resultSet.getResultRows().get(0).get(1));
}
|
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
return resolveRequestConfig(propertyName);
} else if (propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
&& !propertyName.startsWith(KSQL_STREAMS_PREFIX)) {
return resolveKsqlConfig(propertyName);
}
return resolveStreamsConfig(propertyName, strict);
}
|
@Test
public void shouldFindUnknownProducerPropertyIfNotStrict() {
// Given:
final String configName = StreamsConfig.PRODUCER_PREFIX
+ "custom.interceptor.config";
// Then:
assertThat(resolver.resolve(configName, false), is(unresolvedItem(configName)));
}
|
void restoreBatch(final Collection<ConsumerRecord<byte[], byte[]>> records) {
// compute the observed stream time at the end of the restore batch, in order to speed up
// restore by not bothering to read from/write to segments which will have expired by the
// time the restoration process is complete.
long endOfBatchStreamTime = observedStreamTime;
for (final ConsumerRecord<byte[], byte[]> record : records) {
endOfBatchStreamTime = Math.max(endOfBatchStreamTime, record.timestamp());
}
final VersionedStoreClient<?> restoreClient = restoreWriteBuffer.getClient();
// note: there is increased risk for hitting an out-of-memory during this restore loop,
// compared to for non-versioned key-value stores, because this versioned store
// implementation stores multiple records (for the same key) together in a single RocksDB
// "segment" entry -- restoring a single changelog entry could require loading multiple
// records into memory. how high this memory amplification will be is very much dependent
// on the specific workload and the value of the "segment interval" parameter.
synchronized (position) {
for (final ConsumerRecord<byte[], byte[]> record : records) {
if (record.timestamp() < observedStreamTime - gracePeriod) {
// record is older than grace period and was therefore never written to the store
continue;
}
// advance observed stream time as usual, for use in deciding whether records have
// exceeded the store's grace period and should be dropped.
observedStreamTime = Math.max(observedStreamTime, record.timestamp());
ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(
record,
consistencyEnabled,
position
);
// put records to write buffer
doPut(
restoreClient,
endOfBatchStreamTime,
new Bytes(record.key()),
record.value(),
record.timestamp()
);
}
try {
restoreWriteBuffer.flush();
} catch (final RocksDBException e) {
throw new ProcessorStateException("Error restoring batch to store " + name, e);
}
}
}
|
@Test
public void shouldNotRestoreExpired() {
final List<DataRecord> records = new ArrayList<>();
records.add(new DataRecord("k", "v", HISTORY_RETENTION + 10));
records.add(new DataRecord("k1", "v1", HISTORY_RETENTION + 10 - GRACE_PERIOD)); // grace period has not elapsed
records.add(new DataRecord("k2", "v2", HISTORY_RETENTION + 9 - GRACE_PERIOD)); // grace period has elapsed, so this record should not be restored
store.restoreBatch(getChangelogRecords(records));
verifyGetValueFromStore("k", "v", HISTORY_RETENTION + 10);
verifyGetValueFromStore("k1", "v1", HISTORY_RETENTION + 10 - GRACE_PERIOD);
verifyGetNullFromStore("k2");
verifyExpiredRecordSensor(0);
}
|
public static ResolvedSchema removeTimeAttributeFromResolvedSchema(
ResolvedSchema resolvedSchema) {
return new ResolvedSchema(
resolvedSchema.getColumns().stream()
.map(col -> col.copy(DataTypeUtils.removeTimeAttribute(col.getDataType())))
.collect(Collectors.toList()),
resolvedSchema.getWatermarkSpecs(),
resolvedSchema.getPrimaryKey().orElse(null));
}
|
@Test
void testRemoveTimeAttribute() {
DataType rowTimeType =
DataTypeUtils.replaceLogicalType(
DataTypes.TIMESTAMP(3), new TimestampType(true, TimestampKind.ROWTIME, 3));
ResolvedSchema schema =
new ResolvedSchema(
Arrays.asList(
Column.physical("id", DataTypes.INT().notNull()),
Column.physical("t", rowTimeType),
Column.computed(
"date",
ResolvedExpressionMock.of(DataTypes.DATE(), "TO_DATE(t)")),
Column.metadata("metadata-1", DataTypes.INT(), "metadata", false)),
Collections.singletonList(
WatermarkSpec.of("t", ResolvedExpressionMock.of(rowTimeType, "t"))),
UniqueConstraint.primaryKey("test-pk", Collections.singletonList("id")));
assertThat(TableSchemaUtils.removeTimeAttributeFromResolvedSchema(schema))
.isEqualTo(
new ResolvedSchema(
Arrays.asList(
Column.physical("id", DataTypes.INT().notNull()),
Column.physical("t", DataTypes.TIMESTAMP(3)),
Column.computed(
"date",
new ResolvedExpressionMock(
DataTypes.DATE(), () -> "TO_DATE(t)")),
Column.metadata(
"metadata-1", DataTypes.INT(), "metadata", false)),
Collections.singletonList(
WatermarkSpec.of(
"t", ResolvedExpressionMock.of(rowTimeType, "t"))),
UniqueConstraint.primaryKey(
"test-pk", Collections.singletonList("id"))));
}
|
@Deprecated
@NonNull
public static WriteRequest newWriteRequest(
@Nullable final BluetoothGattCharacteristic characteristic,
@Nullable final byte[] value) {
return new WriteRequest(Type.WRITE, characteristic, value, 0,
value != null ? value.length : 0,
characteristic != null ?
characteristic.getWriteType() :
BluetoothGattCharacteristic.WRITE_TYPE_DEFAULT);
}
|
@Test
public void split_basic() {
final WriteRequest request = Request.newWriteRequest(characteristic, text.getBytes(), BluetoothGattCharacteristic.WRITE_TYPE_DEFAULT)
.split();
chunk = request.getData(MTU);
// Verify the chunk
assertNotNull(chunk);
assertEquals(MTU - 3, chunk.length);
final String expected = text.substring(0, MTU - 3);
assertArrayEquals(expected.getBytes(), chunk);
}
|
void handleSegmentWithCopySegmentFinishedState(Long startOffset, RemoteLogSegmentId remoteLogSegmentId,
Long leaderEpochEndOffset) {
// If there are duplicate segments uploaded due to leader-election, then mark them as unreferenced.
// Duplicate segments can be uploaded when the previous leader had tier-lags and the next leader uploads the
// segment for the same leader-epoch which is a super-set of previously uploaded segments.
// (eg)
// case-1: Duplicate segment
// L0 uploaded segment S0 with offsets 0-100 and L1 uploaded segment S1 with offsets 0-200.
// We will mark the segment S0 as duplicate and add it to unreferencedSegmentIds.
// case-2: Overlapping segments
// L0 uploaded segment S0 with offsets 10-90 and L1 uploaded segment S1 with offsets 5-100, S2-101-200,
// and so on. When the consumer requests the segment with offset 95, it should get the segment S1 and not S0.
Map.Entry<Long, RemoteLogSegmentId> lastEntry = offsetToId.lastEntry();
while (lastEntry != null && lastEntry.getKey() >= startOffset && highestLogOffset <= leaderEpochEndOffset) {
offsetToId.remove(lastEntry.getKey());
unreferencedSegmentIds.add(lastEntry.getValue());
lastEntry = offsetToId.lastEntry();
}
// Add the segment epochs mapping as the segment is copied successfully.
offsetToId.put(startOffset, remoteLogSegmentId);
// Remove the metadata from unreferenced entries as it is successfully copied and added to the offset mapping.
unreferencedSegmentIds.remove(remoteLogSegmentId);
// Update the highest offset entry for this leader epoch as we added a new mapping.
if (highestLogOffset == null || leaderEpochEndOffset > highestLogOffset) {
highestLogOffset = leaderEpochEndOffset;
}
}
|
@Test
void handleSegmentWithCopySegmentFinishedState() {
RemoteLogSegmentId segmentId1 = new RemoteLogSegmentId(tpId, Uuid.randomUuid());
RemoteLogSegmentId segmentId2 = new RemoteLogSegmentId(tpId, Uuid.randomUuid());
epochState.handleSegmentWithCopySegmentFinishedState(10L, segmentId1, 100L);
epochState.handleSegmentWithCopySegmentFinishedState(101L, segmentId2, 200L);
assertEquals(2, epochState.referencedSegmentIds().size());
assertEquals(segmentId1, epochState.floorEntry(90L));
assertEquals(segmentId2, epochState.floorEntry(150L));
assertTrue(epochState.unreferencedSegmentIds().isEmpty());
assertEquals(200L, epochState.highestLogOffset());
}
|
@Override
public List<Node> sniff(List<Node> nodes) {
if (attribute == null || value == null) {
return nodes;
}
return nodes.stream()
.filter(node -> nodeMatchesFilter(node, attribute, value))
.collect(Collectors.toList());
}
|
@Test
void worksWithEmptyNodesListIfFilterIsSet() throws Exception {
final List<Node> nodes = Collections.emptyList();
final NodesSniffer nodesSniffer = new FilteredOpenSearchNodesSniffer("rack", "42");
assertThat(nodesSniffer.sniff(nodes)).isEqualTo(nodes);
}
|
@Override
public UpdateFeaturesResponse getErrorResponse(int throttleTimeMs, Throwable e) {
return UpdateFeaturesResponse.createWithErrors(
ApiError.fromThrowable(e),
Collections.emptyMap(),
throttleTimeMs
);
}
|
@Test
public void testGetErrorResponse() {
UpdateFeaturesRequestData.FeatureUpdateKeyCollection features =
new UpdateFeaturesRequestData.FeatureUpdateKeyCollection();
features.add(new UpdateFeaturesRequestData.FeatureUpdateKey()
.setFeature("foo")
.setMaxVersionLevel((short) 2)
);
features.add(new UpdateFeaturesRequestData.FeatureUpdateKey()
.setFeature("bar")
.setMaxVersionLevel((short) 3)
);
UpdateFeaturesRequest request = new UpdateFeaturesRequest(
new UpdateFeaturesRequestData().setFeatureUpdates(features),
UpdateFeaturesRequestData.HIGHEST_SUPPORTED_VERSION
);
UpdateFeaturesResponse response = request.getErrorResponse(0, new UnknownServerException());
assertEquals(Errors.UNKNOWN_SERVER_ERROR, response.topLevelError().error());
assertEquals(0, response.data().results().size());
assertEquals(Collections.singletonMap(Errors.UNKNOWN_SERVER_ERROR, 1), response.errorCounts());
}
|
@Override
public RestLiResponseData<CreateResponseEnvelope> buildRestLiResponseData(Request request,
RoutingResult routingResult,
Object result,
Map<String, String> headers,
List<HttpCookie> cookies)
{
CreateResponse createResponse = (CreateResponse) result;
boolean isGetAfterCreate = createResponse instanceof CreateKVResponse;
if (createResponse.hasError())
{
RestLiServiceException exception = createResponse.getError();
return new RestLiResponseDataImpl<>(new CreateResponseEnvelope(exception, isGetAfterCreate), headers, cookies);
}
Object id = null;
if (createResponse.hasId())
{
id = ResponseUtils.translateCanonicalKeyToAlternativeKeyIfNeeded(createResponse.getId(), routingResult);
final ProtocolVersion protocolVersion = routingResult.getContext().getRestliProtocolVersion();
String stringKey = URIParamUtils.encodeKeyForUri(id, UriComponent.Type.PATH_SEGMENT, protocolVersion);
UriBuilder uribuilder = UriBuilder.fromUri(request.getURI());
uribuilder.path(stringKey);
uribuilder.replaceQuery(null);
if (routingResult.getContext().hasParameter(RestConstants.ALT_KEY_PARAM))
{
// add altkey param to location URI
uribuilder.queryParam(RestConstants.ALT_KEY_PARAM, routingResult.getContext().getParameter(RestConstants.ALT_KEY_PARAM));
}
headers.put(RestConstants.HEADER_LOCATION, uribuilder.build((Object) null).toString());
headers.put(HeaderUtil.getIdHeaderName(protocolVersion), URIParamUtils.encodeKeyForHeader(id, protocolVersion));
}
// Verify that a null status was not passed into the CreateResponse. If so, this is a developer error.
if (createResponse.getStatus() == null)
{
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Unexpected null encountered. HttpStatus is null inside of a CreateResponse from the resource method: "
+ routingResult.getResourceMethod());
}
final ResourceContext resourceContext = routingResult.getContext();
RecordTemplate idResponse;
if (createResponse instanceof CreateKVResponse && resourceContext.isReturnEntityRequested())
{
RecordTemplate entity = ((CreateKVResponse<?, ?>) createResponse).getEntity();
// Verify that a null entity was not passed into the CreateKVResponse. If so, this is a developer error.
if (entity == null)
{
throw new RestLiServiceException(HttpStatus.S_500_INTERNAL_SERVER_ERROR,
"Unexpected null encountered. Entity is null inside of a CreateKVResponse when the entity should be returned. In resource method: " + routingResult.getResourceMethod());
}
DataMap entityData = entity.data();
TimingContextUtil.beginTiming(resourceContext.getRawRequestContext(),
FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key());
final DataMap data = RestUtils.projectFields(entityData, resourceContext);
TimingContextUtil.endTiming(resourceContext.getRawRequestContext(),
FrameworkTimingKeys.SERVER_RESPONSE_RESTLI_PROJECTION_APPLY.key());
idResponse = new AnyRecord(data);
// Ideally, we should set an IdEntityResponse to the envelope. But we are keeping AnyRecord
// to make sure the runtime object is backwards compatible.
// idResponse = new IdEntityResponse<>(id, new AnyRecord(data));
}
else //Instance of idResponse
{
idResponse = new IdResponse<>(id);
}
return new RestLiResponseDataImpl<>(new CreateResponseEnvelope(createResponse.getStatus(), idResponse, isGetAfterCreate), headers, cookies);
}
|
@Test(dataProvider = "returnEntityData")
public void testReturnEntityInBuildRestLiResponseData(CreateResponse createResponse, boolean isReturnEntityRequested, boolean expectEntityReturned) throws URISyntaxException
{
ServerResourceContext mockContext = EasyMock.createMock(ServerResourceContext.class);
EasyMock.expect(mockContext.isReturnEntityRequested()).andReturn(isReturnEntityRequested);
EasyMock.expect(mockContext.getProjectionMask()).andReturn(null);
EasyMock.expect(mockContext.getProjectionMode()).andReturn(ProjectionMode.AUTOMATIC);
EasyMock.expect(mockContext.getRawRequestContext()).andReturn(new RequestContext()).anyTimes();
EasyMock.expect(mockContext.getAlwaysProjectedFields()).andReturn(Collections.emptySet()).anyTimes();
EasyMock.replay(mockContext);
RoutingResult routingResult = new RoutingResult(mockContext, null);
CreateResponseBuilder responseBuilder = new CreateResponseBuilder();
RestLiResponseData<CreateResponseEnvelope> envelope = responseBuilder.buildRestLiResponseData(new RestRequestBuilder(new URI("/foo")).build(),
routingResult,
createResponse,
Collections.emptyMap(),
Collections.emptyList());
RecordTemplate record = envelope.getResponseEnvelope().getRecord();
if (expectEntityReturned)
{
Assert.assertTrue(record instanceof AnyRecord, "Entity in response envelope should be of type AnyRecord.");
Assert.assertEquals(record, ((CreateKVResponse) createResponse).getEntity(), "Entity in response envelope should match the original.");
Assert.assertTrue(envelope.getResponseEnvelope().isGetAfterCreate(), "Response envelope should be get after create.");
}
else
{
Assert.assertTrue(record instanceof IdResponse, "Entity in response envelope should be of type IdResponse.");
Assert.assertNull(((IdResponse) record).getId(), "IdResponse in response envelope should have same ID.");
}
}
|
@Override
public Size.Output run(RunContext runContext) throws Exception {
StorageInterface storageInterface = ((DefaultRunContext)runContext).getApplicationContext().getBean(StorageInterface.class);
URI render = URI.create(runContext.render(this.uri));
Long size = storageInterface.getAttributes(runContext.tenantId(), render).getSize();
return Output.builder()
.size(size)
.build();
}
|
@Test
void run() throws Exception {
RunContext runContext = runContextFactory.of();
final Long size = 42L;
byte[] randomBytes = new byte[size.intValue()];
new Random().nextBytes(randomBytes);
URI put = storageInterface.put(
null,
new URI("/file/storage/get.yml"),
new ByteArrayInputStream(randomBytes)
);
Size bash = Size.builder()
.uri(put.toString())
.build();
Size.Output run = bash.run(runContext);
assertThat(run.getSize(), is(size));
}
|
public List<String> getUuids() {
return this.stream().map(EnvironmentAgentConfig::getUuid).collect(toList());
}
|
@Test
void shouldGetAllAgentUUIDs(){
EnvironmentAgentConfig envAgentConfig1 = new EnvironmentAgentConfig("uuid1");
EnvironmentAgentConfig envAgentConfig2 = new EnvironmentAgentConfig("uuid2");
EnvironmentAgentConfig envAgentConfig3 = new EnvironmentAgentConfig("uuid3");
envAgentsConfig.addAll(List.of(envAgentConfig1, envAgentConfig2, envAgentConfig3));
List<String> uuids = envAgentsConfig.getUuids();
assertThat(uuids.size(), is(3));
assertThat(uuids.containsAll(List.of("uuid1", "uuid2", "uuid3")), is(true));
}
|
@Override
public void pluginUnLoaded(GoPluginDescriptor pluginDescriptor) {
if (notificationExtension.canHandlePlugin(pluginDescriptor.id())) {
notificationPluginRegistry.deregisterPlugin(pluginDescriptor.id());
notificationPluginRegistry.removePluginInterests(pluginDescriptor.id());
}
}
|
@Test
public void shouldUnregisterPluginOnPluginUnLoad() {
NotificationPluginRegistrar notificationPluginRegistrar = new NotificationPluginRegistrar(pluginManager, notificationExtension, notificationPluginRegistry);
notificationPluginRegistrar.pluginUnLoaded(GoPluginDescriptor.builder().id(PLUGIN_ID_1).isBundledPlugin(true).build());
verify(notificationPluginRegistry).deregisterPlugin(PLUGIN_ID_1);
}
|
@Override
public void start() {
if (realm != null) {
try {
LOG.info("Security realm: {}", realm.getName());
realm.init();
LOG.info("Security realm started");
} catch (RuntimeException e) {
if (ignoreStartupFailure) {
LOG.error("IGNORED - Security realm fails to start: {}", e.getMessage());
} else {
throw new SonarException("Security realm fails to start: " + e.getMessage(), e);
}
}
}
}
|
@Test
public void should_fail() {
SecurityRealm realm = spy(new AlwaysFailsRealm());
settings.setProperty("sonar.security.realm", realm.getName());
try {
new SecurityRealmFactory(settings.asConfig(), new SecurityRealm[] {realm}).start();
fail();
} catch (SonarException e) {
assertThat(e.getCause()).isInstanceOf(IllegalStateException.class);
assertThat(e.getMessage()).contains("Security realm fails to start");
}
}
|
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
String taskType = MergeRollupTask.TASK_TYPE;
List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
for (TableConfig tableConfig : tableConfigs) {
if (!validate(tableConfig, taskType)) {
continue;
}
String tableNameWithType = tableConfig.getTableName();
LOGGER.info("Start generating task configs for table: {} for task: {}", tableNameWithType, taskType);
// Get all segment metadata
List<SegmentZKMetadata> allSegments = getSegmentsZKMetadataForTable(tableNameWithType);
// Filter segments based on status
List<SegmentZKMetadata> preSelectedSegmentsBasedOnStatus
= filterSegmentsBasedOnStatus(tableConfig.getTableType(), allSegments);
// Select current segment snapshot based on lineage, filter out empty segments
SegmentLineage segmentLineage = _clusterInfoAccessor.getSegmentLineage(tableNameWithType);
Set<String> preSelectedSegmentsBasedOnLineage = new HashSet<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
preSelectedSegmentsBasedOnLineage.add(segment.getSegmentName());
}
SegmentLineageUtils.filterSegmentsBasedOnLineageInPlace(preSelectedSegmentsBasedOnLineage, segmentLineage);
List<SegmentZKMetadata> preSelectedSegments = new ArrayList<>();
for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
if (preSelectedSegmentsBasedOnLineage.contains(segment.getSegmentName()) && segment.getTotalDocs() > 0
&& MergeTaskUtils.allowMerge(segment)) {
preSelectedSegments.add(segment);
}
}
if (preSelectedSegments.isEmpty()) {
// Reset the watermark time if no segment found. This covers the case where the table is newly created or
// all segments for the existing table got deleted.
resetDelayMetrics(tableNameWithType);
LOGGER.info("Skip generating task: {} for table: {}, no segment is found.", taskType, tableNameWithType);
continue;
}
// Sort segments based on startTimeMs, endTimeMs and segmentName in ascending order
preSelectedSegments.sort((a, b) -> {
long aStartTime = a.getStartTimeMs();
long bStartTime = b.getStartTimeMs();
if (aStartTime != bStartTime) {
return Long.compare(aStartTime, bStartTime);
}
long aEndTime = a.getEndTimeMs();
long bEndTime = b.getEndTimeMs();
return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime)
: a.getSegmentName().compareTo(b.getSegmentName());
});
// Sort merge levels based on bucket time period
Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
Map<String, Map<String, String>> mergeLevelToConfigs = MergeRollupTaskUtils.getLevelToConfigMap(taskConfigs);
List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs =
new ArrayList<>(mergeLevelToConfigs.entrySet());
sortedMergeLevelConfigs.sort(Comparator.comparingLong(
e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));
// Get incomplete merge levels
Set<String> inCompleteMergeLevels = new HashSet<>();
for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType,
_clusterInfoAccessor).entrySet()) {
for (PinotTaskConfig taskConfig : _clusterInfoAccessor.getTaskConfigs(entry.getKey())) {
inCompleteMergeLevels.add(taskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
}
}
// Get scheduling mode which is "processFromWatermark" by default. If "processAll" mode is enabled, there will be
// no watermark, and each round we pick the buckets in chronological order which have unmerged segments.
boolean processAll = MergeTask.PROCESS_ALL_MODE.equalsIgnoreCase(taskConfigs.get(MergeTask.MODE));
ZNRecord mergeRollupTaskZNRecord = _clusterInfoAccessor
.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE, tableNameWithType);
int expectedVersion = mergeRollupTaskZNRecord != null ? mergeRollupTaskZNRecord.getVersion() : -1;
MergeRollupTaskMetadata mergeRollupTaskMetadata =
mergeRollupTaskZNRecord != null ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
: new MergeRollupTaskMetadata(tableNameWithType, new TreeMap<>());
List<PinotTaskConfig> pinotTaskConfigsForTable = new ArrayList<>();
// Schedule tasks from lowest to highest merge level (e.g. Hourly -> Daily -> Monthly -> Yearly)
String mergeLevel = null;
for (Map.Entry<String, Map<String, String>> mergeLevelConfig : sortedMergeLevelConfigs) {
String lowerMergeLevel = mergeLevel;
mergeLevel = mergeLevelConfig.getKey();
Map<String, String> mergeConfigs = mergeLevelConfig.getValue();
// Skip scheduling if there's incomplete task for current mergeLevel
if (inCompleteMergeLevels.contains(mergeLevel)) {
LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, Skipping task generation: {}",
mergeLevel, tableNameWithType, taskType);
continue;
}
// Get the bucket size, buffer size and maximum number of parallel buckets (by default 1)
String bucketPeriod = mergeConfigs.get(MergeTask.BUCKET_TIME_PERIOD_KEY);
long bucketMs = TimeUtils.convertPeriodToMillis(bucketPeriod);
if (bucketMs <= 0) {
LOGGER.error("Bucket time period: {} (table : {}, mergeLevel : {}) must be larger than 0", bucketPeriod,
tableNameWithType, mergeLevel);
continue;
}
String bufferPeriod = mergeConfigs.get(MergeTask.BUFFER_TIME_PERIOD_KEY);
long bufferMs = TimeUtils.convertPeriodToMillis(bufferPeriod);
if (bufferMs < 0) {
          LOGGER.error("Buffer time period: {} (table : {}, mergeLevel : {}) must be larger than or equal to 0",
bufferPeriod, tableNameWithType, mergeLevel);
continue;
}
String maxNumParallelBucketsStr = mergeConfigs.get(MergeTask.MAX_NUM_PARALLEL_BUCKETS);
int maxNumParallelBuckets = maxNumParallelBucketsStr != null ? Integer.parseInt(maxNumParallelBucketsStr)
: DEFAULT_NUM_PARALLEL_BUCKETS;
if (maxNumParallelBuckets <= 0) {
LOGGER.error("Maximum number of parallel buckets: {} (table : {}, mergeLevel : {}) must be larger than 0",
maxNumParallelBuckets, tableNameWithType, mergeLevel);
continue;
}
// Get bucket start/end time
long preSelectedSegStartTimeMs = preSelectedSegments.get(0).getStartTimeMs();
long bucketStartMs = preSelectedSegStartTimeMs / bucketMs * bucketMs;
long watermarkMs = 0;
if (!processAll) {
// Get watermark from MergeRollupTaskMetadata ZNode
// bucketStartMs = watermarkMs
// bucketEndMs = bucketStartMs + bucketMs
watermarkMs = getWatermarkMs(preSelectedSegStartTimeMs, bucketMs, mergeLevel,
mergeRollupTaskMetadata);
bucketStartMs = watermarkMs;
}
long bucketEndMs = bucketStartMs + bucketMs;
if (lowerMergeLevel == null) {
long lowestLevelMaxValidBucketEndTimeMs = Long.MIN_VALUE;
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
// Compute lowestLevelMaxValidBucketEndTimeMs among segments that are ready for merge
long currentValidBucketEndTimeMs =
getValidBucketEndTimeMsForSegment(preSelectedSegment, bucketMs, bufferMs);
lowestLevelMaxValidBucketEndTimeMs =
Math.max(lowestLevelMaxValidBucketEndTimeMs, currentValidBucketEndTimeMs);
}
_tableLowestLevelMaxValidBucketEndTimeMs.put(tableNameWithType, lowestLevelMaxValidBucketEndTimeMs);
}
        // Create metrics even if no task is scheduled; this helps when the controller is restarted, since the
        // metrics would otherwise not be available until the controller schedules a valid task
List<String> sortedMergeLevels =
sortedMergeLevelConfigs.stream().map(e -> e.getKey()).collect(Collectors.toList());
if (processAll) {
createOrUpdateNumBucketsToProcessMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, bufferMs, bucketMs,
preSelectedSegments, sortedMergeLevels);
} else {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, null, watermarkMs, bufferMs, bucketMs);
}
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
LOGGER.info("Bucket with start: {} and end: {} (table : {}, mergeLevel : {}, mode : {}) cannot be merged yet",
bucketStartMs, bucketEndMs, tableNameWithType, mergeLevel, processAll ? MergeTask.PROCESS_ALL_MODE
: MergeTask.PROCESS_FROM_WATERMARK_MODE);
continue;
}
        // Find overlapping segments for each bucket, skip the buckets that have all segments merged
List<List<SegmentZKMetadata>> selectedSegmentsForAllBuckets = new ArrayList<>(maxNumParallelBuckets);
List<SegmentZKMetadata> selectedSegmentsForBucket = new ArrayList<>();
boolean hasUnmergedSegments = false;
boolean hasSpilledOverData = false;
boolean areAllSegmentsReadyToMerge = true;
// The for loop terminates in following cases:
// 1. Found buckets with unmerged segments:
// For each bucket find all segments overlapping with the target bucket, skip the bucket if all overlapping
        //    segments are merged. Schedule at most k (numParallelBuckets) buckets, and stop at the first bucket that
// contains spilled over data.
// One may wonder how a segment with records spanning different buckets is handled. The short answer is that
// it will be cut into multiple segments, each for a separate bucket. This is achieved by setting bucket time
// period as PARTITION_BUCKET_TIME_PERIOD when generating PinotTaskConfigs
// 2. There's no bucket with unmerged segments, skip scheduling
for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
long startTimeMs = preSelectedSegment.getStartTimeMs();
if (startTimeMs < bucketEndMs) {
long endTimeMs = preSelectedSegment.getEndTimeMs();
if (endTimeMs >= bucketStartMs) {
// For segments overlapping with current bucket, add to the result list
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
// endTimeMs < bucketStartMs
            // Haven't found the first overlapping segment yet, continue to the next segment
} else {
// Has gone through all overlapping segments for current bucket
if (hasUnmergedSegments && areAllSegmentsReadyToMerge) {
// Add the bucket if there are unmerged segments
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.size() == maxNumParallelBuckets || hasSpilledOverData) {
// If there are enough buckets or found spilled over data, schedule merge tasks
break;
} else {
// Start with a new bucket
// TODO: If there are many small merged segments, we should merge them again
selectedSegmentsForBucket = new ArrayList<>();
hasUnmergedSegments = false;
areAllSegmentsReadyToMerge = true;
bucketStartMs = (startTimeMs / bucketMs) * bucketMs;
bucketEndMs = bucketStartMs + bucketMs;
if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
break;
}
if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
hasUnmergedSegments = true;
}
if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
areAllSegmentsReadyToMerge = false;
}
if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
hasSpilledOverData = true;
}
selectedSegmentsForBucket.add(preSelectedSegment);
}
}
}
        // Add the last bucket if it contains unmerged segments and has not been added yet
if (hasUnmergedSegments && areAllSegmentsReadyToMerge && (selectedSegmentsForAllBuckets.isEmpty() || (
selectedSegmentsForAllBuckets.get(selectedSegmentsForAllBuckets.size() - 1)
!= selectedSegmentsForBucket))) {
selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
}
if (selectedSegmentsForAllBuckets.isEmpty()) {
LOGGER.info("No unmerged segment found for table: {}, mergeLevel: {}", tableNameWithType, mergeLevel);
continue;
}
        // Bump up the watermark to the earliest start time of the selected segments, floored to the bucket boundary
long newWatermarkMs = selectedSegmentsForAllBuckets.get(0).get(0).getStartTimeMs() / bucketMs * bucketMs;
mergeRollupTaskMetadata.getWatermarkMap().put(mergeLevel, newWatermarkMs);
LOGGER.info("Update watermark for table: {}, mergeLevel: {} from: {} to: {}", tableNameWithType, mergeLevel,
watermarkMs, newWatermarkMs);
// Update the delay metrics
if (!processAll) {
createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, newWatermarkMs, bufferMs,
bucketMs);
}
// Create task configs
int maxNumRecordsPerTask =
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY) != null ? Integer.parseInt(
mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY)) : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
if (segmentPartitionConfig == null) {
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(selectedSegmentsPerBucket, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
} else {
        // For a partitioned table, schedule separate tasks for each partitionId (the partitionId is constructed from
        // the partitions of all partition columns. There should be an exact match between the partition columns of
        // the segment and the partition columns of the table configuration, and there must be only one partition per
        // column in the segment metadata).
        // Segments which do not meet these conditions are considered outlier segments, and additional tasks
        // are generated for them.
Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
List<String> partitionColumns = new ArrayList<>(columnPartitionMap.keySet());
for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
Map<List<Integer>, List<SegmentZKMetadata>> partitionToSegments = new HashMap<>();
List<SegmentZKMetadata> outlierSegments = new ArrayList<>();
for (SegmentZKMetadata selectedSegment : selectedSegmentsPerBucket) {
SegmentPartitionMetadata segmentPartitionMetadata = selectedSegment.getPartitionMetadata();
List<Integer> partitions = new ArrayList<>();
if (segmentPartitionMetadata != null && columnPartitionMap.keySet()
.equals(segmentPartitionMetadata.getColumnPartitionMap().keySet())) {
for (String partitionColumn : partitionColumns) {
if (segmentPartitionMetadata.getPartitions(partitionColumn).size() == 1) {
partitions.add(segmentPartitionMetadata.getPartitions(partitionColumn).iterator().next());
} else {
partitions.clear();
break;
}
}
}
if (partitions.isEmpty()) {
outlierSegments.add(selectedSegment);
} else {
partitionToSegments.computeIfAbsent(partitions, k -> new ArrayList<>()).add(selectedSegment);
}
}
for (Map.Entry<List<Integer>, List<SegmentZKMetadata>> entry : partitionToSegments.entrySet()) {
List<Integer> partition = entry.getKey();
List<SegmentZKMetadata> partitionedSegments = entry.getValue();
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(partitionedSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
partition, mergeConfigs, taskConfigs));
}
if (!outlierSegments.isEmpty()) {
pinotTaskConfigsForTable.addAll(
createPinotTaskConfigs(outlierSegments, tableConfig, maxNumRecordsPerTask, mergeLevel,
null, mergeConfigs, taskConfigs));
}
}
}
}
// Write updated watermark map to zookeeper
if (!processAll) {
try {
_clusterInfoAccessor
.setMinionTaskMetadata(mergeRollupTaskMetadata, MinionConstants.MergeRollupTask.TASK_TYPE,
expectedVersion);
} catch (ZkException e) {
LOGGER.error(
"Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are "
+ "multiple task schedulers for the same table, need to investigate!", tableNameWithType);
continue;
}
}
pinotTaskConfigs.addAll(pinotTaskConfigsForTable);
LOGGER.info("Finished generating task configs for table: {} for task: {}, numTasks: {}", tableNameWithType,
taskType, pinotTaskConfigsForTable.size());
}
// Clean up metrics
cleanUpDelayMetrics(tableConfigs);
return pinotTaskConfigs;
}
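
The generator above repeatedly floors segment start times to bucket boundaries (bucketStartMs = startTimeMs / bucketMs * bucketMs) and only merges a bucket once its end time plus the buffer has passed. A minimal, self-contained sketch of that arithmetic follows; the helper names bucketStartFor and isBucketReady are hypothetical and only approximate what isValidBucketEndTime checks at the lowest merge level.

import java.util.concurrent.TimeUnit;

// Minimal sketch of the bucket/watermark arithmetic used above (hypothetical helper names).
public final class BucketMathSketch {
  // Floor a timestamp to the start of its bucket: bucketStartMs = timestampMs / bucketMs * bucketMs.
  static long bucketStartFor(long timestampMs, long bucketMs) {
    return timestampMs / bucketMs * bucketMs;
  }

  // Assumption: a bucket [bucketStartMs, bucketEndMs) is ready to merge only once bucketEndMs + bufferMs has passed.
  static boolean isBucketReady(long bucketEndMs, long bufferMs, long nowMs) {
    return bucketEndMs + bufferMs <= nowMs;
  }

  public static void main(String[] args) {
    long bucketMs = TimeUnit.DAYS.toMillis(1);   // daily merge level
    long bufferMs = TimeUnit.DAYS.toMillis(2);   // 2d buffer, as in the test config below
    long segmentStartMs = 90_000_000L;           // a little more than 1 day since epoch
    long bucketStartMs = bucketStartFor(segmentStartMs, bucketMs); // 86_400_000 (day-1 boundary)
    long bucketEndMs = bucketStartMs + bucketMs;
    System.out.println("bucket = [" + bucketStartMs + ", " + bucketEndMs + ")");
    System.out.println("ready now? " + isBucketReady(bucketEndMs, bufferMs, System.currentTimeMillis()));
  }
}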
|
@Test
public void testSegmentSelectionMultiLevels() {
Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
Map<String, String> tableTaskConfigs = new HashMap<>();
tableTaskConfigs.put("daily.mergeType", "concat");
tableTaskConfigs.put("daily.bufferTimePeriod", "2d");
tableTaskConfigs.put("daily.bucketTimePeriod", "1d");
tableTaskConfigs.put("daily.maxNumRecordsPerSegment", "1000000");
tableTaskConfigs.put("daily.maxNumRecordsPerTask", "5000000");
tableTaskConfigs.put("monthly.mergeType", "rollup");
tableTaskConfigs.put("monthly.bufferTimePeriod", "30d");
tableTaskConfigs.put("monthly.bucketTimePeriod", "30d");
tableTaskConfigs.put("monthly.roundBucketTimePeriod", "30d");
tableTaskConfigs.put("monthly.maxNumRecordsPerSegment", "2000000");
tableTaskConfigs.put("monthly.maxNumRecordsPerTask", "5000000");
taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
TableConfig offlineTableConfig = getTableConfig(TableType.OFFLINE, taskConfigsMap);
String segmentName1 = "testTable__1";
String segmentName2 = "testTable__2";
String segmentName3 = "testTable__3";
String segmentName4 = "testTable__4";
String segmentName5 = "testTable__5";
SegmentZKMetadata metadata1 = getSegmentZKMetadata(segmentName1, 86_400_000L, 90_000_000L, TimeUnit.MILLISECONDS,
null); // starts 1 day since epoch
SegmentZKMetadata metadata2 = getSegmentZKMetadata(segmentName2, 86_400_000L, 100_000_000L, TimeUnit.MILLISECONDS,
null); // starts 1 day since epoch
SegmentZKMetadata metadata3 = getSegmentZKMetadata(segmentName3, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS,
null); // starts 1 day since epoch
SegmentZKMetadata metadata4 =
getSegmentZKMetadata(segmentName4, 2_505_600_000L, 2_592_010_000L, TimeUnit.MILLISECONDS,
null); // starts 29 days since epoch
SegmentZKMetadata metadata5 =
getSegmentZKMetadata(segmentName5, 2_592_000_000L, 2_592_020_000L, TimeUnit.MILLISECONDS,
null); // starts 30 days since epoch
ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5)));
mockMergeRollupTaskMetadataGetterAndSetter(mockClusterInfoProvide);
    // Cold start: only daily merge tasks are scheduled
MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
generator.init(mockClusterInfoProvide);
List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(MergeRollupTaskMetadata.fromZNRecord(
mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY).longValue(), 86_400_000L);
assertEquals(pinotTaskConfigs.size(), 1);
Map<String, String> taskConfigsDaily1 = pinotTaskConfigs.get(0).getConfigs();
checkPinotTaskConfig(taskConfigsDaily1, segmentName1 + "," + segmentName2 + "," + segmentName3, DAILY, "concat",
"1d", null, "1000000");
    // Monthly task is not scheduled until there are 30 days of daily merged segments available (monthly merge window
// endTimeMs > daily watermark)
String segmentNameMergedDaily1 = "merged_testTable__1__2__3";
SegmentZKMetadata metadataMergedDaily1 =
getSegmentZKMetadata(segmentNameMergedDaily1, 86_400_000L, 110_000_000L, TimeUnit.MILLISECONDS, null);
metadataMergedDaily1.setCustomMap(
ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5,
segmentNameMergedDaily1)));
SegmentLineage segmentLineage = new SegmentLineage(OFFLINE_TABLE_NAME);
segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
new LineageEntry(Arrays.asList(segmentName1, segmentName2, segmentName3),
Collections.singletonList(segmentNameMergedDaily1), LineageEntryState.COMPLETED, 11111L));
when(mockClusterInfoProvide.getSegmentLineage(OFFLINE_TABLE_NAME)).thenReturn(segmentLineage);
Map<String, TaskState> taskStatesMap = new HashMap<>();
String taskName1 = "Task_MergeRollupTask_1";
taskStatesMap.put(taskName1, TaskState.COMPLETED);
when(mockClusterInfoProvide.getTaskStates(MinionConstants.MergeRollupTask.TASK_TYPE)).thenReturn(taskStatesMap);
when(mockClusterInfoProvide.getTaskConfigs(taskName1)).thenReturn(
Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily1)));
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(MergeRollupTaskMetadata.fromZNRecord(
mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY).longValue(), 2_505_600_000L);
assertEquals(pinotTaskConfigs.size(), 1);
Map<String, String> taskConfigsDaily2 = pinotTaskConfigs.get(0).getConfigs();
checkPinotTaskConfig(taskConfigsDaily2, segmentName4, DAILY, "concat", "1d", null, "1000000");
// Schedule multiple tasks for both merge levels simultaneously
String segmentNameMergedDaily2 = "merged_testTable__4_1";
SegmentZKMetadata metadataMergedDaily2 =
getSegmentZKMetadata(segmentNameMergedDaily2, 2_505_600_000L, 2_591_999_999L, TimeUnit.MILLISECONDS, null);
metadataMergedDaily2.setCustomMap(
ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
String segmentNameMergedDaily3 = "merged_testTable__4_2";
SegmentZKMetadata metadataMergedDaily3 =
getSegmentZKMetadata(segmentNameMergedDaily3, 2_592_000_000L, 2_592_010_000L, TimeUnit.MILLISECONDS, null);
metadataMergedDaily3.setCustomMap(
ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1,
metadataMergedDaily2, metadataMergedDaily3));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5,
segmentNameMergedDaily1, segmentNameMergedDaily2, segmentNameMergedDaily3)));
segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
new LineageEntry(Collections.singletonList(segmentName4),
Arrays.asList(segmentNameMergedDaily1, segmentNameMergedDaily2), LineageEntryState.COMPLETED, 11111L));
String taskName2 = "Task_MergeRollupTask_2";
taskStatesMap.put(taskName2, TaskState.COMPLETED);
when(mockClusterInfoProvide.getTaskConfigs(taskName2)).thenReturn(
Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily2)));
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(MergeRollupTaskMetadata.fromZNRecord(
mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY).longValue(), 2_592_000_000L);
assertEquals(MergeRollupTaskMetadata.fromZNRecord(
mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
OFFLINE_TABLE_NAME)).getWatermarkMap().get(MONTHLY).longValue(), 0L);
assertEquals(pinotTaskConfigs.size(), 2);
Map<String, String> taskConfigsDaily3 = pinotTaskConfigs.get(0).getConfigs();
Map<String, String> taskConfigsMonthly1 = pinotTaskConfigs.get(1).getConfigs();
checkPinotTaskConfig(taskConfigsDaily3, segmentNameMergedDaily3 + "," + segmentName5, DAILY, "concat", "1d", null,
"1000000");
checkPinotTaskConfig(taskConfigsMonthly1, segmentNameMergedDaily1 + "," + segmentNameMergedDaily2, MONTHLY,
"rollup", "30d", "30d", "2000000");
    // Daily tasks are not scheduled if there are no unmerged segments
    // Monthly tasks are not scheduled if there are no 30-day merged daily segments
String segmentNameMergedDaily4 = "merged_testTable__4_2__5";
SegmentZKMetadata metadataMergedDaily4 =
getSegmentZKMetadata(segmentNameMergedDaily4, 2_592_000_000L, 2_592_020_000L, TimeUnit.MILLISECONDS, null);
metadataMergedDaily4.setCustomMap(
ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, DAILY));
String segmentNameMergedMonthly1 = "merged_testTable__1__2__3__4_1";
SegmentZKMetadata metadataMergedMonthly1 =
getSegmentZKMetadata(segmentNameMergedMonthly1, 86_400_000L, 2_591_999_999L, TimeUnit.MILLISECONDS, null);
metadataMergedMonthly1.setCustomMap(
ImmutableMap.of(MinionConstants.MergeRollupTask.SEGMENT_ZK_METADATA_MERGE_LEVEL_KEY, MONTHLY));
when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
Lists.newArrayList(metadata1, metadata2, metadata3, metadata4, metadata5, metadataMergedDaily1,
metadataMergedDaily2, metadataMergedDaily3, metadataMergedDaily4, metadataMergedMonthly1));
when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(getIdealState(OFFLINE_TABLE_NAME,
Lists.newArrayList(segmentName1, segmentName2, segmentName3, segmentName4, segmentName5,
segmentNameMergedDaily1, segmentNameMergedDaily2, segmentNameMergedDaily3, segmentNameMergedDaily4,
segmentNameMergedMonthly1)));
segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
new LineageEntry(Arrays.asList(segmentNameMergedDaily3, segmentName5),
Collections.singletonList(segmentNameMergedDaily4), LineageEntryState.COMPLETED, 11111L));
segmentLineage.addLineageEntry(SegmentLineageUtils.generateLineageEntryId(),
new LineageEntry(Arrays.asList(segmentNameMergedDaily1, segmentNameMergedDaily2),
Collections.singletonList(segmentNameMergedMonthly1), LineageEntryState.COMPLETED, 11111L));
String taskName3 = "Task_MergeRollupTask_3";
taskStatesMap.put(taskName3, TaskState.COMPLETED);
when(mockClusterInfoProvide.getTaskConfigs(taskName3)).thenReturn(
Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsDaily3)));
String taskName4 = "Task_MergeRollupTask_4";
taskStatesMap.put(taskName4, TaskState.COMPLETED);
when(mockClusterInfoProvide.getTaskConfigs(taskName4)).thenReturn(
Lists.newArrayList(new PinotTaskConfig(MinionConstants.MergeRollupTask.TASK_TYPE, taskConfigsMonthly1)));
pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
assertEquals(MergeRollupTaskMetadata.fromZNRecord(
mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
OFFLINE_TABLE_NAME)).getWatermarkMap().get(DAILY).longValue(), 2_592_000_000L); // 30 days since epoch
assertEquals(MergeRollupTaskMetadata.fromZNRecord(
mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
OFFLINE_TABLE_NAME)).getWatermarkMap().get(MONTHLY).longValue(), 0L);
assertEquals(pinotTaskConfigs.size(), 0);
}
|
@Override
public PageResult<LoginLogDO> getLoginLogPage(LoginLogPageReqVO pageReqVO) {
return loginLogMapper.selectPage(pageReqVO);
}
|
@Test
public void testGetLoginLogPage() {
        // Mock data
LoginLogDO loginLogDO = randomPojo(LoginLogDO.class, o -> {
o.setUserIp("192.168.199.16");
o.setUsername("wang");
o.setResult(SUCCESS.getResult());
o.setCreateTime(buildTime(2021, 3, 6));
});
loginLogMapper.insert(loginLogDO);
        // Test: status does not match
loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setResult(CAPTCHA_CODE_ERROR.getResult())));
        // Test: ip does not match
loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setUserIp("192.168.128.18")));
        // Test: username does not match
loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setUsername("yunai")));
        // Test: createTime does not match
loginLogMapper.insert(cloneIgnoreId(loginLogDO, o -> o.setCreateTime(buildTime(2021, 2, 6))));
        // Build the request parameters
LoginLogPageReqVO reqVO = new LoginLogPageReqVO();
reqVO.setUsername("wang");
reqVO.setUserIp("192.168.199");
reqVO.setStatus(true);
reqVO.setCreateTime(buildBetweenTime(2021, 3, 5, 2021, 3, 7));
        // Invoke
PageResult<LoginLogDO> pageResult = loginLogService.getLoginLogPage(reqVO);
        // Assert: only the one matching record is returned
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(loginLogDO, pageResult.getList().get(0));
}
|
private void setMode(Request req) {
dispatchRpcRequest(req, () -> {
String suppliedMode = req.parameters().get(0).asString();
String[] s = new String[2];
try {
proxyServer.setMode(suppliedMode);
s[0] = "0";
s[1] = "success";
} catch (Exception e) {
s[0] = "1";
s[1] = e.getMessage();
}
req.returnValues().add(new StringArray(s));
req.returnRequest();
});
}
|
@Test
void testRpcMethodUpdateSources() throws ListenFailedException {
reset();
Request req = new Request("updateSources");
String spec1 = "tcp/a:19070";
String spec2 = "tcp/b:19070";
req.parameters().add(new StringValue(spec1 + "," + spec2));
client.invoke(req);
assertFalse(req.isError(), req.errorMessage());
assertEquals(1, req.returnValues().size());
assertEquals("Updated config sources to: " + spec1 + "," + spec2, req.returnValues().get(0).asString());
server.proxyServer().setMode(Mode.ModeName.MEMORYCACHE.name());
req = new Request("updateSources");
req.parameters().add(new StringValue(spec1 + "," + spec2));
client.invoke(req);
assertFalse(req.isError(), req.errorMessage());
assertEquals(1, req.returnValues().size());
assertEquals("Cannot update sources when in '" + Mode.ModeName.MEMORYCACHE.name().toLowerCase() + "' mode", req.returnValues().get(0).asString());
// TODO source connections needs to have deterministic order to work
/*req = new Request("listSourceConnections");
rpcServer.listSourceConnections(req);
assertFalse(req.errorMessage(), req.isError());
final String[] ret = req.returnValues().get(0).asStringArray();
assertEquals(ret.length, is(2));
assertEquals(ret[0], is("Current source: " + spec1));
assertEquals(ret[1], is("All sources:\n" + spec2 + "\n" + spec1 + "\n"));
*/
}
|
@Override
public String toString() {
return url.toString();
}
|
@Test
void testToString() {
Statistics statistics = new Statistics(new ServiceConfigURL("dubbo", "10.20.153.10", 0));
statistics.setApplication("demo");
statistics.setMethod("findPerson");
statistics.setServer("10.20.153.10");
statistics.setGroup("unit-test");
statistics.setService("MemberService");
assertThat(statistics.toString(), is("dubbo://10.20.153.10"));
Statistics statisticsWithDetailInfo = new Statistics(new URLBuilder(DUBBO_PROTOCOL, "10.20.153.10", 0)
.addParameter(APPLICATION_KEY, "morgan")
.addParameter(INTERFACE_KEY, "MemberService")
.addParameter(METHOD_KEY, "findPerson")
.addParameter(CONSUMER, "10.20.153.11")
.addParameter(GROUP_KEY, "unit-test")
.addParameter(SUCCESS_KEY, 1)
.addParameter(FAILURE_KEY, 0)
.addParameter(ELAPSED_KEY, 3)
.addParameter(MAX_ELAPSED_KEY, 3)
.addParameter(CONCURRENT_KEY, 1)
.addParameter(MAX_CONCURRENT_KEY, 1)
.build());
MatcherAssert.assertThat(statisticsWithDetailInfo.getServer(), equalTo(statistics.getServer()));
MatcherAssert.assertThat(statisticsWithDetailInfo.getService(), equalTo(statistics.getService()));
MatcherAssert.assertThat(statisticsWithDetailInfo.getMethod(), equalTo(statistics.getMethod()));
MatcherAssert.assertThat(statisticsWithDetailInfo.getGroup(), equalTo(statistics.getGroup()));
MatcherAssert.assertThat(statisticsWithDetailInfo, not(equalTo(statistics)));
}
|
@Override
public PullResult pull(MessageQueue mq, String subExpression, long offset, int maxNums)
throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
return this.defaultMQPullConsumerImpl.pull(queueWithNamespace(mq), subExpression, offset, maxNums);
}
|
@Test
public void testPullMessage_Success() throws Exception {
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock mock) throws Throwable {
PullMessageRequestHeader requestHeader = mock.getArgument(1);
return createPullResult(requestHeader, PullStatus.FOUND, Collections.singletonList(new MessageExt()));
}
}).when(mQClientAPIImpl).pullMessage(anyString(), any(PullMessageRequestHeader.class), anyLong(), any(CommunicationMode.class), nullable(PullCallback.class));
MessageQueue messageQueue = new MessageQueue(topic, brokerName, 0);
PullResult pullResult = pullConsumer.pull(messageQueue, "*", 1024, 3);
assertThat(pullResult).isNotNull();
assertThat(pullResult.getPullStatus()).isEqualTo(PullStatus.FOUND);
assertThat(pullResult.getNextBeginOffset()).isEqualTo(1024 + 1);
assertThat(pullResult.getMinOffset()).isEqualTo(123);
assertThat(pullResult.getMaxOffset()).isEqualTo(2048);
assertThat(pullResult.getMsgFoundList()).isEqualTo(new ArrayList<>());
}
|
@Override
public void close() throws IOException {
if (closed) {
return;
}
super.close();
this.closed = true;
if (stream != null) {
stream.close();
}
}
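
close() above is idempotent: a closed flag short-circuits repeated calls, which is exactly what testMultipleClose below exercises. A generic sketch of that guard, assuming nothing ADLS-specific:

import java.io.Closeable;
import java.io.IOException;

// Generic idempotent-close guard, mirroring the closed-flag pattern above.
final class IdempotentCloseSketch implements Closeable {
  private boolean closed;

  @Override
  public void close() throws IOException {
    if (closed) {
      return; // second and later calls are no-ops
    }
    closed = true;
    // release underlying resources here
  }

  public static void main(String[] args) throws IOException {
    try (IdempotentCloseSketch s = new IdempotentCloseSketch()) {
      s.close(); // explicit close; try-with-resources closes again safely
    }
  }
}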
|
@Test
public void testMultipleClose() throws IOException {
DataLakeFileClient fileClient = AZURITE_CONTAINER.fileClient(randomPath());
ADLSOutputStream stream =
new ADLSOutputStream(fileClient, azureProperties, MetricsContext.nullMetrics());
stream.close();
stream.close();
}
|
@Override
@Transactional(rollbackFor = Exception.class)
public void updateJob(JobSaveReqVO updateReqVO) throws SchedulerException {
validateCronExpression(updateReqVO.getCronExpression());
        // 1.1 Validate that the job exists
JobDO job = validateJobExists(updateReqVO.getId());
        // 1.2 Only a job in NORMAL (enabled) status can be updated. Reason: if the job is paused, updating the Quartz Job would cause it to start executing again
if (!job.getStatus().equals(JobStatusEnum.NORMAL.getStatus())) {
throw exception(JOB_UPDATE_ONLY_NORMAL_STATUS);
}
        // 1.3 Validate that the JobHandler exists
validateJobHandlerExists(updateReqVO.getHandlerName());
        // 2. Update the JobDO
JobDO updateObj = BeanUtils.toBean(updateReqVO, JobDO.class);
fillJobMonitorTimeoutEmpty(updateObj);
jobMapper.updateById(updateObj);
        // 3. Update the Job in Quartz
schedulerManager.updateJob(job.getHandlerName(), updateReqVO.getHandlerParam(), updateReqVO.getCronExpression(),
updateReqVO.getRetryCount(), updateReqVO.getRetryInterval());
}
|
@Test
public void testUpdateJob_jobNotExists(){
        // Prepare parameters
JobSaveReqVO reqVO = randomPojo(JobSaveReqVO.class, o -> o.setCronExpression("0 0/1 * * * ? *"));
        // Invoke and assert the exception
assertServiceException(() -> jobService.updateJob(reqVO), JOB_NOT_EXISTS);
}
|
@VisibleForTesting
void checkMaxEligible() {
// If we have too many eligible apps, remove the newest ones first
    if (maxEligible > 0 && eligibleApplications.size() > maxEligible) {
      if (verbose) {
        LOG.info("Too many applications (" + eligibleApplications.size()
            + " > " + maxEligible + ")");
      }
      List<AppInfo> sortedApplications =
          new ArrayList<AppInfo>(eligibleApplications);
      Collections.sort(sortedApplications, new Comparator<AppInfo>() {
@Override
public int compare(AppInfo o1, AppInfo o2) {
int lCompare = Long.compare(o1.getFinishTime(), o2.getFinishTime());
if (lCompare == 0) {
return o1.getAppId().compareTo(o2.getAppId());
}
return lCompare;
}
});
for (int i = maxEligible; i < sortedApplications.size(); i++) {
if (verbose) {
LOG.info("Removing " + sortedApplications.get(i));
}
eligibleApplications.remove(sortedApplications.get(i));
}
}
}
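
The pruning above sorts ascending by finish time with the application id as tie-breaker and drops everything past maxEligible, so the newest applications are removed first and apps with an unset (zero) finish time survive the longest. A standalone sketch of that ordering, with a hypothetical AppStub record standing in for AppInfo:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Standalone sketch of the "keep oldest, drop newest" ordering used above (AppStub is hypothetical).
public final class MaxEligibleSketch {
  record AppStub(String appId, long finishTime) { }

  public static void main(String[] args) {
    List<AppStub> apps = new ArrayList<>(List.of(
        new AppStub("app_3", 0L),      // no finish time set -> sorts first, kept the longest
        new AppStub("app_1", 95L),
        new AppStub("app_2", 90L),
        new AppStub("app_5", 110L)));  // newest -> removed first
    apps.sort(Comparator.comparingLong(AppStub::finishTime).thenComparing(AppStub::appId));
    int maxEligible = 2;
    List<AppStub> kept = apps.subList(0, Math.min(maxEligible, apps.size()));
    System.out.println(kept); // [AppStub[appId=app_3, finishTime=0], AppStub[appId=app_2, finishTime=90]]
  }
}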
|
@Test(timeout = 10000)
public void testCheckMaxEligible() throws Exception {
Configuration conf = new Configuration();
HadoopArchiveLogs.AppInfo app1 = new HadoopArchiveLogs.AppInfo(
ApplicationId.newInstance(CLUSTER_TIMESTAMP, 1).toString(), USER);
app1.setFinishTime(CLUSTER_TIMESTAMP - 5);
HadoopArchiveLogs.AppInfo app2 = new HadoopArchiveLogs.AppInfo(
ApplicationId.newInstance(CLUSTER_TIMESTAMP, 2).toString(), USER);
app2.setFinishTime(CLUSTER_TIMESTAMP - 10);
HadoopArchiveLogs.AppInfo app3 = new HadoopArchiveLogs.AppInfo(
ApplicationId.newInstance(CLUSTER_TIMESTAMP, 3).toString(), USER);
// app3 has no finish time set
HadoopArchiveLogs.AppInfo app4 = new HadoopArchiveLogs.AppInfo(
ApplicationId.newInstance(CLUSTER_TIMESTAMP, 4).toString(), USER);
app4.setFinishTime(CLUSTER_TIMESTAMP + 5);
HadoopArchiveLogs.AppInfo app5 = new HadoopArchiveLogs.AppInfo(
ApplicationId.newInstance(CLUSTER_TIMESTAMP, 5).toString(), USER);
app5.setFinishTime(CLUSTER_TIMESTAMP + 10);
HadoopArchiveLogs.AppInfo app6 = new HadoopArchiveLogs.AppInfo(
ApplicationId.newInstance(CLUSTER_TIMESTAMP, 6).toString(), USER);
// app6 has no finish time set
HadoopArchiveLogs.AppInfo app7 = new HadoopArchiveLogs.AppInfo(
ApplicationId.newInstance(CLUSTER_TIMESTAMP, 7).toString(), USER);
app7.setFinishTime(CLUSTER_TIMESTAMP);
HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
Assert.assertEquals(0, hal.eligibleApplications.size());
hal.eligibleApplications.add(app1);
hal.eligibleApplications.add(app2);
hal.eligibleApplications.add(app3);
hal.eligibleApplications.add(app4);
hal.eligibleApplications.add(app5);
hal.eligibleApplications.add(app6);
hal.eligibleApplications.add(app7);
Assert.assertEquals(7, hal.eligibleApplications.size());
hal.maxEligible = -1;
hal.checkMaxEligible();
Assert.assertEquals(7, hal.eligibleApplications.size());
hal.maxEligible = 6;
hal.checkMaxEligible();
Assert.assertEquals(6, hal.eligibleApplications.size());
Assert.assertFalse(hal.eligibleApplications.contains(app5));
hal.maxEligible = 5;
hal.checkMaxEligible();
Assert.assertEquals(5, hal.eligibleApplications.size());
Assert.assertFalse(hal.eligibleApplications.contains(app4));
hal.maxEligible = 4;
hal.checkMaxEligible();
Assert.assertEquals(4, hal.eligibleApplications.size());
Assert.assertFalse(hal.eligibleApplications.contains(app7));
hal.maxEligible = 3;
hal.checkMaxEligible();
Assert.assertEquals(3, hal.eligibleApplications.size());
Assert.assertFalse(hal.eligibleApplications.contains(app1));
hal.maxEligible = 2;
hal.checkMaxEligible();
Assert.assertEquals(2, hal.eligibleApplications.size());
Assert.assertFalse(hal.eligibleApplications.contains(app2));
hal.maxEligible = 1;
hal.checkMaxEligible();
Assert.assertEquals(1, hal.eligibleApplications.size());
Assert.assertFalse(hal.eligibleApplications.contains(app6));
Assert.assertTrue(hal.eligibleApplications.contains(app3));
}
|
public static FunctionTypeInfo getFunctionTypeInfo(
final ExpressionTypeManager expressionTypeManager,
final FunctionCall functionCall,
final UdfFactory udfFactory,
final Map<String, SqlType> lambdaMapping
) {
// CHECKSTYLE_RULES.ON: CyclomaticComplexity
final List<Expression> arguments = functionCall.getArguments();
final List<SqlArgument> functionArgumentTypes = firstPassOverFunctionArguments(
arguments,
expressionTypeManager,
lambdaMapping
);
final KsqlScalarFunction function = udfFactory.getFunction(functionArgumentTypes);
final SqlType returnSchema;
final List<ArgumentInfo> argumentInfoForFunction = new ArrayList<>();
if (!functionCall.hasLambdaFunctionCallArguments()) {
returnSchema = function.getReturnType(functionArgumentTypes);
return FunctionTypeInfo.of(
functionArgumentTypes.stream()
.map(argument -> ArgumentInfo.of(argument, new HashMap<>(lambdaMapping)))
.collect(Collectors.toList()),
returnSchema,
function
);
} else {
final List<ParamType> paramTypes = function.parameters();
final Map<GenericType, SqlType> reservedGenerics = new HashMap<>();
final List<SqlArgument> functionArgumentTypesWithResolvedLambdaType = new ArrayList<>();
// second pass over the function arguments to properly do lambda type checking
for (int i = 0; i < arguments.size(); i++) {
final Expression expression = arguments.get(i);
final ParamType parameter = paramTypes.get(i);
if (expression instanceof LambdaFunctionCall) {
// the function returned from the UDF factory should have lambda
// at this index in the function arguments if there's a
// lambda node at this index in the function node argument list
if (!(parameter instanceof LambdaType)) {
          throw new RuntimeException(String.format("Error while processing lambda function. "
              + "Expected lambda parameter but was %s. "
              + "This is most likely an internal error and a "
              + "GitHub issue should be filed for debugging. "
              + "Include the function name, the parameters passed in, the expected "
              + "signature, and any other relevant information.", parameter.toString()));
}
final ArrayList<SqlType> lambdaSqlTypes = new ArrayList<>();
final Map<String, SqlType> variableTypeMapping = mapLambdaParametersToTypes(
(LambdaFunctionCall) expression,
(LambdaType) parameter,
reservedGenerics,
lambdaSqlTypes
);
          final Map<String, SqlType> updateLambdaMapping =
LambdaMappingUtil.resolveOldAndNewLambdaMapping(variableTypeMapping, lambdaMapping);
final SqlType resolvedLambdaReturnType =
expressionTypeManager.getExpressionSqlType(expression, updateLambdaMapping);
final SqlArgument lambdaArgument = SqlArgument.of(
SqlLambdaResolved.of(lambdaSqlTypes, resolvedLambdaReturnType));
functionArgumentTypesWithResolvedLambdaType.add(lambdaArgument);
argumentInfoForFunction.add(
ArgumentInfo.of(
lambdaArgument,
new HashMap<>(updateLambdaMapping)));
} else {
functionArgumentTypesWithResolvedLambdaType.add(functionArgumentTypes.get(i));
argumentInfoForFunction.add(
ArgumentInfo.of(
functionArgumentTypes.get(i),
new HashMap<>(lambdaMapping)));
}
if (GenericsUtil.hasGenerics(parameter)) {
final Pair<Boolean, Optional<KsqlException>> success = GenericsUtil.reserveGenerics(
parameter,
functionArgumentTypesWithResolvedLambdaType.get(i),
reservedGenerics
);
if (!success.getLeft() && success.getRight().isPresent()) {
throw success.getRight().get();
}
}
}
returnSchema = function.getReturnType(functionArgumentTypesWithResolvedLambdaType);
return new FunctionTypeInfo(
argumentInfoForFunction,
returnSchema,
function
);
}
}
|
@Test
public void shouldResolveFunctionWithoutLambdas() {
// Given:
givenUdfWithNameAndReturnType("NoLambdas", SqlTypes.STRING);
when(function.parameters()).thenReturn(
ImmutableList.of(ParamTypes.STRING));
final FunctionCall expression = new FunctionCall(FunctionName.of("NoLambdas"), ImmutableList.of(new StringLiteral("a")));
// When:
final FunctionTypeInfo argumentsAndContexts =
FunctionArgumentsUtil.getFunctionTypeInfo(expressionTypeManager, expression, udfFactory, Collections.emptyMap());
// Then:
assertThat(argumentsAndContexts.getReturnType(), is(SqlTypes.STRING));
assertThat(argumentsAndContexts.getArgumentInfos().size(), is(1));
verify(udfFactory).getFunction(
ImmutableList.of(
SqlArgument.of(SqlTypes.STRING)
)
);
verify(function).getReturnType(
ImmutableList.of(
SqlArgument.of(SqlTypes.STRING)
)
);
}
|
@Override
public Object evaluate(final ProcessingDTO processingDTO) {
return getFromPossibleSources(name, processingDTO)
.orElse(mapMissingTo);
}
|
@Test
void evaluateFromKiePMMLNameValues() {
final Object value = 234.45;
final List<KiePMMLNameValue> kiePMMLNameValues = Collections.singletonList(new KiePMMLNameValue(FIELD_NAME,
value));
final KiePMMLFieldRef kiePMMLFieldRef = new KiePMMLFieldRef(FIELD_NAME, Collections.emptyList(), null);
ProcessingDTO processingDTO = getProcessingDTO(Collections.emptyList(), kiePMMLNameValues);
final Object retrieved = kiePMMLFieldRef.evaluate(processingDTO);
assertThat(retrieved).isEqualTo(value);
}
|
@Override
public Integer call() throws Exception {
super.call();
if (this.pluginsPath == null) {
            throw new CommandLine.ParameterException(this.spec.commandLine(), "Missing required option '--plugins' " +
                "or environment variable 'KESTRA_PLUGINS_PATH'"
);
}
if (!pluginsPath.toFile().exists()) {
if (!pluginsPath.toFile().mkdir()) {
throw new RuntimeException("Cannot create directory: " + pluginsPath.toFile().getAbsolutePath());
}
}
if (repositories != null) {
Arrays.stream(repositories)
.forEach(throwConsumer(s -> {
URIBuilder uriBuilder = new URIBuilder(s);
RepositoryConfig.RepositoryConfigBuilder builder = RepositoryConfig.builder()
.id(IdUtils.create());
if (uriBuilder.getUserInfo() != null) {
int index = uriBuilder.getUserInfo().indexOf(":");
builder.basicAuth(new RepositoryConfig.BasicAuth(
uriBuilder.getUserInfo().substring(0, index),
uriBuilder.getUserInfo().substring(index + 1)
));
uriBuilder.setUserInfo(null);
}
builder.url(uriBuilder.build().toString());
pluginDownloader.addRepository(builder.build());
}));
}
List<URL> resolveUrl = pluginDownloader.resolve(dependencies);
stdOut("Resolved Plugin(s) with {0}", resolveUrl);
for (URL url: resolveUrl) {
Files.copy(
Paths.get(url.toURI()),
Paths.get(pluginsPath.toString(), FilenameUtils.getName(url.toString())),
StandardCopyOption.REPLACE_EXISTING
);
}
stdOut("Successfully installed plugins {0} into {1}", dependencies, pluginsPath);
return 0;
}
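
The repository handling above splits the URL's user-info at the first ':' into basic-auth credentials and strips it from the URL before registering the repository. A minimal sketch of that split using only java.net.URI; the real command uses Apache's URIBuilder and Kestra's RepositoryConfig, which are not reproduced here, and the URL below is made up:

import java.net.URI;
import java.net.URISyntaxException;

// Sketch: extract user:password credentials from a repository URL and rebuild the URL without them.
public final class RepoAuthSketch {
  public static void main(String[] args) throws URISyntaxException {
    URI uri = new URI("https://alice:s3cret@repo.example.org/maven2"); // hypothetical repository URL
    String userInfo = uri.getUserInfo();
    if (userInfo != null) {
      int index = userInfo.indexOf(':');
      String user = userInfo.substring(0, index);
      String password = userInfo.substring(index + 1);
      System.out.println("basic auth: " + user + " / " + password);
    }
    // Rebuild the URI without the user-info part.
    URI stripped = new URI(uri.getScheme(), null, uri.getHost(), uri.getPort(),
        uri.getPath(), uri.getQuery(), uri.getFragment());
    System.out.println("repository url: " + stripped); // https://repo.example.org/maven2
  }
}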
|
@Test
void rangeVersion() throws IOException {
Path pluginsPath = Files.createTempDirectory(PluginInstallCommandTest.class.getSimpleName());
pluginsPath.toFile().deleteOnExit();
try (ApplicationContext ctx = ApplicationContext.run(Environment.CLI, Environment.TEST)) {
            // SNAPSHOT versions are included in the 0.12 range but not in 0.13, so to avoid resolving them we must declare the upper bound as excluded.
String[] args = {"--plugins", pluginsPath.toAbsolutePath().toString(), "io.kestra.storage:storage-s3:[0.12,0.13.0-SNAPSHOT)"};
PicocliRunner.call(PluginInstallCommand.class, ctx, args);
List<Path> files = Files.list(pluginsPath).toList();
assertThat(files.size(), is(1));
assertThat(files.getFirst().getFileName().toString(), is("storage-s3-0.12.1.jar"));
}
}
|
@Override
public void batchRegisterInstance(Service service, List<Instance> instances, String clientId) {
Service singleton = ServiceManager.getInstance().getSingleton(service);
if (!singleton.isEphemeral()) {
throw new NacosRuntimeException(NacosException.INVALID_PARAM,
String.format("Current service %s is persistent service, can't batch register ephemeral instance.",
singleton.getGroupedServiceName()));
}
Client client = clientManager.getClient(clientId);
checkClientIsLegal(client, clientId);
BatchInstancePublishInfo batchInstancePublishInfo = new BatchInstancePublishInfo();
List<InstancePublishInfo> resultList = new ArrayList<>();
for (Instance instance : instances) {
InstancePublishInfo instanceInfo = getPublishInfo(instance);
resultList.add(instanceInfo);
}
batchInstancePublishInfo.setInstancePublishInfos(resultList);
client.addServiceInstance(singleton, batchInstancePublishInfo);
client.setLastUpdatedTime();
client.recalculateRevision();
NotifyCenter.publishEvent(new ClientOperationEvent.ClientRegisterServiceEvent(singleton, clientId));
NotifyCenter.publishEvent(
new MetadataEvent.InstanceMetadataEvent(singleton, batchInstancePublishInfo.getMetadataId(), false));
}
|
@Test
void testBatchRegisterAndDeregisterInstance() throws Exception {
// test Batch register instance
Instance instance1 = new Instance();
instance1.setEphemeral(true);
instance1.setIp("127.0.0.1");
instance1.setPort(9087);
instance1.setHealthy(true);
Instance instance2 = new Instance();
instance2.setEphemeral(true);
instance2.setIp("127.0.0.2");
instance2.setPort(9045);
instance2.setHealthy(true);
List<Instance> instances = new ArrayList<>();
instances.add(instance1);
instances.add(instance2);
ephemeralClientOperationServiceImpl.batchRegisterInstance(service, instances, connectionBasedClientId);
assertTrue(connectionBasedClient.getAllPublishedService().contains(service));
}
|
public Optional<PushEventDto> raiseEventOnIssue(String projectUuid, DefaultIssue currentIssue) {
var currentIssueComponentUuid = currentIssue.componentUuid();
if (currentIssueComponentUuid == null) {
return Optional.empty();
}
var component = treeRootHolder.getComponentByUuid(currentIssueComponentUuid);
if (isTaintVulnerability(currentIssue)) {
return raiseTaintVulnerabilityEvent(projectUuid, component, currentIssue);
}
if (isSecurityHotspot(currentIssue)) {
return raiseSecurityHotspotEvent(projectUuid, component, currentIssue);
}
return Optional.empty();
}
|
@Test
public void raiseEventOnIssue_whenComponentUuidNull_shouldSkipEvent() {
DefaultIssue defaultIssue = createDefaultIssue()
.setComponentUuid(null);
assertThat(underTest.raiseEventOnIssue("some-project-uuid", defaultIssue)).isEmpty();
}
|
@Override
public void run() {
if (processor != null) {
processor.execute();
} else {
if (!beforeHook()) {
logger.info("before-feature hook returned [false], aborting: {}", this);
} else {
scenarios.forEachRemaining(this::processScenario);
}
afterFeature();
}
}
|
@Test
void testOutlineCsv() {
run("outline-csv.feature");
}
|
static RuntimeException handleException(Throwable e) {
if (e instanceof OutOfMemoryError error) {
OutOfMemoryErrorDispatcher.onOutOfMemory(error);
throw error;
}
if (e instanceof Error error) {
throw error;
}
if (e instanceof HazelcastSerializationException exception) {
return exception;
}
if (e instanceof HazelcastInstanceNotActiveException exception) {
return exception;
}
if (e instanceof HazelcastClientNotActiveException exception) {
return exception;
}
return new HazelcastSerializationException(e);
}
|
@Test(expected = Error.class)
public void testHandleException_otherError() {
SerializationUtil.handleException(new UnknownError());
}
|
List<Token> tokenize() throws ScanException {
List<Token> tokenList = new ArrayList<Token>();
StringBuffer buf = new StringBuffer();
while (pointer < patternLength) {
char c = pattern.charAt(pointer);
pointer++;
switch (state) {
case LITERAL_STATE:
handleLiteralState(c, tokenList, buf);
break;
case FORMAT_MODIFIER_STATE:
handleFormatModifierState(c, tokenList, buf);
break;
case OPTION_STATE:
processOption(c, tokenList, buf);
break;
case KEYWORD_STATE:
handleKeywordState(c, tokenList, buf);
break;
case RIGHT_PARENTHESIS_STATE:
handleRightParenthesisState(c, tokenList, buf);
break;
default:
}
}
// EOS
switch (state) {
case LITERAL_STATE:
addValuedToken(Token.LITERAL, buf, tokenList);
break;
case KEYWORD_STATE:
tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
break;
case RIGHT_PARENTHESIS_STATE:
tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
break;
case FORMAT_MODIFIER_STATE:
case OPTION_STATE:
throw new ScanException("Unexpected end of pattern string");
}
return tokenList;
}
|
@Test
    public void testEscapedParentheses() throws ScanException {
{
List<Token> tl = new TokenStream("\\(%h\\)").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(new Token(Token.LITERAL, "("));
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "h"));
witness.add(new Token(Token.LITERAL, ")"));
assertEquals(witness, tl);
}
{
List<Token> tl = new TokenStream("(%h\\)").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(new Token(Token.LITERAL, "("));
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "h"));
witness.add(new Token(Token.LITERAL, ")"));
assertEquals(witness, tl);
}
{
List<Token> tl = new TokenStream("%a(x\\)").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.COMPOSITE_KEYWORD, "a"));
witness.add(new Token(Token.LITERAL, "x)"));
assertEquals(witness, tl);
}
{
List<Token> tl = new TokenStream("%a\\(x)").tokenize();
List<Token> witness = new ArrayList<Token>();
witness.add(Token.PERCENT_TOKEN);
witness.add(new Token(Token.SIMPLE_KEYWORD, "a"));
witness.add(new Token(Token.LITERAL, "(x"));
witness.add(new Token(Token.RIGHT_PARENTHESIS));
assertEquals(witness, tl);
}
}
|
@Override
public E putIfAbsent(String key, E value) { return entryMap.putIfAbsent(key, value); }
|
@Test(expected = NullPointerException.class)
public void shouldThrowNPEWhenKeyIsNull() {
inMemoryRegistryStore.putIfAbsent(null, DEFAULT_CONFIG_VALUE);
}
|
public PaginationContext createPaginationContext(final Collection<ExpressionSegment> expressions, final ProjectionsContext projectionsContext, final List<Object> params) {
Optional<String> rowNumberAlias = findRowNumberAlias(projectionsContext);
if (!rowNumberAlias.isPresent()) {
return new PaginationContext(null, null, params);
}
Collection<AndPredicate> andPredicates = expressions.stream().flatMap(each -> ExpressionExtractUtils.getAndPredicates(each).stream()).collect(Collectors.toList());
Collection<BinaryOperationExpression> rowNumberPredicates = getRowNumberPredicates(andPredicates, rowNumberAlias.get());
return rowNumberPredicates.isEmpty() ? new PaginationContext(null, null, params) : createPaginationWithRowNumber(rowNumberPredicates, params);
}
|
@Test
void assertCreatePaginationContextWhenRowNumberAliasIsPresentAndRowNumberPredicatesIsEmpty() {
Projection projectionWithRowNumberAlias = new ColumnProjection(null, ROW_NUMBER_COLUMN_NAME, ROW_NUMBER_COLUMN_ALIAS, mock(DatabaseType.class));
ProjectionsContext projectionsContext = new ProjectionsContext(0, 0, false, Collections.singleton(projectionWithRowNumberAlias));
PaginationContext paginationContext =
new RowNumberPaginationContextEngine(new OracleDatabaseType()).createPaginationContext(Collections.emptyList(), projectionsContext, Collections.emptyList());
assertFalse(paginationContext.getOffsetSegment().isPresent());
assertFalse(paginationContext.getRowCountSegment().isPresent());
}
|
public static <T> boolean isNotEmpty(T[] array) {
return null != array && array.length > 0;
}
|
@Test
public void isNotEmpty() {
final Object[] array = {};
Assert.assertFalse(CollectionKit.isNotEmpty(array));
final Object[] array2 = {null};
Assert.assertTrue(CollectionKit.isNotEmpty(array2));
final Object[] array3 = null;
Assert.assertFalse(CollectionKit.isNotEmpty(array3));
final ArrayList collection = new ArrayList();
Assert.assertFalse(CollectionKit.isNotEmpty(collection));
final ArrayList collection2 = new ArrayList();
collection2.add(null);
Assert.assertTrue(CollectionKit.isNotEmpty(collection2));
final ArrayList collection3 = null;
Assert.assertFalse(CollectionKit.isNotEmpty(collection3));
}
|
@Override
public String execute(CommandContext commandContext, String[] args) {
StringBuilder result = new StringBuilder();
result.append(listProvider());
result.append(listConsumer());
return result.toString();
}
|
@Test
void testExecute() {
Ls ls = new Ls(frameworkModel);
String result = ls.execute(Mockito.mock(CommandContext.class), new String[0]);
System.out.println(result);
/**
* As Provider side:
* +--------------------------------+---+
* | Provider Service Name |PUB|
* +--------------------------------+---+
* |org.apache.dubbo.qos.DemoService| N |
* +--------------------------------+---+
* As Consumer side:
* +--------------------------------+---+
* | Consumer Service Name |NUM|
* +--------------------------------+---+
* |org.apache.dubbo.qos.DemoService| 0 |
* +--------------------------------+---+
*/
}
|
@Override
public boolean supportsColumnAliasing() {
return false;
}
|
@Test
void assertSupportsColumnAliasing() {
assertFalse(metaData.supportsColumnAliasing());
}
|
@Override
public void checkBeforeUpdate(final AlterReadwriteSplittingRuleStatement sqlStatement) {
ReadwriteSplittingRuleStatementChecker.checkAlteration(database, sqlStatement.getRules(), rule.getConfiguration());
}
|
@Test
void assertCheckSQLStatementWithoutExistedResources() {
when(resourceMetaData.getNotExistedDataSources(any())).thenReturn(Collections.singleton("read_ds_0"));
ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class);
when(rule.getConfiguration()).thenReturn(createCurrentRuleConfiguration());
executor.setRule(rule);
assertThrows(MissingRequiredStorageUnitsException.class, () -> executor.checkBeforeUpdate(createSQLStatement("TEST")));
}
|
@PublicAPI(usage = ACCESS)
public JavaClasses importUrl(URL url) {
return importUrls(singletonList(url));
}
|
@Test
public void imports_simple_class_details() {
JavaClasses classes = new ClassFileImporter().importUrl(getClass().getResource("testexamples/simpleimport"));
assertThat(classes.get(ClassToImportOne.class))
.isFullyImported(true)
.matches(ClassToImportOne.class)
.hasRawSuperclassMatching(Object.class)
.hasNoInterfaces()
.isInterface(false)
.isEnum(false)
.isAnnotation(false)
.isRecord(false)
.hasNoEnclosingClass()
.isTopLevelClass(true)
.isNestedClass(false)
.isMemberClass(false)
.isInnerClass(false)
.isLocalClass(false)
.isAnonymousClass(false);
assertThat(classes.get(ClassToImportTwo.class))
.hasOnlyModifiers(JavaModifier.PUBLIC, JavaModifier.FINAL);
}
|
@Override
public Address translate(Address address) throws Exception {
if (address == null) {
return null;
}
        // If it is inside the cloud, return the private address; otherwise we need to translate it.
if (!usePublic) {
return address;
}
Address publicAddress = privateToPublic.get(address);
if (publicAddress != null) {
return publicAddress;
}
privateToPublic = getAddresses.call();
return privateToPublic.get(address);
}
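
translate() above serves lookups from a cached private-to-public map and reloads the whole map once on a miss. A tiny, generic sketch of that refresh-on-miss pattern, with plain strings standing in for Hazelcast's Address:

import java.util.Map;
import java.util.concurrent.Callable;

// Generic refresh-on-miss cache, mirroring the translate() flow above (String instead of Address).
final class RefreshOnMissSketch {
  private final Callable<Map<String, String>> loader;
  private volatile Map<String, String> cache;

  RefreshOnMissSketch(Callable<Map<String, String>> loader, Map<String, String> initial) {
    this.loader = loader;
    this.cache = initial;
  }

  String translate(String key) throws Exception {
    String value = cache.get(key);
    if (value != null) {
      return value;
    }
    cache = loader.call(); // miss: reload the whole mapping once, then retry
    return cache.get(key);
  }

  public static void main(String[] args) throws Exception {
    RefreshOnMissSketch sketch = new RefreshOnMissSketch(
        () -> Map.of("10.0.0.1:5701", "198.51.100.1:5701"), Map.of());
    System.out.println(sketch.translate("10.0.0.1:5701")); // 198.51.100.1:5701
  }
}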
|
@Test
public void testTranslate_dontUsePublic() throws Exception {
Address privateAddress = new Address("10.0.0.1", 5701);
Address publicAddress = new Address("198.51.100.1", 5701);
RemoteAddressProvider provider = new RemoteAddressProvider(() -> Collections.singletonMap(privateAddress, publicAddress)
, false);
Address actual = provider.translate(privateAddress);
assertEquals(privateAddress.getHost(), actual.getHost());
assertEquals(privateAddress.getPort(), actual.getPort());
}
|
public EventWithContext addEventContext(Event event) {
return toBuilder().eventContext(event).build();
}
|
@Test
public void addEventContext() {
final Event event = new TestEvent();
final Event eventContext = new TestEvent();
final EventWithContext withContext = EventWithContext.builder()
.event(event)
.build();
final EventWithContext withContext1 = withContext.addEventContext(eventContext);
assertThat(withContext.eventContext()).isNotPresent();
assertThat(withContext1.eventContext()).get().isEqualTo(eventContext);
assertThat(withContext1.event()).isNotEqualTo(withContext1.eventContext());
}
|
public static boolean isFileNameValid(String fileName) {
// Trim the trailing slash if there is one.
    if (fileName.endsWith("/")) fileName = fileName.substring(0, fileName.length() - 1);
    // Keep only the portion after the last slash, if any.
if (fileName.contains("/")) fileName = fileName.substring(fileName.lastIndexOf('/') + 1);
return !TextUtils.isEmpty(fileName)
&& !(fileName.contains(ASTERISK)
|| fileName.contains(BACKWARD_SLASH)
|| fileName.contains(COLON)
|| fileName.contains(FOREWARD_SLASH)
|| fileName.contains(GREATER_THAN)
|| fileName.contains(LESS_THAN)
|| fileName.contains(QUESTION_MARK)
|| fileName.contains(QUOTE));
}
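
A compact, framework-free approximation of the same check: keep only the last path segment and reject the reserved characters. This is a sketch, not the Android implementation above, which relies on TextUtils and named character constants:

import java.util.regex.Pattern;

// Approximation of isFileNameValid: take the last path segment and reject reserved characters.
public final class FileNameCheckSketch {
  private static final Pattern RESERVED = Pattern.compile("[\\\\/:*?\"<>]");

  static boolean isValid(String fileName) {
    if (fileName.endsWith("/")) {
      fileName = fileName.substring(0, fileName.length() - 1);
    }
    int slash = fileName.lastIndexOf('/');
    if (slash >= 0) {
      fileName = fileName.substring(slash + 1);
    }
    return !fileName.isEmpty() && !RESERVED.matcher(fileName).find();
  }

  public static void main(String[] args) {
    System.out.println(isValid("file.txt"));              // true
    System.out.println(isValid("/storage/emulated/0/a")); // true
    System.out.println(isValid("bad:name?.txt"));         // false
  }
}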
|
@Test
public void testIsFileNameValid() {
assertTrue(Operations.isFileNameValid("file.txt"));
assertTrue(Operations.isFileNameValid("/storage/emulated/0/Documents/file.txt"));
assertTrue(Operations.isFileNameValid("/system/etc/file.txt"));
assertTrue(Operations.isFileNameValid("smb://127.0.0.1/trancelove/file.txt"));
assertTrue(Operations.isFileNameValid("ssh://127.0.0.1:54225/home/trancelove/file.txt"));
assertTrue(Operations.isFileNameValid("ftp://127.0.0.1:3721/pub/Incoming/file.txt"));
assertTrue(
Operations.isFileNameValid(
"content://com.amaze.filemanager/storage_root/storage/emulated/0/Documents/file.txt"));
}
|
@Override
public void apply(Project project)
{
checkGradleVersion(project);
project.getPlugins().apply(JavaPlugin.class);
// this HashMap will have a PegasusOptions per sourceSet
project.getExtensions().getExtraProperties().set("pegasus", new HashMap<>());
// this map will extract PegasusOptions.GenerationMode to project property
project.getExtensions().getExtraProperties().set("PegasusGenerationMode",
Arrays.stream(PegasusOptions.GenerationMode.values())
.collect(Collectors.toMap(PegasusOptions.GenerationMode::name, Function.identity())));
synchronized (STATIC_PROJECT_EVALUATED_LOCK)
{
// Check if this is the first time the block will run. Pegasus plugin can run multiple times in a build if
      // multiple sub-projects apply the plugin.
if (!project.getRootProject().hasProperty(RUN_ONCE)
|| !Boolean.parseBoolean(String.valueOf(project.getRootProject().property(RUN_ONCE))))
{
project.getGradle().projectsEvaluated(gradle ->
gradle.getRootProject().subprojects(subproject ->
UNUSED_CONFIGURATIONS.forEach(configurationName -> {
Configuration conf = subproject.getConfigurations().findByName(configurationName);
if (conf != null && !conf.getDependencies().isEmpty()) {
subproject.getLogger().warn("*** Project {} declares dependency to unused configuration \"{}\". "
+ "This configuration is deprecated and you can safely remove the dependency. ***",
subproject.getPath(), configurationName);
}
})
)
);
      // Re-initialize the static variables as they might have stale values from a previous run. With Gradle 3.0 and
      // the Gradle daemon enabled, the plugin class might not be loaded for every run.
DATA_TEMPLATE_FILE_SUFFIXES.clear();
DATA_TEMPLATE_FILE_SUFFIXES.add(DATA_TEMPLATE_FILE_SUFFIX);
DATA_TEMPLATE_FILE_SUFFIXES.add(PDL_FILE_SUFFIX);
_restModelCompatMessage = new StringBuffer();
_needCheckinFiles.clear();
_needBuildFolders.clear();
_possibleMissingFilesInEarlierCommit.clear();
project.getGradle().buildFinished(result ->
{
StringBuilder endOfBuildMessage = new StringBuilder();
if (_restModelCompatMessage.length() > 0)
{
endOfBuildMessage.append(_restModelCompatMessage);
}
if (!_needCheckinFiles.isEmpty())
{
endOfBuildMessage.append(createModifiedFilesMessage(_needCheckinFiles, _needBuildFolders));
}
if (!_possibleMissingFilesInEarlierCommit.isEmpty())
{
endOfBuildMessage.append(createPossibleMissingFilesMessage(_possibleMissingFilesInEarlierCommit));
}
if (endOfBuildMessage.length() > 0)
{
result.getGradle().getRootProject().getLogger().quiet(endOfBuildMessage.toString());
}
});
// Set an extra property on the root project to indicate the initialization is complete for the current build.
project.getRootProject().getExtensions().getExtraProperties().set(RUN_ONCE, true);
}
}
ConfigurationContainer configurations = project.getConfigurations();
// configuration for getting the required classes to make pegasus call main methods
configurations.maybeCreate(PEGASUS_PLUGIN_CONFIGURATION);
// configuration for compiling generated data templates
Configuration dataTemplateCompile = configurations.maybeCreate("dataTemplateCompile");
dataTemplateCompile.setVisible(false);
// configuration for running rest client generator
Configuration restClientCompile = configurations.maybeCreate("restClientCompile");
restClientCompile.setVisible(false);
// configuration for running data template generator
// DEPRECATED! This configuration is no longer used. Please stop using it.
Configuration dataTemplateGenerator = configurations.maybeCreate("dataTemplateGenerator");
dataTemplateGenerator.setVisible(false);
// configuration for running rest client generator
// DEPRECATED! This configuration is no longer used. Please stop using it.
Configuration restTools = configurations.maybeCreate("restTools");
restTools.setVisible(false);
// configuration for running Avro schema generator
// DEPRECATED! To skip avro schema generation, use PegasusOptions.generationModes
Configuration avroSchemaGenerator = configurations.maybeCreate("avroSchemaGenerator");
avroSchemaGenerator.setVisible(false);
// configuration for depending on data schemas and potentially generated data templates
// and for publishing jars containing data schemas to the project artifacts for including in the ivy.xml
Configuration dataModel = configurations.maybeCreate("dataModel");
Configuration testDataModel = configurations.maybeCreate("testDataModel");
testDataModel.extendsFrom(dataModel);
// configuration for depending on data schemas and potentially generated data templates
// and for publishing jars containing data schemas to the project artifacts for including in the ivy.xml
Configuration avroSchema = configurations.maybeCreate("avroSchema");
Configuration testAvroSchema = configurations.maybeCreate("testAvroSchema");
testAvroSchema.extendsFrom(avroSchema);
// configuration for depending on rest idl and potentially generated client builders
// and for publishing jars containing rest idl to the project artifacts for including in the ivy.xml
Configuration restModel = configurations.maybeCreate("restModel");
Configuration testRestModel = configurations.maybeCreate("testRestModel");
testRestModel.extendsFrom(restModel);
// configuration for publishing jars containing data schemas and generated data templates
// to the project artifacts for including in the ivy.xml
//
// published data template jars depend on the configurations used to compile the classes
// in the jar; this includes the data models/templates used by the data template generator
// and the classes used to compile the generated classes.
Configuration dataTemplate = configurations.maybeCreate("dataTemplate");
dataTemplate.extendsFrom(dataTemplateCompile, dataModel);
Configuration testDataTemplate = configurations.maybeCreate("testDataTemplate");
testDataTemplate.extendsFrom(dataTemplate, testDataModel);
// configuration for processing and validating schema annotation during build time.
//
// The configuration contains dependencies on schema annotation handlers, which process and validate
// schema annotations.
Configuration schemaAnnotationHandler = configurations.maybeCreate(SCHEMA_ANNOTATION_HANDLER_CONFIGURATION);
// configuration for publishing jars containing rest idl and generated client builders
// to the project artifacts for including in the ivy.xml
//
// published client builder jars depend on the configurations used to compile the classes
// in the jar; this includes the data models/templates (potentially generated by this
// project) used by the data template generator and the classes used to compile
// the generated classes.
Configuration restClient = configurations.maybeCreate("restClient");
restClient.extendsFrom(restClientCompile, dataTemplate);
Configuration testRestClient = configurations.maybeCreate("testRestClient");
testRestClient.extendsFrom(restClient, testDataTemplate);
Properties properties = new Properties();
InputStream inputStream = getClass().getResourceAsStream("/pegasus-version.properties");
if (inputStream != null && !"true".equals(System.getenv("PEGASUS_INTEGRATION_TESTING")))
{
try
{
properties.load(inputStream);
}
catch (IOException e)
{
throw new GradleException("Unable to read pegasus-version.properties file.", e);
}
String version = properties.getProperty("pegasus.version");
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:data:" + version);
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:data-avro-generator:" + version);
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:generator:" + version);
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "com.linkedin.pegasus:restli-tools:" + version);
}
else
{
project.getLogger().lifecycle("Unable to add pegasus dependencies to {}. Please be sure that "
+ "'com.linkedin.pegasus:data', 'com.linkedin.pegasus:data-avro-generator', 'com.linkedin.pegasus:generator', 'com.linkedin.pegasus:restli-tools'"
+ " are available on the configuration pegasusPlugin",
project.getPath());
}
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, "org.slf4j:slf4j-simple:1.7.2");
project.getDependencies().add(PEGASUS_PLUGIN_CONFIGURATION, project.files(System.getProperty("java.home") + "/../lib/tools.jar"));
// this call has to be here because:
// 1) artifacts cannot be published once the project has been evaluated, so we need to first
// create the tasks and artifact handler, then progressively append sources
// 2) in order to append sources progressively, the source and documentation tasks and artifacts must be
// configured/created before configuring and creating the code generation tasks.
configureGeneratedSourcesAndJavadoc(project);
ChangedFileReportTask changedFileReportTask = project.getTasks()
.create("changedFilesReport", ChangedFileReportTask.class);
project.getTasks().getByName("check").dependsOn(changedFileReportTask);
SourceSetContainer sourceSets = project.getConvention()
.getPlugin(JavaPluginConvention.class).getSourceSets();
sourceSets.all(sourceSet ->
{
if (sourceSet.getName().toLowerCase(Locale.US).contains("generated"))
{
return;
}
checkAvroSchemaExist(project, sourceSet);
// the idl Generator input options will be inside the PegasusOptions class. Users of the
// plugin can set the inputOptions in their build.gradle
@SuppressWarnings("unchecked")
Map<String, PegasusOptions> pegasusOptions = (Map<String, PegasusOptions>) project
.getExtensions().getExtraProperties().get("pegasus");
pegasusOptions.put(sourceSet.getName(), new PegasusOptions());
// rest model generation could fail on incompatibility
// if it can fail, fail it early
configureRestModelGeneration(project, sourceSet);
// Do compatibility check for schemas under "pegasus" directory if the configuration property is provided.
if (isPropertyTrue(project, ENABLE_PEGASUS_SCHEMA_COMPATIBILITY_CHECK))
{
configurePegasusSchemaSnapshotGeneration(project, sourceSet, false);
}
configurePegasusSchemaSnapshotGeneration(project, sourceSet, true);
configureConversionUtilities(project, sourceSet);
GenerateDataTemplateTask generateDataTemplateTask = configureDataTemplateGeneration(project, sourceSet);
configureAvroSchemaGeneration(project, sourceSet);
configureRestClientGeneration(project, sourceSet);
if (!isPropertyTrue(project, DISABLE_SCHEMA_ANNOTATION_VALIDATION))
{
configureSchemaAnnotationValidation(project, sourceSet, generateDataTemplateTask);
}
Task cleanGeneratedDirTask = project.task(sourceSet.getTaskName("clean", "GeneratedDir"));
cleanGeneratedDirTask.doLast(new CacheableAction<>(task ->
{
deleteGeneratedDir(project, sourceSet, REST_GEN_TYPE);
deleteGeneratedDir(project, sourceSet, AVRO_SCHEMA_GEN_TYPE);
deleteGeneratedDir(project, sourceSet, DATA_TEMPLATE_GEN_TYPE);
}));
// make the clean task depend on deleting the generated directories
project.getTasks().getByName("clean").dependsOn(cleanGeneratedDirTask);
// Set data schema directories as resource roots
configureDataSchemaResourcesRoot(project, sourceSet);
});
project.getExtensions().getExtraProperties().set(GENERATOR_CLASSLOADER_NAME, getClass().getClassLoader());
}
|
@Test
public void testTaskTypes() {
// Given/When: Pegasus Plugin is applied to a project.
Project project = ProjectBuilder.builder().build();
project.getPlugins().apply(PegasusPlugin.class);
// Then: Validate the Copy/Sync Schema tasks are of the correct type.
assertTrue(project.getTasks().getByName("mainDestroyStaleFiles") instanceof Delete);
assertTrue(project.getTasks().getByName("mainCopyPdscSchemas") instanceof Copy);
assertTrue(project.getTasks().getByName("mainCopySchemas") instanceof Sync);
}
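// Illustrative sketch (not part of the plugin or its tests): the per-sourceSet PegasusOptions map that
// apply(...) registers under the "pegasus" extra property can be read back like this. The "main" key is the
// entry the sourceSets.all block populates for the Java plugin's main source set; everything else is assumed.
Project project = ProjectBuilder.builder().build();
project.getPlugins().apply(PegasusPlugin.class);
@SuppressWarnings("unchecked")
Map<String, PegasusOptions> pegasusOptions =
    (Map<String, PegasusOptions>) project.getExtensions().getExtraProperties().get("pegasus");
PegasusOptions mainOptions = pegasusOptions.get("main");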
|
@VisibleForTesting
protected List<UserDefinedJavaClassDef> orderDefinitions( List<UserDefinedJavaClassDef> definitions ) {
List<UserDefinedJavaClassDef> orderedDefinitions = new ArrayList<>( definitions.size() );
List<UserDefinedJavaClassDef> transactions =
definitions.stream()
.filter( def -> def.isTransformClass() && def.isActive() )
.sorted( ( p1, p2 ) -> p1.getClassName().compareTo( p2.getClassName() ) )
.collect( Collectors.toList() );
List<UserDefinedJavaClassDef> normalClasses =
definitions.stream()
.filter( def -> !def.isTransformClass() )
.sorted( ( p1, p2 ) -> p1.getClassName().compareTo( p2.getClassName() ) )
.collect( Collectors.toList() );
orderedDefinitions.addAll( normalClasses );
orderedDefinitions.addAll( transactions );
return orderedDefinitions;
}
|
@Test
public void orderDefinitionTest() throws Exception {
String codeBlock1 = "public boolean processRow() {\n"
+ " return true;\n"
+ "}\n\n";
String codeBlock2 = "public boolean extraClassA() {\n"
+ " // Random comment\n"
+ " return true;\n"
+ "}\n\n";
String codeBlock3 = "public boolean extraClassB() {\n"
+ " // Random comment\n"
+ " return true;\n"
+ "}\n\n";
UserDefinedJavaClassMeta userDefinedJavaClassMeta = new UserDefinedJavaClassMeta();
UserDefinedJavaClassDef processClassDef = new UserDefinedJavaClassDef( UserDefinedJavaClassDef.ClassType.TRANSFORM_CLASS, "Process", codeBlock1 );
UserDefinedJavaClassDef processClassDefA = new UserDefinedJavaClassDef( UserDefinedJavaClassDef.ClassType.TRANSFORM_CLASS, "ProcessA", codeBlock1 );
UserDefinedJavaClassDef normalClassADef = new UserDefinedJavaClassDef( UserDefinedJavaClassDef.ClassType.NORMAL_CLASS, "A", codeBlock1 );
UserDefinedJavaClassDef normalClassBDef = new UserDefinedJavaClassDef( UserDefinedJavaClassDef.ClassType.NORMAL_CLASS, "B", codeBlock1 );
UserDefinedJavaClassDef normalClassCDef = new UserDefinedJavaClassDef( UserDefinedJavaClassDef.ClassType.NORMAL_CLASS, "C", codeBlock1 );
ArrayList<UserDefinedJavaClassDef> defs = new ArrayList<>(5);
defs.add(processClassDefA);
defs.add(processClassDef);
defs.add(normalClassCDef);
defs.add(normalClassBDef);
defs.add(normalClassADef);
StepMeta stepMeta = Mockito.mock( StepMeta.class );
Mockito.when( stepMeta.getName() ).thenReturn( "User Defined Java Class" );
userDefinedJavaClassMeta.setParentStepMeta( stepMeta );
// Reordering test: definitions supplied in reverse order
List<UserDefinedJavaClassDef> orderDefs = userDefinedJavaClassMeta.orderDefinitions( defs );
Assert.assertTrue( orderDefs.get(0).getClassName().equals( "A" ) );
Assert.assertTrue( orderDefs.get(1).getClassName().equals( "B" ) );
Assert.assertTrue( orderDefs.get(2).getClassName().equals( "C" ) );
Assert.assertTrue( orderDefs.get(3).getClassName().equals( "Process" ) );
Assert.assertTrue( orderDefs.get(4).getClassName().equals( "ProcessA" ) );
// Random order test
defs.clear();
defs.add(normalClassADef);
defs.add(normalClassCDef);
defs.add(processClassDefA);
defs.add(normalClassBDef);
defs.add(processClassDef);
orderDefs = userDefinedJavaClassMeta.orderDefinitions( defs );
Assert.assertTrue( orderDefs.get(0).getClassName().equals( "A" ) );
Assert.assertTrue( orderDefs.get(1).getClassName().equals( "B" ) );
Assert.assertTrue( orderDefs.get(2).getClassName().equals( "C" ) );
Assert.assertTrue( orderDefs.get(3).getClassName().equals( "Process" ) );
Assert.assertTrue( orderDefs.get(4).getClassName().equals( "ProcessA" ) );
}
|
public static <T> T execute(Single<T> apiCall) {
try {
return apiCall.blockingGet();
} catch (HttpException e) {
try {
if (e.response() == null || e.response().errorBody() == null) {
throw e;
}
String errorBody = e.response().errorBody().string();
OpenAiError error = mapper.readValue(errorBody, OpenAiError.class);
throw new OpenAiHttpException(error, e, e.code());
} catch (IOException ex) {
// couldn't parse OpenAI error
throw e;
}
}
}
|
@Test
void executeParseUnknownProperties() {
// error body contains one unknown property and no message
String errorBody = "{\"error\":{\"unknown\":\"Invalid auth token\",\"type\":\"type\",\"param\":\"param\",\"code\":\"code\"}}";
HttpException httpException = createException(errorBody, 401);
Single<CompletionResult> single = Single.error(httpException);
OpenAiHttpException exception = assertThrows(OpenAiHttpException.class, () -> OpenAiService.execute(single));
assertNull(exception.getMessage());
assertEquals("type", exception.type);
assertEquals("param", exception.param);
assertEquals("code", exception.code);
assertEquals(401, exception.statusCode);
}
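// Minimal usage sketch for execute(...) (illustrative only): wrap any Retrofit Single produced by the OpenAI
// API client. An HTTP failure whose body parses as an OpenAiError surfaces as OpenAiHttpException; anything
// unparseable is rethrown as the original HttpException. The 'api' client and its method here are assumptions.
Single<CompletionResult> apiCall = api.createCompletion(completionRequest); // hypothetical client call
try {
    CompletionResult result = OpenAiService.execute(apiCall);
} catch (OpenAiHttpException e) {
    // e.statusCode, e.type, e.param and e.code carry the parsed OpenAI error details
}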
|
@Override @Nonnull
public ProgressState call() {
assert state != END : "already in terminal state";
progTracker.reset();
progTracker.notDone();
outbox.reset();
stateMachineStep();
return progTracker.toProgressState();
}
|
@Test
public void when_closeBlocked_then_waitUntilDone() {
processor.doneLatch = new CountDownLatch(1);
ProcessorTasklet tasklet = createTasklet(ForkJoinPool.commonPool());
callUntil(tasklet, NO_PROGRESS);
processor.doneLatch.countDown();
assertTrueEventually(() -> assertEquals(DONE, tasklet.call()), 2);
}
|
@Override
public ShardingAuditStrategyConfiguration swapToObject(final YamlShardingAuditStrategyConfiguration yamlConfig) {
return new ShardingAuditStrategyConfiguration(yamlConfig.getAuditorNames(), yamlConfig.isAllowHintDisable());
}
|
@Test
void assertSwapToObject() {
YamlShardingAuditStrategyConfiguration yamlConfig = new YamlShardingAuditStrategyConfiguration();
yamlConfig.setAuditorNames(Collections.singletonList("audit_algorithm"));
yamlConfig.setAllowHintDisable(false);
YamlShardingAuditStrategyConfigurationSwapper swapper = new YamlShardingAuditStrategyConfigurationSwapper();
ShardingAuditStrategyConfiguration actual = swapper.swapToObject(yamlConfig);
assertThat(actual.getAuditorNames(), is(Collections.singletonList("audit_algorithm")));
assertFalse(actual.isAllowHintDisable());
}
|
public byte[] generateAtRequest(DocumentType documentType, PolymorphType authorization, String sequenceNo,
String reference) {
final Certificate dvca = getDvca(documentType);
final String subject = getAtSubject(documentType, dvca.getSubject(), sequenceNo);
if (repository.countBySubject(subject) != 0) {
throw new ClientException("AT certificate of " + subject + " already present");
}
final PublicKeyInfo keyInfo = new PublicKeyInfo();
keyInfo.setOid(EACObjectIdentifiers.id_TA_ECDSA_SHA_384);
keyInfo.setParams(BrainpoolP320r1.DOMAIN_PARAMS);
keyInfo.setKey(signatureService.getOrGenerateKey(subject));
final CvCertificate.Body body = new CvCertificate.Body();
body.setCar(dvca.getSubject());
body.setPublicKey(keyInfo);
body.setChr(subject);
if (documentType == DocumentType.DL) // use EACv2 for DL only
body.setAuthorization(authorization);
final CvCertificate cv = new CvCertificate();
body.setRaw(mapper.write(body));
cv.setBody(body);
final EcSignature inner = new EcSignature(signatureService.sign(cv, subject, true));
cv.setSignature(inner);
if (reference == null) {
return mapper.write(cv);
}
CvCertificateRequest req = new CvCertificateRequest();
cv.setRaw(mapper.write(cv));
req.setCertificate(cv);
req.setCar(reference);
final EcSignature outer = new EcSignature(signatureService.sign(req, reference, true));
req.setSignature(outer);
return mapper.write(req);
}
|
@Test
public void shouldGenerateFirstATRequest() throws Exception {
final HsmClient.KeyInfo keyInfo = new HsmClient.KeyInfo();
keyInfo.setPublicKey(Hex.decode("04"
+ "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
+ "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"
));
Mockito.doReturn(keyInfo).when(hsmClient).generateKey(
Mockito.eq("AT"), Mockito.eq("SSSSSSSSSSSSSSSS")
);
Mockito.doThrow(new nl.logius.digid.sharedlib.exception.ClientException("Not found",404)).when(hsmClient).keyInfo(
Mockito.eq("AT"), Mockito.eq("SSSSSSSSSSSSSSSS")
);
final byte[] TBS = Base64.decode(
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" +
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" +
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" +
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" +
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" +
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" +
"SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS");
Mockito.doReturn(signature("SSSSSSSSSSSSSSSS")).when(hsmClient).sign(
Mockito.eq("AT"), Mockito.eq("SSSSSSSSSSSSSSSS"),
AdditionalMatchers.aryEq(TBS), Mockito.eq(true)
);
certificateRepo.save(loadCvCertificate("rdw/acc/cvca.cvcert", true));
certificateRepo.save(loadCvCertificate("rdw/acc/dvca.cvcert", false));
final byte[] der = service.generateAtRequest(DocumentType.DL, PolymorphType.PIP, "NL001", null);
final CvCertificate at = mapper.read(der, CvCertificate.class);
verifyAt(at, "SSSSSSSSSSSSSSSS", "SSSSSSSSSSSSSSSS", true);
}
|
@Override
protected Release findLatestActiveRelease(String appId, String clusterName, String namespaceName,
ApolloNotificationMessages clientMessages) {
String messageKey = ReleaseMessageKeyGenerator.generate(appId, clusterName, namespaceName);
String cacheKey = messageKey;
if (bizConfig.isConfigServiceCacheKeyIgnoreCase()) {
cacheKey = cacheKey.toLowerCase();
}
Tracer.logEvent(TRACER_EVENT_CACHE_GET, cacheKey);
ConfigCacheEntry cacheEntry = configCache.getUnchecked(cacheKey);
//cache is out-dated
if (clientMessages != null && clientMessages.has(messageKey) &&
clientMessages.get(messageKey) > cacheEntry.getNotificationId()) {
//invalidate the cache and try to load from db again
invalidate(cacheKey);
cacheEntry = configCache.getUnchecked(cacheKey);
}
return cacheEntry.getRelease();
}
|
@Test
public void testFindLatestActiveRelease() throws Exception {
when(releaseMessageService.findLatestReleaseMessageForMessages(Lists.newArrayList(someKey))).thenReturn
(someReleaseMessage);
when(releaseService.findLatestActiveRelease(someAppId, someClusterName, someNamespaceName)).thenReturn
(someRelease);
when(someReleaseMessage.getId()).thenReturn(someNotificationId);
Release release = configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName, someNamespaceName,
someNotificationMessages);
Release anotherRelease = configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName,
someNamespaceName, someNotificationMessages);
int retryTimes = 100;
for (int i = 0; i < retryTimes; i++) {
configServiceWithCache.findLatestActiveRelease(someAppId, someClusterName,
someNamespaceName, someNotificationMessages);
}
assertEquals(someRelease, release);
assertEquals(someRelease, anotherRelease);
verify(releaseMessageService, times(1)).findLatestReleaseMessageForMessages(Lists.newArrayList(someKey));
verify(releaseService, times(1)).findLatestActiveRelease(someAppId, someClusterName, someNamespaceName);
}
|
public Schema find(String name, String namespace) {
Schema.Type type = PRIMITIVES.get(name);
if (type != null) {
return Schema.create(type);
}
String fullName = fullName(name, namespace);
Schema schema = getNamedSchema(fullName);
if (schema == null) {
schema = getNamedSchema(name);
}
return schema != null ? schema : SchemaResolver.unresolvedSchema(fullName);
}
|
@Test
public void validateSchemaRetrievalFailure() {
Schema unknown = Schema.createFixed("unknown", null, null, 0);
Schema unresolved = fooBarBaz.find("unknown", null);
assertTrue(SchemaResolver.isUnresolvedSchema(unresolved));
assertEquals(unknown.getFullName(), SchemaResolver.getUnresolvedSchemaName(unresolved));
}
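// Illustrative fall-through of find(...), reusing the fooBarBaz parse context from the test above. Any schemas
// registered on it are assumptions, and PRIMITIVES is assumed to map the standard Avro primitive type names.
Schema primitive = fooBarBaz.find("string", null);  // primitive name -> Schema.create(Schema.Type.STRING)
Schema missing = fooBarBaz.find("missing", null);   // not primitive, not registered -> SchemaResolver.unresolvedSchema("missing")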
|
@Override
public CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options) {
final Map<String, KafkaFutureImpl<TopicMetadataAndConfig>> topicFutures = new HashMap<>(newTopics.size());
final CreatableTopicCollection topics = new CreatableTopicCollection();
for (NewTopic newTopic : newTopics) {
if (topicNameIsUnrepresentable(newTopic.name())) {
KafkaFutureImpl<TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
future.completeExceptionally(new InvalidTopicException("The given topic name '" +
newTopic.name() + "' cannot be represented in a request."));
topicFutures.put(newTopic.name(), future);
} else if (!topicFutures.containsKey(newTopic.name())) {
topicFutures.put(newTopic.name(), new KafkaFutureImpl<>());
topics.add(newTopic.convertToCreatableTopic());
}
}
if (!topics.isEmpty()) {
final long now = time.milliseconds();
final long deadline = calcDeadlineMs(now, options.timeoutMs());
final Call call = getCreateTopicsCall(options, topicFutures, topics,
Collections.emptyMap(), now, deadline);
runnable.call(call, now);
}
return new CreateTopicsResult(new HashMap<>(topicFutures));
}
|
@Test
public void testUnreachableBootstrapServer() throws Exception {
// This tests the scenario in which the bootstrap server is unreachable for a short while,
// which prevents AdminClient from being able to send the initial metadata request
Cluster cluster = Cluster.bootstrap(singletonList(new InetSocketAddress("localhost", 8121)));
Map<Node, Long> unreachableNodes = Collections.singletonMap(cluster.nodes().get(0), 200L);
try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster,
AdminClientUnitTestEnv.clientConfigs(), unreachableNodes)) {
Cluster discoveredCluster = mockCluster(3, 0);
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest,
RequestTestUtils.metadataResponse(discoveredCluster.nodes(), discoveredCluster.clusterResource().clusterId(),
1, Collections.emptyList()));
env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
prepareCreateTopicsResponse("myTopic", Errors.NONE));
KafkaFuture<Void> future = env.adminClient().createTopics(
singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
new CreateTopicsOptions().timeoutMs(10000)).all();
future.get();
}
}
|
@Override
public Num calculate(BarSeries series, Position position) {
Num stdDevPnl = standardDeviationCriterion.calculate(series, position);
if (stdDevPnl.isZero()) {
return series.zero();
}
// SQN = (Average (PnL) / StdDev(PnL)) * SquareRoot(NumberOfTrades)
Num numberOfPositions = numberOfPositionsCriterion.calculate(series, position);
Num pnl = criterion.calculate(series, position);
Num avgPnl = pnl.dividedBy(numberOfPositions);
return avgPnl.dividedBy(stdDevPnl).multipliedBy(numberOfPositions.sqrt());
}
|
@Test
public void calculateWithLosingShortPositions() {
MockBarSeries series = new MockBarSeries(numFunction, 100, 110, 100, 105, 95, 105);
TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(1, series),
Trade.sellAt(2, series), Trade.buyAt(3, series));
AnalysisCriterion sqnCriterion = getCriterion();
assertNumEquals(-4.242640687119286, sqnCriterion.calculate(series, tradingRecord));
}
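// Worked check of the SQN formula against the assertion above, assuming unit trade amounts and a population
// standard deviation (both consistent with the asserted value). Illustrative sketch, not part of the suite.
@Test
public void sqnFormulaWorkedExampleSketch() {
    // the two losing short positions above lose 10 and 5 points respectively
    double avgPnl = (-10.0 + -5.0) / 2;                // -7.5
    double stdDevPnl = 2.5;                            // population std dev of {-10, -5}
    double sqn = (avgPnl / stdDevPnl) * Math.sqrt(2);  // -3 * sqrt(2)
    assertEquals(-4.242640687119286, sqn, 1e-12);
}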
|
@Deprecated
public List<IndexSegment> prune(List<IndexSegment> segments, QueryContext query) {
return prune(segments, query, new SegmentPrunerStatistics());
}
|
@Test
public void segmentsWithoutColumnAreInvalid() {
SegmentPrunerService service = new SegmentPrunerService(_emptyPrunerConf);
IndexSegment indexSegment = mockIndexSegment(10, "col1", "col2");
SegmentPrunerStatistics stats = new SegmentPrunerStatistics();
List<IndexSegment> indexes = new ArrayList<>();
indexes.add(indexSegment);
String query = "select not_present from t1";
QueryContext queryContext = QueryContextConverterUtils.getQueryContext(query);
List<IndexSegment> actual = service.prune(indexes, queryContext, stats);
Assert.assertEquals(actual, Collections.emptyList());
Assert.assertEquals(1, stats.getInvalidSegments());
}
|
public static TableSchema toTableSchema(Schema schema) {
return new TableSchema().setFields(toTableFieldSchema(schema));
}
|
@Test
public void testToTableSchema_flat() {
TableSchema schema = toTableSchema(FLAT_TYPE);
assertThat(
schema.getFields(),
containsInAnyOrder(
ID,
VALUE,
NAME,
TIMESTAMP_VARIANT1,
TIMESTAMP_VARIANT2,
TIMESTAMP_VARIANT3,
TIMESTAMP_VARIANT4,
TIMESTAMP_VARIANT5,
TIMESTAMP_VARIANT6,
TIMESTAMP_VARIANT7,
TIMESTAMP_VARIANT8,
DATETIME,
DATETIME_0MS,
DATETIME_0S_NS,
DATETIME_0S_0NS,
DATE,
TIME,
TIME_0MS,
TIME_0S_NS,
TIME_0S_0NS,
VALID,
BINARY,
RAW_BYTES,
NUMERIC,
BOOLEAN,
LONG,
DOUBLE));
}
|
@Override
public Object toConnectRow(final Object ksqlData) {
if (!(ksqlData instanceof Struct)) {
return ksqlData;
}
final Schema schema = getSchema();
final Struct struct = new Struct(schema);
Struct originalData = (Struct) ksqlData;
Schema originalSchema = originalData.schema();
if (originalSchema.name() == null && schema.name() != null) {
originalSchema = AvroSchemas.getAvroCompatibleConnectSchema(
originalSchema, schema.name()
);
originalData = ConnectSchemas.withCompatibleRowSchema(originalData, originalSchema);
}
validate(originalSchema, schema);
copyStruct(originalData, originalSchema, struct, schema);
return struct;
}
|
@Test
public void shouldTransformStructWithMapOfStructs() {
// Given:
final Schema innerStructSchemaWithoutOptional = getInnerStructSchema(false);
final Schema innerStructSchemaWithOptional = getInnerStructSchema(true);
// Physical Schema retrieved from SR
final Schema schema = SchemaBuilder.struct()
.field("string_field", SchemaBuilder.STRING_SCHEMA)
.field("map_field", SchemaBuilder.map(Schema.STRING_SCHEMA, innerStructSchemaWithoutOptional))
.build();
// Logical Schema created by Ksql
final Schema ORIGINAL_SCHEMA = SchemaBuilder.struct()
.field("string_field", SchemaBuilder.OPTIONAL_STRING_SCHEMA)
.field("map_field", SchemaBuilder.map(Schema.OPTIONAL_STRING_SCHEMA, innerStructSchemaWithOptional))
.optional()
.build();
final Struct struct = new Struct(ORIGINAL_SCHEMA)
.put("string_field", "abc")
.put("map_field", ImmutableMap.of(
"key1", getNestedData(innerStructSchemaWithOptional),
"key2", getNestedData(innerStructSchemaWithOptional)));
final Map<String, Object> mapWithoutOptional = ImmutableMap.of(
"key1", getNestedData(innerStructSchemaWithoutOptional),
"key2", getNestedData(innerStructSchemaWithoutOptional));
// When:
final Object object = new AvroSRSchemaDataTranslator(schema).toConnectRow(struct);
// Then:
assertThat(object, instanceOf(Struct.class));
assertThat(((Struct) object).schema(), sameInstance(schema));
assertThat(((Struct) object).get("string_field"), is("abc"));
assertThat(((Struct) object).get("map_field"), equalTo(mapWithoutOptional));
}
|
public int lower(int v) {
return Boundary.LOWER.apply(find(v));
}
|
@Test
public void testLower() {
SortedIntList l = new SortedIntList(5);
l.add(0);
l.add(5);
l.add(10);
assertEquals(2, l.lower(Integer.MAX_VALUE));
}
|
public String getQueueName(AsyncMockDefinition definition, EventMessage eventMessage) {
// Produce service name part of topic name.
String serviceName = definition.getOwnerService().getName().replace(" ", "");
serviceName = serviceName.replace("-", "");
// Produce version name part of topic name.
String versionName = definition.getOwnerService().getVersion().replace(" ", "");
versionName = versionName.replace(".", "");
// Produce operation name part of topic name.
String operationName = ProducerManager.getDestinationOperationPart(definition.getOperation(), eventMessage);
// Aggregate the 3 parts using '-' as delimiter.
return serviceName + "-" + versionName + "-" + operationName.replace("/", "-");
}
|
@Test
void testGetQueueNameWithPart() {
AmazonSQSProducerManager producerManager = new AmazonSQSProducerManager();
Service service = new Service();
service.setName("Pastry orders API");
service.setVersion("0.1.0");
Operation operation = new Operation();
operation.setName("SUBSCRIBE pastry/orders");
operation.setMethod("SUBSCRIBE");
operation.setDispatcher("URI_PARTS");
operation.setResourcePaths(Set.of("pastry/orders/{orderId}"));
service.addOperation(operation);
EventMessage eventMessage = new EventMessage();
eventMessage.setName("Sample");
eventMessage.setDispatchCriteria("/orderId=123-456-789");
List<EventMessage> eventsMessages = List.of(eventMessage);
AsyncMockDefinition definition = new AsyncMockDefinition(service, operation, eventsMessages);
String queueName = producerManager.getQueueName(definition, eventMessage);
assertEquals("PastryordersAPI-010-pastry-orders-123-456-789", queueName);
}
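// Step-by-step sketch of how getQueueName(...) assembles the value asserted above. The operation part is an
// assumption about what ProducerManager.getDestinationOperationPart resolves for this dispatch criteria.
@Test
void queueNameDerivationSketch() {
    String serviceName = "Pastry orders API".replace(" ", "").replace("-", "");  // "PastryordersAPI"
    String versionName = "0.1.0".replace(" ", "").replace(".", "");              // "010"
    String operationPart = "pastry/orders/123-456-789";                          // assumed resolved operation part
    assertEquals("PastryordersAPI-010-pastry-orders-123-456-789",
        serviceName + "-" + versionName + "-" + operationPart.replace("/", "-"));
}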
|
@Override
public String toString() {
return "AttributeConfig{"
+ "name='" + name + '\''
+ "extractorClassName='" + extractorClassName + '\''
+ '}';
}
|
@Test
public void validToString() {
AttributeConfig config = new AttributeConfig("iq", "com.test.IqExtractor");
String toString = config.toString();
assertThat(toString).contains("iq");
assertThat(toString).contains("com.test.IqExtractor");
}
|
@Deprecated
public RequestTemplate method(String method) {
checkNotNull(method, "method");
try {
this.method = HttpMethod.valueOf(method);
} catch (IllegalArgumentException iae) {
throw new IllegalArgumentException("Invalid HTTP Method: " + method);
}
return this;
}
|
@SuppressWarnings("deprecation")
@Test
void uriStuffedIntoMethod() {
Throwable exception = assertThrows(IllegalArgumentException.class,
() -> new RequestTemplate().method("/path?queryParam={queryParam}"));
assertThat(exception.getMessage())
.contains("Invalid HTTP Method: /path?queryParam={queryParam}");
}
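// For contrast with the invalid input above, a valid (if deprecated) call simply maps the string onto the
// HttpMethod enum and returns the template for chaining:
RequestTemplate template = new RequestTemplate().method("GET");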
|
public static <T> T execute(Single<T> apiCall) {
try {
return apiCall.blockingGet();
} catch (HttpException e) {
try {
if (e.response() == null || e.response().errorBody() == null) {
throw e;
}
String errorBody = e.response().errorBody().string();
OpenAiError error = mapper.readValue(errorBody, OpenAiError.class);
throw new OpenAiHttpException(error, e, e.code());
} catch (IOException ex) {
// couldn't parse OpenAI error
throw e;
}
}
}
|
@Test
void executeHappyPath() {
CompletionResult expected = new CompletionResult();
Single<CompletionResult> single = Single.just(expected);
CompletionResult actual = OpenAiService.execute(single);
assertEquals(expected, actual);
}
|
@Override
public Object handle(String targetService, List<Object> invokers, Object invocation, Map<String, String> queryMap,
String serviceInterface) {
if (!shouldHandle(invokers)) {
return invokers;
}
List<Object> targetInvokers;
if (routerConfig.isUseRequestRouter()) {
targetInvokers = getTargetInvokersByRequest(targetService, invokers, invocation);
} else {
targetInvokers = getTargetInvokersByRules(invokers, invocation, queryMap, targetService, serviceInterface);
}
return super.handle(targetService, targetInvokers, invocation, queryMap, serviceInterface);
}
|
@Test
public void testGetTargetInvokersByGlobalRules() {
// initialize the routing rule
RuleInitializationUtils.initGlobalFlowMatchRules();
List<Object> invokers = new ArrayList<>();
ApacheInvoker<Object> invoker1 = new ApacheInvoker<>("1.0.0");
invokers.add(invoker1);
ApacheInvoker<Object> invoker2 = new ApacheInvoker<>("1.0.1");
invokers.add(invoker2);
Invocation invocation = new ApacheInvocation();
invocation.setAttachment("bar", "bar1");
Map<String, String> queryMap = new HashMap<>();
queryMap.put("side", "consumer");
queryMap.put("group", "fooGroup");
queryMap.put("version", "0.0.1");
queryMap.put("interface", "io.sermant.foo.FooTest");
DubboCache.INSTANCE.putApplication("io.sermant.foo.FooTest", "foo");
List<Object> targetInvokers = (List<Object>) flowRouteHandler.handle(
DubboCache.INSTANCE.getApplication("io.sermant.foo.FooTest")
, invokers, invocation, queryMap, "io.sermant.foo.FooTest");
Assert.assertEquals(1, targetInvokers.size());
Assert.assertEquals(invoker2, targetInvokers.get(0));
ConfigCache.getLabel(RouterConstant.DUBBO_CACHE_NAME).resetGlobalRule(Collections.emptyList());
}
|
public static Map<String, ShardingSphereSchema> build(final GenericSchemaBuilderMaterial material) throws SQLException {
return build(getAllTableNames(material.getRules()), material);
}
|
@Test
void assertLoadAllTables() throws SQLException {
Collection<String> tableNames = Arrays.asList("data_node_routed_table1", "data_node_routed_table2");
when(MetaDataLoader.load(any())).thenReturn(createSchemaMetaDataMap(tableNames, material));
Map<String, ShardingSphereSchema> actual = GenericSchemaBuilder.build(tableNames, material);
assertThat(actual.size(), is(1));
assertTables(new ShardingSphereSchema(DefaultDatabase.LOGIC_NAME, actual.values().iterator().next().getTables(), Collections.emptyMap()).getTables());
}
|
public OpenAPI read(Class<?> cls) {
return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
|
@Test
public void testRequestBodyEncoding() {
Reader reader = new Reader(new OpenAPI());
OpenAPI openAPI = reader.read(UrlEncodedResourceWithEncodings.class);
String yaml = "openapi: 3.0.1\n" +
"paths:\n" +
" /things/search:\n" +
" post:\n" +
" operationId: searchForThings\n" +
" requestBody:\n" +
" content:\n" +
" application/x-www-form-urlencoded:\n" +
" schema:\n" +
" type: object\n" +
" properties:\n" +
" id:\n" +
" type: array\n" +
" description: id param\n" +
" items:\n" +
" type: string\n" +
" name:\n" +
" type: array\n" +
" items:\n" +
" type: string\n" +
" encoding:\n" +
" id:\n" +
" style: form\n" +
" explode: true\n" +
" name:\n" +
" style: form\n" +
" explode: false\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" application/json: {}\n" +
" /things/sriracha:\n" +
" post:\n" +
" operationId: srirachaThing\n" +
" requestBody:\n" +
" content:\n" +
" application/x-www-form-urlencoded:\n" +
" schema:\n" +
" type: object\n" +
" properties:\n" +
" id:\n" +
" type: array\n" +
" description: id param\n" +
" items:\n" +
" type: string\n" +
" name:\n" +
" type: array\n" +
" items:\n" +
" type: string\n" +
" encoding:\n" +
" id:\n" +
" style: form\n" +
" explode: true\n" +
" name:\n" +
" style: form\n" +
" explode: false\n" +
" responses:\n" +
" default:\n" +
" description: default response\n" +
" content:\n" +
" application/json: {}\n";
SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
|
@Override
public Set<Long> loadAllKeys() {
long startNanos = Timer.nanos();
try {
return delegate.loadAllKeys();
} finally {
loadAllKeysProbe.recordValue(Timer.nanosElapsed(startNanos));
}
}
|
@Test
public void loadAllKeys() {
Set<Long> keys = Set.of(1L, 2L);
when(delegate.loadAllKeys()).thenReturn(keys);
Set<Long> result = queueStore.loadAllKeys();
assertEquals(keys, result);
assertProbeCalledOnce("loadAllKeys");
}
|
public static boolean equalContentsIgnoreOrder(Collection<?> c1, Collection<?> c2) {
return c1.size() == c2.size() && c1.containsAll(c2);
}
|
@Test
public void testEqualContentsIgnoreOrder() {
List<Integer> l2Copy = new ArrayList<>();
l2Copy.addAll(l2);
shuffle();
assertTrue(CollectionUtil.equalContentsIgnoreOrder(l2, l2Copy));
assertFalse(CollectionUtil.equalContentsIgnoreOrder(l1, l2));
}
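// Illustrative caveat (standard java.util semantics, not part of the original suite): the size + containsAll
// check ignores element multiplicity, so two different multisets of equal size still compare equal.
@Test
public void equalContentsIgnoreOrderIgnoresMultiplicitySketch() {
    List<Integer> a = Arrays.asList(1, 1, 2);
    List<Integer> b = Arrays.asList(1, 2, 2);
    assertTrue(CollectionUtil.equalContentsIgnoreOrder(a, b)); // sizes match and a contains both 1 and 2
}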
|