focal_method | test_case |
---|---|
@Override
public PageResult<FileDO> getFilePage(FilePageReqVO pageReqVO) {
return fileMapper.selectPage(pageReqVO);
} | @Test
public void testGetFilePage() {
// mock data
FileDO dbFile = randomPojo(FileDO.class, o -> { // to be matched by the query below
o.setPath("yunai");
o.setType("image/jpg");
o.setCreateTime(buildTime(2021, 1, 15));
});
fileMapper.insert(dbFile);
// test: path does not match
fileMapper.insert(ObjectUtils.cloneIgnoreId(dbFile, o -> o.setPath("tudou")));
// test: type does not match
fileMapper.insert(ObjectUtils.cloneIgnoreId(dbFile, o -> {
o.setType("image/png");
}));
// test: createTime does not match
fileMapper.insert(ObjectUtils.cloneIgnoreId(dbFile, o -> {
o.setCreateTime(buildTime(2020, 1, 15));
}));
// prepare parameters
FilePageReqVO reqVO = new FilePageReqVO();
reqVO.setPath("yunai");
reqVO.setType("jp");
reqVO.setCreateTime((new LocalDateTime[]{buildTime(2021, 1, 10), buildTime(2021, 1, 20)}));
// invoke
PageResult<FileDO> pageResult = fileService.getFilePage(reqVO);
// assert
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
AssertUtils.assertPojoEquals(dbFile, pageResult.getList().get(0));
} |
@Override
public Result invoke(Invocation invocation) throws RpcException {
Result result;
String value = getUrl().getMethodParameter(
RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString())
.trim();
if (ConfigUtils.isEmpty(value)) {
// no mock
result = this.invoker.invoke(invocation);
} else if (value.startsWith(FORCE_KEY)) {
if (logger.isWarnEnabled()) {
logger.warn(
CLUSTER_FAILED_MOCK_REQUEST,
"force mock",
"",
"force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : "
+ getUrl());
}
// force:direct mock
result = doMockInvoke(invocation, null);
} else {
// fail-mock
try {
result = this.invoker.invoke(invocation);
// fix:#4585
if (result.getException() != null && result.getException() instanceof RpcException) {
RpcException rpcException = (RpcException) result.getException();
if (rpcException.isBiz()) {
throw rpcException;
} else {
result = doMockInvoke(invocation, rpcException);
}
}
} catch (RpcException e) {
if (e.isBiz()) {
throw e;
}
if (logger.isWarnEnabled()) {
logger.warn(
CLUSTER_FAILED_MOCK_REQUEST,
"failed to mock invoke",
"",
"fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : "
+ getUrl(),
e);
}
result = doMockInvoke(invocation, e);
}
}
return result;
} | @Test
void testMockInvokerFromOverride_Invoke_Fock_WithForceDefault() {
URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName())
.addParameter(
REFER_KEY,
URL.encode(PATH_KEY + "=" + IHelloService.class.getName()
+ "&" + "mock=force:return z"
+ "&" + "getSomething.mock=fail:return x"
+ "&" + "getSomething2.mock=force:return y"))
.addParameter("invoke_return_error", "true");
Invoker<IHelloService> cluster = getClusterInvoker(url);
// getSomething is configured with a fail mock
RpcInvocation invocation = new RpcInvocation();
invocation.setMethodName("getSomething");
Result ret = cluster.invoke(invocation);
Assertions.assertEquals("x", ret.getValue());
// getSomething2 is configured with a force mock
invocation = new RpcInvocation();
invocation.setMethodName("getSomething2");
ret = cluster.invoke(invocation);
Assertions.assertEquals("y", ret.getValue());
// getSomething3 has no method-level mock, so the default force mock applies
invocation = new RpcInvocation();
invocation.setMethodName("getSomething3");
ret = cluster.invoke(invocation);
Assertions.assertEquals("z", ret.getValue());
// sayHello has no method-level mock, so the default force mock applies
invocation = new RpcInvocation();
invocation.setMethodName("sayHello");
ret = cluster.invoke(invocation);
Assertions.assertEquals("z", ret.getValue());
} |
public static AuditActor user(@Nonnull String username) {
if (isNullOrEmpty(username)) {
throw new IllegalArgumentException("username must not be null or empty");
}
return new AutoValue_AuditActor(URN_GRAYLOG_USER + username);
} | @Test(expected = IllegalArgumentException.class)
public void testNullUser() {
AuditActor.user(null);
} |
public static String toJson(MetadataUpdate metadataUpdate) {
return toJson(metadataUpdate, false);
} | @Test
public void testSetCurrentSchemaToJson() {
String action = MetadataUpdateParser.SET_CURRENT_SCHEMA;
int schemaId = 6;
String expected = String.format("{\"action\":\"%s\",\"schema-id\":%d}", action, schemaId);
MetadataUpdate update = new MetadataUpdate.SetCurrentSchema(schemaId);
String actual = MetadataUpdateParser.toJson(update);
assertThat(actual)
.as("Set current schema should convert to the correct JSON value")
.isEqualTo(expected);
} |
public InnerCNode getTree() throws CodegenRuntimeException {
try {
if (root == null) parse();
} catch (DefParserException | IOException e) {
throw new CodegenRuntimeException("Error parsing or reading config definition: " + e.getMessage(), e);
}
return root;
} | @Test
void testTraverseTree() throws IOException {
File defFile = new File(DEF_NAME);
CNode root = new DefParser("test", new FileReader(defFile)).getTree();
assertNotNull(root);
CNode[] children = root.getChildren();
assertEquals(38, children.length);
int numGrandChildren = 0;
int numGreatGrandChildren = 0;
for (CNode child : children) {
CNode[] childsChildren = child.getChildren();
numGrandChildren += childsChildren.length;
for (CNode grandChild : childsChildren) {
numGreatGrandChildren += grandChild.getChildren().length;
}
}
assertEquals(14, numGrandChildren);
assertEquals(6, numGreatGrandChildren);
// Verify that each array creates a sub-tree, and that defaults for leaves are handled correctly.
CNode myArray = root.getChild("myArray");
assertEquals(5, myArray.getChildren().length);
// int within array
LeafCNode myArrayInt = (LeafCNode) myArray.getChild("intVal");
assertEquals("14", myArrayInt.getDefaultValue().getValue());
// enum within array
LeafCNode myArrayEnum = (LeafCNode) myArray.getChild("enumVal");
assertEquals("TYPE", myArrayEnum.getDefaultValue().getValue());
// Verify array within array and a default value for a leaf in the inner array.
CNode anotherArray = myArray.getChild("anotherArray");
assertEquals(1, anotherArray.getChildren().length);
LeafCNode foo = (LeafCNode) anotherArray.getChild("foo");
assertEquals("-4", foo.getDefaultValue().getValue());
} |
@Override
protected CompletableFuture<LogListInfo> handleRequest(
@Nonnull HandlerRequest<EmptyRequestBody> request, @Nonnull RestfulGateway gateway)
throws RestHandlerException {
if (logDir == null) {
return CompletableFuture.completedFuture(new LogListInfo(Collections.emptyList()));
}
final File[] logFiles = logDir.listFiles();
if (logFiles == null) {
return FutureUtils.completedExceptionally(
new IOException("Could not list files in " + logDir));
}
final List<LogInfo> logs =
Arrays.stream(logFiles)
.filter(File::isFile)
.map(
logFile ->
new LogInfo(
logFile.getName(),
logFile.length(),
logFile.lastModified()))
.collect(Collectors.toList());
return CompletableFuture.completedFuture(new LogListInfo(logs));
} | @Test
void testGetJobManagerLogsList() throws Exception {
File logRoot = temporaryFolder.toFile();
List<LogInfo> expectedLogInfo =
Arrays.asList(
new LogInfo("jobmanager.log", 5, 1632844800000L),
new LogInfo("jobmanager.out", 7, 1632844800000L),
new LogInfo("test.log", 13, 1632844800000L));
createLogFiles(logRoot, expectedLogInfo);
JobManagerLogListHandler jobManagerLogListHandler = createHandler(logRoot);
LogListInfo logListInfo =
jobManagerLogListHandler.handleRequest(testRequest, dispatcherGateway).get();
assertThat(logListInfo.getLogInfos()).containsExactlyInAnyOrderElementsOf(expectedLogInfo);
} |
public Resource getUsedResource() {
return usedResource;
} | @Test(timeout = 60000)
public void testFifoScheduling() throws Exception {
GenericTestUtils.setRootLogLevel(Level.DEBUG);
MockRM rm = new MockRM(conf);
rm.start();
MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
MockNM nm2 = rm.registerNode("127.0.0.2:5678", 4 * GB);
RMApp app1 = MockRMAppSubmitter.submitWithMemory(2048, rm);
// kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
nm1.nodeHeartbeat(true);
RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
am1.registerAppAttempt();
SchedulerNodeReport report_nm1 =
rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
RMApp app2 = MockRMAppSubmitter.submitWithMemory(2048, rm);
// kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
nm2.nodeHeartbeat(true);
RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
am2.registerAppAttempt();
SchedulerNodeReport report_nm2 =
rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());
// add request for containers
am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
AllocateResponse alloc1Response = am1.schedule(); // send the request
// add request for containers
am2.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 0, 1);
AllocateResponse alloc2Response = am2.schedule(); // send the request
// kick the scheduler, 1 GB and 3 GB given to AM1 and AM2, remaining 0
nm1.nodeHeartbeat(true);
while (alloc1Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 1...");
Thread.sleep(1000);
alloc1Response = am1.schedule();
}
while (alloc2Response.getAllocatedContainers().size() < 1) {
LOG.info("Waiting for containers to be created for app 2...");
Thread.sleep(1000);
alloc2Response = am2.schedule();
}
// kick the scheduler, nothing given remaining 2 GB.
nm2.nodeHeartbeat(true);
List<Container> allocated1 = alloc1Response.getAllocatedContainers();
Assert.assertEquals(1, allocated1.size());
Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemorySize());
Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());
List<Container> allocated2 = alloc2Response.getAllocatedContainers();
Assert.assertEquals(1, allocated2.size());
Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemorySize());
Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemorySize());
Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemorySize());
Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());
Container c1 = allocated1.get(0);
Assert.assertEquals(GB, c1.getResource().getMemorySize());
ContainerStatus containerStatus =
BuilderUtils.newContainerStatus(c1.getId(), ContainerState.COMPLETE,
"", 0, c1.getResource());
nm1.containerStatus(containerStatus);
int waitCount = 0;
while (attempt1.getJustFinishedContainers().size() < 1 && waitCount++ != 20) {
LOG.info("Waiting for containers to be finished for app 1... Tried "
+ waitCount + " times already..");
Thread.sleep(1000);
}
Assert.assertEquals(1, attempt1.getJustFinishedContainers().size());
Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses()
.size());
report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemorySize());
rm.stop();
} |
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "/maxTaskRunSetting")
@Hidden
public Integer maxTaskRunSetting() {
return executionRepository.maxTaskRunSetting();
} | @Test
void maxTaskRunSetting() {
HttpClientResponseException e = assertThrows(
HttpClientResponseException.class,
() -> client.toBlocking().retrieve(HttpRequest.GET("/api/v1/taskruns/maxTaskRunSetting"))
);
assertThat(e.getStatus(), is(HttpStatus.NOT_FOUND));
} |
protected Permission toPermission(final Node node) {
final Permission permission = new Permission();
if(node.getPermissions() != null) {
switch(node.getType()) {
case FOLDER:
case ROOM:
if(node.getPermissions().isCreate()
// For existing files the delete role is also required to overwrite
&& node.getPermissions().isDelete()) {
permission.setUser(Permission.Action.all);
}
else {
permission.setUser(Permission.Action.read.or(Permission.Action.execute));
}
break;
case FILE:
if(node.isIsEncrypted() != null && node.isIsEncrypted()) {
try {
if(null != session.keyPair()) {
permission.setUser(Permission.Action.none.or(Permission.Action.read));
}
else {
log.warn(String.format("Missing read permission for node %s with missing key pair", node));
}
}
catch(BackgroundException e) {
log.warn(String.format("Ignore failure %s retrieving key pair", e));
}
}
else {
if(node.getPermissions().isRead()) {
permission.setUser(Permission.Action.read);
}
}
if(node.getPermissions().isChange() && node.getPermissions().isDelete()) {
permission.setUser(permission.getUser().or(Permission.Action.write));
}
break;
}
if(log.isDebugEnabled()) {
log.debug(String.format("Map node permissions %s to %s", node.getPermissions(), permission));
}
}
return permission;
} | @Test
public void testPermissionsFolder() throws Exception {
final SDSAttributesAdapter f = new SDSAttributesAdapter(session);
final Node node = new Node();
node.setIsEncrypted(false);
node.setType(Node.TypeEnum.FOLDER);
final NodePermissions permissions = new NodePermissions().read(false).delete(false).change(false).create(false);
node.setPermissions(permissions);
assertTrue(f.toPermission(node).isReadable());
assertFalse(f.toPermission(node).isWritable());
assertTrue(f.toPermission(node).isExecutable());
permissions.setRead(true);
assertTrue(f.toPermission(node).isReadable());
assertFalse(f.toPermission(node).isWritable());
assertTrue(f.toPermission(node).isExecutable());
permissions.setChange(true);
assertTrue(f.toPermission(node).isReadable());
assertFalse(f.toPermission(node).isWritable());
assertTrue(f.toPermission(node).isExecutable());
permissions.setDelete(true);
assertTrue(f.toPermission(node).isReadable());
assertFalse(f.toPermission(node).isWritable());
assertTrue(f.toPermission(node).isExecutable());
permissions.setCreate(true);
assertTrue(f.toPermission(node).isReadable());
assertTrue(f.toPermission(node).isWritable());
assertTrue(f.toPermission(node).isExecutable());
} |
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
} | @Test
public void shouldDefaultToEmptyRefinementForBareQueries() {
// Given:
final SingleStatementContext stmt =
givenQuery("SELECT * FROM TEST1;");
// When:
final Query result = (Query) builder.buildStatement(stmt);
// Then:
assertThat("Should be pull", result.isPullQuery(), is(true));
assertThat(result.getRefinement(), is(Optional.empty()));
} |
public boolean hasViewAccessToTemplate(PipelineTemplateConfig template, CaseInsensitiveString username, List<Role> roles, boolean isGroupAdministrator) {
boolean hasViewAccessToTemplate = template.getAuthorization().isViewUser(username, roles);
hasViewAccessToTemplate = hasViewAccessToTemplate || (template.isAllowGroupAdmins() && isGroupAdministrator);
return hasViewAccessToTemplate;
} | @Test
public void shouldReturnFalseIfUserCannotViewTemplate() {
CaseInsensitiveString templateViewUser = new CaseInsensitiveString("view");
String templateName = "template";
PipelineTemplateConfig template = PipelineTemplateConfigMother.createTemplate(templateName, StageConfigMother.manualStage("stage"));
TemplatesConfig templates = new TemplatesConfig(template);
assertThat(templates.hasViewAccessToTemplate(template, templateViewUser, null, false), is(false));
} |
@JsonIgnore
public Set<Long> getSkippedIterationsWithCheckpoint() {
Set<Long> skipList = new HashSet<>();
if (restartInfo != null) {
skipList.addAll(restartInfo);
}
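// also skip iterations that already reached a terminal status and are at or beyond the checkpoint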
if (details != null) {
details.flatten(WorkflowInstance.Status::isTerminal).entrySet().stream()
.flatMap(e -> e.getValue().stream())
.filter(i -> i >= checkpoint)
.forEach(skipList::add);
}
return skipList;
} | @Test
public void testGetSkippedIterationsWithCheckpoint() throws Exception {
ForeachStepOverview overview =
loadObject(
"fixtures/instances/sample-foreach-step-overview.json", ForeachStepOverview.class);
assertTrue(overview.getSkippedIterationsWithCheckpoint().isEmpty());
overview.setCheckpoint(80115);
assertEquals(Collections.singleton(80115L), overview.getSkippedIterationsWithCheckpoint());
overview.addOne(123L, WorkflowInstance.Status.FAILED, null);
overview.refreshDetail();
overview.updateForRestart(
123L, WorkflowInstance.Status.CREATED, WorkflowInstance.Status.FAILED, null);
assertEquals(
new HashSet<>(Arrays.asList(123L, 80115L)), overview.getSkippedIterationsWithCheckpoint());
} |
@VisibleForTesting
static String logicalToProtoSchema(final LogicalSchema schema) {
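// convert via the Connect schema representation: LogicalSchema -> ConnectSchema -> ProtobufSchema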
final ConnectSchema connectSchema = ConnectSchemas.columnsToConnectSchema(schema.columns());
final ProtobufSchema protobufSchema = new ProtobufData(
new ProtobufDataConfig(ImmutableMap.of())).fromConnectSchema(connectSchema);
return protobufSchema.canonicalString();
} | @Test
public void shouldConvertLogicalSchemaToProtobufSchema() {
// Given:
final String expectedProtoSchemaString = "syntax = \"proto3\";\n" +
"\n" +
"message ConnectDefault1 {\n" +
" int32 A = 1;\n" +
" double B = 2;\n" +
" repeated string C = 3;\n" +
"}\n";
// When:
final String protoSchema = JsonStreamedRowResponseWriter.logicalToProtoSchema(SCHEMA);
// Then:
assertThat(protoSchema, is(expectedProtoSchemaString));
} |
@POST
@Path("/{connector}/restart")
@Operation(summary = "Restart the specified connector")
public Response restartConnector(final @PathParam("connector") String connector,
final @Context HttpHeaders headers,
final @DefaultValue("false") @QueryParam("includeTasks") @Parameter(description = "Whether to also restart tasks") Boolean includeTasks,
final @DefaultValue("false") @QueryParam("onlyFailed") @Parameter(description = "Whether to only restart failed tasks/connectors")Boolean onlyFailed,
final @Parameter(hidden = true) @QueryParam("forward") Boolean forward) throws Throwable {
RestartRequest restartRequest = new RestartRequest(connector, onlyFailed, includeTasks);
String forwardingPath = "/connectors/" + connector + "/restart";
if (restartRequest.forceRestartConnectorOnly()) {
// For backward compatibility, just restart the connector instance and return OK with no body
FutureCallback<Void> cb = new FutureCallback<>();
herder.restartConnector(connector, cb);
requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, null, forward);
return Response.noContent().build();
}
// In all other cases, submit the async restart request and return connector state
FutureCallback<ConnectorStateInfo> cb = new FutureCallback<>();
herder.restartConnectorAndTasks(restartRequest, cb);
Map<String, String> queryParameters = new HashMap<>();
queryParameters.put("includeTasks", includeTasks.toString());
queryParameters.put("onlyFailed", onlyFailed.toString());
ConnectorStateInfo stateInfo = requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, queryParameters, null, new TypeReference<ConnectorStateInfo>() {
}, new IdentityTranslator<>(), forward);
return Response.accepted().entity(stateInfo).build();
} | @Test
public void testRestartConnectorNotFound() {
final ArgumentCaptor<Callback<Void>> cb = ArgumentCaptor.forClass(Callback.class);
expectAndCallbackException(cb, new NotFoundException("not found"))
.when(herder).restartConnector(eq(CONNECTOR_NAME), cb.capture());
assertThrows(NotFoundException.class, () ->
connectorsResource.restartConnector(CONNECTOR_NAME, NULL_HEADERS, false, false, FORWARD)
);
} |
@Override
public Serde<GenericKey> create(
final FormatInfo format,
final PersistenceSchema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> schemaRegistryClientFactory,
final String loggerNamePrefix,
final ProcessingLogContext processingLogContext,
final Optional<TrackedCallback> tracker
) {
return createInner(
format,
schema,
ksqlConfig,
schemaRegistryClientFactory,
loggerNamePrefix,
processingLogContext,
tracker
);
} | @Test
public void shouldNotThrowOnMultipleKeyColumns() {
// Given:
schema = PersistenceSchema.from(
ImmutableList.of(column(SqlTypes.STRING), column(SqlTypes.INTEGER)),
SerdeFeatures.of()
);
// When:
factory.create(format, schema, config, srClientFactory, LOGGER_PREFIX, processingLogCxt,
Optional.empty());
// Then (did not throw):
} |
@Operation(summary = "Seckill scenario 1 (synchronized lock implementation)")
@PostMapping("/sychronized")
public Result doWithSychronized(@RequestBody @Valid SeckillWebMockRequestDTO dto) {
processSeckill(dto, SYCHRONIZED);
return Result.ok();
// the MQ listener logs once processing completes; do not log here
} | @Test
void testDoWithSychronized() {
SeckillWebMockRequestDTO requestDTO = new SeckillWebMockRequestDTO();
requestDTO.setSeckillId(1L);
requestDTO.setRequestCount(1);
requestDTO.setCorePoolSize(1);
requestDTO.setMaxPoolSize(10);
SeckillMockRequestDTO any = new SeckillMockRequestDTO();
any.setSeckillId(1L);
Result response = seckillMockController.doWithSychronized(requestDTO);
verify(seckillService, times(0)).execute(any(SeckillMockRequestDTO.class), anyInt());
assertEquals(0, response.getCode());
} |
@Override
public ColumnStatistic getColumnStatistic(Table table, String column) {
Preconditions.checkState(table != null);
// for blacklisted tables (e.g. the statistics table itself), just return unknown column statistics
if (StatisticUtils.statisticTableBlackListCheck(table.getId())) {
return ColumnStatistic.unknown();
}
if (!StatisticUtils.checkStatisticTableStateNormal()) {
return ColumnStatistic.unknown();
}
try {
CompletableFuture<Optional<ColumnStatistic>> result =
cachedStatistics.get(new ColumnStatsCacheKey(table.getId(), column));
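// do not block on an in-flight cache load; fall back to unknown statistics until it completes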
if (result.isDone()) {
Optional<ColumnStatistic> realResult;
realResult = result.get();
return realResult.orElseGet(ColumnStatistic::unknown);
} else {
return ColumnStatistic.unknown();
}
} catch (Exception e) {
LOG.warn("Failed to execute getColumnStatistic", e);
return ColumnStatistic.unknown();
}
} | @Test
public void testLoadCacheLoadEmpty(@Mocked CachedStatisticStorage cachedStatisticStorage) {
Database db = connectContext.getGlobalStateMgr().getDb("test");
Table table = db.getTable("t0");
new Expectations() {
{
cachedStatisticStorage.getColumnStatistic(table, "v1");
result = ColumnStatistic.unknown();
minTimes = 0;
}
};
ColumnStatistic columnStatistic =
Deencapsulation.invoke(cachedStatisticStorage, "getColumnStatistic", table, "v1");
Assert.assertEquals(Double.POSITIVE_INFINITY, columnStatistic.getMaxValue(), 0.001);
Assert.assertEquals(Double.NEGATIVE_INFINITY, columnStatistic.getMinValue(), 0.001);
Assert.assertEquals(0.0, columnStatistic.getNullsFraction(), 0.001);
Assert.assertEquals(1.0, columnStatistic.getAverageRowSize(), 0.001);
Assert.assertEquals(1.0, columnStatistic.getDistinctValuesCount(), 0.001);
} |
@Nullable
public RouterFunction<ServerResponse> create(ReverseProxy reverseProxy, String pluginName) {
return createReverseProxyRouterFunction(reverseProxy, nullSafePluginName(pluginName));
} | @Test
void shouldReturnNotFoundIfResourceNotFound() throws FileNotFoundException {
var routerFunction = factory.create(mockReverseProxy(), "fakeA");
assertNotNull(routerFunction);
var webClient = WebTestClient.bindToRouterFunction(routerFunction).build();
var pluginWrapper = Mockito.mock(PluginWrapper.class);
var pluginRoot = ResourceUtils.getURL("classpath:plugin/plugin-for-reverseproxy/");
var classLoader = new URLClassLoader(new URL[] {pluginRoot});
when(pluginWrapper.getPluginClassLoader()).thenReturn(classLoader);
when(pluginManager.getPlugin("fakeA")).thenReturn(pluginWrapper);
webClient.get().uri("/plugins/fakeA/assets/static/non-existing-file.txt")
.exchange()
.expectHeader().cacheControl(CacheControl.empty())
.expectStatus().isNotFound();
} |
public static <T> RetryTransformer<T> of(Retry retry) {
return new RetryTransformer<>(retry);
} | @Test
public void shouldNotRetryWhenItThrowErrorSingle() {
RetryConfig config = retryConfig();
Retry retry = Retry.of("testName", config);
given(helloWorldService.returnHelloWorld())
.willThrow(new Error("BAM!"));
Single.fromCallable(helloWorldService::returnHelloWorld)
.compose(RetryTransformer.of(retry))
.test()
.assertError(Error.class)
.assertNotComplete();
then(helloWorldService).should().returnHelloWorld();
Retry.Metrics metrics = retry.getMetrics();
assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero();
} |
public WatsonxAiRequest request(Prompt prompt) {
WatsonxAiChatOptions options = WatsonxAiChatOptions.builder().build();
if (this.defaultOptions != null) {
options = ModelOptionsUtils.merge(options, this.defaultOptions, WatsonxAiChatOptions.class);
}
if (prompt.getOptions() != null) {
if (prompt.getOptions() instanceof WatsonxAiChatOptions runtimeOptions) {
options = ModelOptionsUtils.merge(runtimeOptions, options, WatsonxAiChatOptions.class);
}
else {
var updatedRuntimeOptions = ModelOptionsUtils.copyToTarget(prompt.getOptions(), ChatOptions.class,
WatsonxAiChatOptions.class);
options = ModelOptionsUtils.merge(updatedRuntimeOptions, options, WatsonxAiChatOptions.class);
}
}
Map<String, Object> parameters = options.toMap();
final String convertedPrompt = MessageToPromptConverter.create()
.withAssistantPrompt("")
.withHumanPrompt("")
.toPrompt(prompt.getInstructions());
return WatsonxAiRequest.builder(convertedPrompt).withParameters(parameters).build();
} | @Test
public void testCreateRequestSuccessfullyWithDefaultParams() {
String msg = "Test message";
WatsonxAiChatOptions modelOptions = WatsonxAiChatOptions.builder()
.withModel("meta-llama/llama-2-70b-chat")
.build();
Prompt prompt = new Prompt(msg, modelOptions);
WatsonxAiRequest request = chatModel.request(prompt);
Assert.assertEquals(request.getModelId(), "meta-llama/llama-2-70b-chat");
assertThat(request.getParameters().get("decoding_method")).isEqualTo("greedy");
assertThat(request.getParameters().get("temperature")).isEqualTo(0.7);
assertThat(request.getParameters().get("top_p")).isEqualTo(1.0);
assertThat(request.getParameters().get("top_k")).isEqualTo(50);
assertThat(request.getParameters().get("max_new_tokens")).isEqualTo(20);
assertThat(request.getParameters().get("min_new_tokens")).isEqualTo(0);
assertThat(request.getParameters().get("stop_sequences")).isInstanceOf(List.class);
Assert.assertEquals(request.getParameters().get("stop_sequences"), List.of());
assertThat(request.getParameters().get("random_seed")).isNull();
} |
@Override
public void handleGlobalFailure(
Throwable cause, CompletableFuture<Map<String, String>> failureLabels) {
context.goToFinished(context.getArchivedExecutionGraph(JobStatus.FAILED, cause));
} | @Test
void testTransitionToFinishedOnGlobalFailure() {
TestingStateWithoutExecutionGraph state = new TestingStateWithoutExecutionGraph(ctx, LOG);
RuntimeException expectedException = new RuntimeException("This is a test exception");
ctx.setExpectFinished(
archivedExecutionGraph -> {
assertThat(archivedExecutionGraph.getState()).isEqualTo(JobStatus.FAILED);
assertThat(archivedExecutionGraph.getFailureInfo()).isNotNull();
assertThat(
archivedExecutionGraph
.getFailureInfo()
.getException()
.deserializeError(this.getClass().getClassLoader()))
.isEqualTo(expectedException);
});
state.handleGlobalFailure(expectedException, FailureEnricherUtils.EMPTY_FAILURE_LABELS);
} |
@Override
public <T> T convert(DataTable dataTable, Type type) {
return convert(dataTable, type, false);
} | @Test
void convert_to_map_of_primitive_to_map_of_primitive_to_object() {
DataTable table = parse("",
" | | 1 | 2 | 3 |",
" | A | ♘ | | ♝ |",
" | B | | | |",
" | C | | ♝ | |");
registry.defineDataTableType(new DataTableType(Piece.class, PIECE_TABLE_CELL_TRANSFORMER));
Map<String, Map<Integer, Piece>> expected = new HashMap<String, Map<Integer, Piece>>() {
{
put("A", new HashMap<Integer, Piece>() {
{
put(1, Piece.WHITE_KNIGHT);
put(2, null);
put(3, Piece.BLACK_BISHOP);
}
});
put("B", new HashMap<Integer, Piece>() {
{
put(1, null);
put(2, null);
put(3, null);
}
});
put("C", new HashMap<Integer, Piece>() {
{
put(1, null);
put(2, Piece.BLACK_BISHOP);
put(3, null);
}
});
}
};
assertEquals(expected, converter.convert(table, MAP_OF_STRING_TO_MAP_OF_INTEGER_TO_PIECE));
} |
@Override
public synchronized void start() {
LOG.info("Starting {}", this.getClass().getSimpleName());
Preconditions.checkState(mJvmPauseMonitor == null, "JVM pause monitor must not already exist");
mJvmPauseMonitor = new JvmPauseMonitor(
Configuration.getMs(PropertyKey.JVM_MONITOR_SLEEP_INTERVAL_MS),
Configuration.getMs(PropertyKey.JVM_MONITOR_WARN_THRESHOLD_MS),
Configuration.getMs(PropertyKey.JVM_MONITOR_INFO_THRESHOLD_MS));
mJvmPauseMonitor.start();
MetricsSystem.registerGaugeIfAbsent(
MetricsSystem.getMetricName(MetricKey.TOTAL_EXTRA_TIME.getName()),
mJvmPauseMonitor::getTotalExtraTime);
MetricsSystem.registerGaugeIfAbsent(
MetricsSystem.getMetricName(MetricKey.INFO_TIME_EXCEEDED.getName()),
mJvmPauseMonitor::getInfoTimeExceeded);
MetricsSystem.registerGaugeIfAbsent(
MetricsSystem.getMetricName(MetricKey.WARN_TIME_EXCEEDED.getName()),
mJvmPauseMonitor::getWarnTimeExceeded);
} | @Test
public void doubleStart() {
Configuration.set(PropertyKey.MASTER_JVM_MONITOR_ENABLED, true);
SimpleService service = JvmMonitorService.Factory.create();
Assert.assertTrue(service instanceof JvmMonitorService);
service.start();
Assert.assertThrows("JVM pause monitor must not already exist",
IllegalStateException.class, service::start);
} |
@Override
public String getValue(EvaluationContext context) {
// Use variable name if we just provide this.
if (variableName != null && variable == null) {
variable = context.lookupVariable(variableName);
return (variable != null ? variable.toString() : "");
}
String propertyName = pathExpression;
String propertyPath = null;
int delimiterIndex = -1;
// Search for a delimiter to isolate property name.
for (String delimiter : PROPERTY_NAME_DELIMITERS) {
delimiterIndex = pathExpression.indexOf(delimiter);
if (delimiterIndex != -1) {
propertyName = pathExpression.substring(0, delimiterIndex);
propertyPath = pathExpression.substring(delimiterIndex);
break;
}
}
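// resolve the first property segment on the variable before applying any remaining path expression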
Object variableValue = getProperty(variable, propertyName);
if (log.isDebugEnabled()) {
log.debug("propertyName: {}", propertyName);
log.debug("propertyPath: {}", propertyPath);
log.debug("variableValue: {}", variableValue);
}
if (propertyPath != null) {
if (variableValue.getClass().equals(String.class)) {
if (propertyPath.startsWith("/")) {
// This is a JSON Pointer or XPath expression to apply.
String variableString = String.valueOf(variableValue);
if (variableString.trim().startsWith("{") || variableString.trim().startsWith("[")) {
variableValue = getJsonPointerValue(variableString, propertyPath);
} else if (variableString.trim().startsWith("<")) {
variableValue = getXPathValue(variableString, propertyPath);
} else {
log.warn("Got a path query expression but content seems not to be JSON nor XML...");
variableValue = null;
}
}
} else if (variableValue.getClass().isArray()) {
if (propertyPath.matches(ARRAY_INDEX_REGEXP)) {
Matcher m = ARRAY_INDEX_PATTERN.matcher(propertyPath);
if (m.matches()) {
String arrayIndex = m.group(1);
Object[] variableValues = (Object[]) variableValue;
try {
variableValue = variableValues[Integer.parseInt(arrayIndex)];
} catch (ArrayIndexOutOfBoundsException ae) {
log.warn("Expression asked for " + arrayIndex + " but array is smaller (" + variableValues.length
+ "). Returning null.");
variableValue = null;
}
}
}
} else if (Map.class.isAssignableFrom(variableValue.getClass())) {
if (propertyPath.matches(MAP_INDEX_REGEXP)) {
Matcher m = MAP_INDEX_PATTERN.matcher(propertyPath);
if (m.matches()) {
String mapKey = m.group(1);
Map variableValues = (Map) variableValue;
variableValue = variableValues.get(mapKey);
}
}
}
}
return String.valueOf(variableValue);
} | @Test
void testXPathWithNamespaceValue() {
String xmlString = "<ns:library xmlns:ns=\"https://microcks.io\">\n"
+ " <ns:name>My Personal Library</ns:name>\n" + " <ns:books>\n"
+ " <ns:book><ns:title>Title 1</ns:title><ns:author>Jane Doe</ns:author></ns:book>\n"
+ " <ns:book><ns:title>Title 2</ns:title><ns:author>John Doe</ns:author></ns:book>\n" + " </ns:books>\n"
+ "</ns:library>";
EvaluableRequest request = new EvaluableRequest(xmlString, null);
// Create new expression evaluating XML XPath.
VariableReferenceExpression exp = new VariableReferenceExpression(request, "body//*[local-name() = 'name']");
String result = exp.getValue(new EvaluationContext());
assertEquals("My Personal Library", result);
} |
@GET
@Path("status")
@Produces({MediaType.APPLICATION_JSON})
public Map<String, String> status() {
return STATUS_OK;
} | @Test
public void testStatus() {
assertEquals(server.status().get("status"), "ok");
} |
@Override
public void exportData(JsonWriter writer) throws IOException {
// version tag at the root
writer.name(THIS_VERSION);
writer.beginObject();
// clients list
writer.name(CLIENTS);
writer.beginArray();
writeClients(writer);
writer.endArray();
writer.name(GRANTS);
writer.beginArray();
writeGrants(writer);
writer.endArray();
writer.name(WHITELISTEDSITES);
writer.beginArray();
writeWhitelistedSites(writer);
writer.endArray();
writer.name(BLACKLISTEDSITES);
writer.beginArray();
writeBlacklistedSites(writer);
writer.endArray();
writer.name(AUTHENTICATIONHOLDERS);
writer.beginArray();
writeAuthenticationHolders(writer);
writer.endArray();
writer.name(ACCESSTOKENS);
writer.beginArray();
writeAccessTokens(writer);
writer.endArray();
writer.name(REFRESHTOKENS);
writer.beginArray();
writeRefreshTokens(writer);
writer.endArray();
writer.name(SYSTEMSCOPES);
writer.beginArray();
writeSystemScopes(writer);
writer.endArray();
for (MITREidDataServiceExtension extension : extensions) {
if (extension.supportsVersion(THIS_VERSION)) {
extension.exportExtensionData(writer);
break;
}
}
writer.endObject(); // end mitreid-connect-1.3
} | @Test
public void testExportAccessTokens() throws IOException, ParseException {
String expiration1 = "2014-09-10T22:49:44.090+00:00";
Date expirationDate1 = formatter.parse(expiration1, Locale.ENGLISH);
ClientDetailsEntity mockedClient1 = mock(ClientDetailsEntity.class);
when(mockedClient1.getClientId()).thenReturn("mocked_client_1");
AuthenticationHolderEntity mockedAuthHolder1 = mock(AuthenticationHolderEntity.class);
when(mockedAuthHolder1.getId()).thenReturn(1L);
OAuth2AccessTokenEntity token1 = new OAuth2AccessTokenEntity();
token1.setId(1L);
token1.setClient(mockedClient1);
token1.setExpiration(expirationDate1);
token1.setJwt(JWTParser.parse("eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3ODk5NjgsInN1YiI6IjkwMzQyLkFTREZKV0ZBIiwiYXRfaGFzaCI6InptTmt1QmNRSmNYQktNaVpFODZqY0EiLCJhdWQiOlsiY2xpZW50Il0sImlzcyI6Imh0dHA6XC9cL2xvY2FsaG9zdDo4MDgwXC9vcGVuaWQtY29ubmVjdC1zZXJ2ZXItd2ViYXBwXC8iLCJpYXQiOjE0MTI3ODkzNjh9.xkEJ9IMXpH7qybWXomfq9WOOlpGYnrvGPgey9UQ4GLzbQx7JC0XgJK83PmrmBZosvFPCmota7FzI_BtwoZLgAZfFiH6w3WIlxuogoH-TxmYbxEpTHoTsszZppkq9mNgOlArV4jrR9y3TPo4MovsH71dDhS_ck-CvAlJunHlqhs0"));
token1.setAuthenticationHolder(mockedAuthHolder1);
token1.setScope(ImmutableSet.of("id-token"));
token1.setTokenType("Bearer");
String expiration2 = "2015-01-07T18:31:50.079+00:00";
Date expirationDate2 = formatter.parse(expiration2, Locale.ENGLISH);
ClientDetailsEntity mockedClient2 = mock(ClientDetailsEntity.class);
when(mockedClient2.getClientId()).thenReturn("mocked_client_2");
AuthenticationHolderEntity mockedAuthHolder2 = mock(AuthenticationHolderEntity.class);
when(mockedAuthHolder2.getId()).thenReturn(2L);
OAuth2RefreshTokenEntity mockRefreshToken2 = mock(OAuth2RefreshTokenEntity.class);
when(mockRefreshToken2.getId()).thenReturn(1L);
OAuth2AccessTokenEntity token2 = new OAuth2AccessTokenEntity();
token2.setId(2L);
token2.setClient(mockedClient2);
token2.setExpiration(expirationDate2);
token2.setJwt(JWTParser.parse("eyJhbGciOiJSUzI1NiJ9.eyJleHAiOjE0MTI3OTI5NjgsImF1ZCI6WyJjbGllbnQiXSwiaXNzIjoiaHR0cDpcL1wvbG9jYWxob3N0OjgwODBcL29wZW5pZC1jb25uZWN0LXNlcnZlci13ZWJhcHBcLyIsImp0aSI6IjBmZGE5ZmRiLTYyYzItNGIzZS05OTdiLWU0M2VhMDUwMzNiOSIsImlhdCI6MTQxMjc4OTM2OH0.xgaVpRLYE5MzbgXfE0tZt823tjAm6Oh3_kdR1P2I9jRLR6gnTlBQFlYi3Y_0pWNnZSerbAE8Tn6SJHZ9k-curVG0-ByKichV7CNvgsE5X_2wpEaUzejvKf8eZ-BammRY-ie6yxSkAarcUGMvGGOLbkFcz5CtrBpZhfd75J49BIQ"));
token2.setAuthenticationHolder(mockedAuthHolder2);
token2.setRefreshToken(mockRefreshToken2);
token2.setScope(ImmutableSet.of("openid", "offline_access", "email", "profile"));
token2.setTokenType("Bearer");
Set<OAuth2AccessTokenEntity> allAccessTokens = ImmutableSet.of(token1, token2);
Mockito.when(clientRepository.getAllClients()).thenReturn(new HashSet<ClientDetailsEntity>());
Mockito.when(approvedSiteRepository.getAll()).thenReturn(new HashSet<ApprovedSite>());
Mockito.when(wlSiteRepository.getAll()).thenReturn(new HashSet<WhitelistedSite>());
Mockito.when(blSiteRepository.getAll()).thenReturn(new HashSet<BlacklistedSite>());
Mockito.when(authHolderRepository.getAll()).thenReturn(new ArrayList<AuthenticationHolderEntity>());
Mockito.when(tokenRepository.getAllRefreshTokens()).thenReturn(new HashSet<OAuth2RefreshTokenEntity>());
Mockito.when(tokenRepository.getAllAccessTokens()).thenReturn(allAccessTokens);
Mockito.when(sysScopeRepository.getAll()).thenReturn(new HashSet<SystemScope>());
// do the data export
StringWriter stringWriter = new StringWriter();
JsonWriter writer = new JsonWriter(stringWriter);
writer.beginObject();
dataService.exportData(writer);
writer.endObject();
writer.close();
// parse the output as a JSON object for testing
JsonElement elem = new JsonParser().parse(stringWriter.toString());
JsonObject root = elem.getAsJsonObject();
// make sure the root is there
assertThat(root.has(MITREidDataService.MITREID_CONNECT_1_3), is(true));
JsonObject config = root.get(MITREidDataService.MITREID_CONNECT_1_3).getAsJsonObject();
// make sure all the root elements are there
assertThat(config.has(MITREidDataService.CLIENTS), is(true));
assertThat(config.has(MITREidDataService.GRANTS), is(true));
assertThat(config.has(MITREidDataService.WHITELISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.BLACKLISTEDSITES), is(true));
assertThat(config.has(MITREidDataService.REFRESHTOKENS), is(true));
assertThat(config.has(MITREidDataService.ACCESSTOKENS), is(true));
assertThat(config.has(MITREidDataService.SYSTEMSCOPES), is(true));
assertThat(config.has(MITREidDataService.AUTHENTICATIONHOLDERS), is(true));
// make sure the root elements are all arrays
assertThat(config.get(MITREidDataService.CLIENTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.GRANTS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.WHITELISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.BLACKLISTEDSITES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.REFRESHTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.ACCESSTOKENS).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.SYSTEMSCOPES).isJsonArray(), is(true));
assertThat(config.get(MITREidDataService.AUTHENTICATIONHOLDERS).isJsonArray(), is(true));
// check our access token list (this test)
JsonArray accessTokens = config.get(MITREidDataService.ACCESSTOKENS).getAsJsonArray();
assertThat(accessTokens.size(), is(2));
// check for both of our access tokens in turn
Set<OAuth2AccessTokenEntity> checked = new HashSet<>();
for (JsonElement e : accessTokens) {
assertTrue(e.isJsonObject());
JsonObject token = e.getAsJsonObject();
OAuth2AccessTokenEntity compare = null;
if (token.get("id").getAsLong() == token1.getId().longValue()) {
compare = token1;
} else if (token.get("id").getAsLong() == token2.getId().longValue()) {
compare = token2;
}
if (compare == null) {
fail("Could not find matching id: " + token.get("id").getAsString());
} else {
assertThat(token.get("id").getAsLong(), equalTo(compare.getId()));
assertThat(token.get("clientId").getAsString(), equalTo(compare.getClient().getClientId()));
assertThat(token.get("expiration").getAsString(), equalTo(formatter.print(compare.getExpiration(), Locale.ENGLISH)));
assertThat(token.get("value").getAsString(), equalTo(compare.getValue()));
assertThat(token.get("type").getAsString(), equalTo(compare.getTokenType()));
assertThat(token.get("authenticationHolderId").getAsLong(), equalTo(compare.getAuthenticationHolder().getId()));
assertTrue(token.get("scope").isJsonArray());
assertThat(jsonArrayToStringSet(token.getAsJsonArray("scope")), equalTo(compare.getScope()));
if(token.get("refreshTokenId").isJsonNull()) {
assertNull(compare.getRefreshToken());
} else {
assertThat(token.get("refreshTokenId").getAsLong(), equalTo(compare.getRefreshToken().getId()));
}
checked.add(compare);
}
}
// make sure all of our access tokens were found
assertThat(checked.containsAll(allAccessTokens), is(true));
} |
@Override
public TreeModel<Regressor> train(Dataset<Regressor> examples, Map<String, Provenance> runProvenance) {
return train(examples, runProvenance, INCREMENT_INVOCATION_COUNT);
} | @Test
public void testThreeDenseData() {
Pair<Dataset<Regressor>,Dataset<Regressor>> p = RegressionDataGenerator.threeDimDenseTrainTest(1.0, false);
TreeModel<Regressor> llModel = t.train(p.getA());
RegressionEvaluation llEval = e.evaluate(llModel,p.getB());
double expectedDim1 = -0.6618655170782572;
double expectedDim2 = -0.6618655170782572;
double expectedDim3 = -0.7617851143770209;
double expectedAve = -0.6951720495111785;
assertEquals(expectedDim1,llEval.r2(new Regressor(RegressionDataGenerator.firstDimensionName,Double.NaN)),1e-6);
assertEquals(expectedDim2,llEval.r2(new Regressor(RegressionDataGenerator.secondDimensionName,Double.NaN)),1e-6);
assertEquals(expectedDim3,llEval.r2(new Regressor(RegressionDataGenerator.thirdDimensionName,Double.NaN)),1e-6);
assertEquals(expectedAve,llEval.averageR2(),1e-6);
Helpers.testModelProtoSerialization(llModel, Regressor.class, p.getB());
p = RegressionDataGenerator.threeDimDenseTrainTest(1.0, true);
llModel = t.train(p.getA());
llEval = e.evaluate(llModel,p.getB());
assertEquals(expectedDim1,llEval.r2(new Regressor(RegressionDataGenerator.firstDimensionName,Double.NaN)),1e-6);
assertEquals(expectedDim2,llEval.r2(new Regressor(RegressionDataGenerator.secondDimensionName,Double.NaN)),1e-6);
assertEquals(expectedDim3,llEval.r2(new Regressor(RegressionDataGenerator.thirdDimensionName,Double.NaN)),1e-6);
assertEquals(expectedAve,llEval.averageR2(),1e-6);
} |
public DebtRatingGrid getDebtRatingGrid() {
return ratingGrid;
} | @Test
public void load_rating_grid() {
settings.setProperty(CoreProperties.RATING_GRID, "1,3.4,8,50");
RatingSettings configurationLoader = new RatingSettings(settings.asConfig());
double[] grid = configurationLoader.getDebtRatingGrid().getGridValues();
assertThat(grid).hasSize(4);
assertThat(grid[0]).isEqualTo(1.0);
assertThat(grid[1]).isEqualTo(3.4);
assertThat(grid[2]).isEqualTo(8.0);
assertThat(grid[3]).isEqualTo(50.0);
} |
@Override
public <VO, VR> KStream<K, VR> join(final KStream<K, VO> otherStream,
final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
final JoinWindows windows) {
return join(otherStream, toValueJoinerWithKey(joiner), windows);
} | @Test
public void shouldNotAllowNullJoinedOnTableJoin() {
final NullPointerException exception = assertThrows(
NullPointerException.class,
() -> testStream.join(testTable, MockValueJoiner.TOSTRING_JOINER, null));
assertThat(exception.getMessage(), equalTo("joined can't be null"));
} |
public static String toString(RedisCommand<?> command, Object... params) {
if (RedisCommands.AUTH.equals(command)) {
return "command: " + command + ", params: (password masked)";
}
return "command: " + command + ", params: " + LogHelper.toString(params);
} | @Test
public void toStringWithNull() {
assertThat(LogHelper.toString(null)).isEqualTo("null");
} |
@Override
public Token login(LoginRequest loginRequest) {
final UserEntity userEntityFromDB = userRepository
.findUserEntityByEmail(loginRequest.getEmail())
.orElseThrow(
() -> new UserNotFoundException("Can't find with given email: "
+ loginRequest.getEmail())
);
if (Boolean.FALSE.equals(passwordEncoder.matches(
loginRequest.getPassword(), userEntityFromDB.getPassword()))) {
throw new PasswordNotValidException();
}
return tokenService.generateToken(userEntityFromDB.getClaims());
} | @Test
void login_ValidCredentials_ReturnsToken() {
// Given
LoginRequest loginRequest = LoginRequest.builder()
.email("[email protected]")
.password("password123")
.build();
UserEntity userEntity = new UserEntityBuilder().withValidUserFields().build();
Token expectedToken = Token.builder()
.accessToken("mockAccessToken")
.accessTokenExpiresAt(123456789L)
.refreshToken("mockRefreshToken")
.build();
// When
when(userRepository.findUserEntityByEmail(loginRequest.getEmail()))
.thenReturn(Optional.of(userEntity));
when(passwordEncoder.matches(loginRequest.getPassword(), userEntity.getPassword()))
.thenReturn(true);
when(tokenService.generateToken(userEntity.getClaims())).thenReturn(expectedToken);
Token actualToken = userLoginService.login(loginRequest);
// Then
assertEquals(expectedToken.getAccessToken(), actualToken.getAccessToken());
assertEquals(expectedToken.getRefreshToken(), actualToken.getRefreshToken());
assertEquals(expectedToken.getAccessTokenExpiresAt(), actualToken.getAccessTokenExpiresAt());
// Verify
verify(userRepository).findUserEntityByEmail(loginRequest.getEmail());
verify(passwordEncoder).matches(loginRequest.getPassword(), userEntity.getPassword());
verify(tokenService).generateToken(userEntity.getClaims());
} |
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception {
return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
(t, et) -> new MethodGetter(parent, method, modifier, t, et));
} | @Test
public void newMethodGetter_whenExtractingFromNonEmpty_Array_nullFirst_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType()
throws Exception {
OuterObject object = new OuterObject("name", null, new InnerObject("inner", 0, 1, 2, 3));
Getter parentGetter = GetterFactory.newMethodGetter(object, null, innersArrayMethod, "[any]");
Getter innerObjectNameGetter = GetterFactory.newMethodGetter(object, parentGetter, innerAttributesArrayMethod, "[any]");
Class<?> returnType = innerObjectNameGetter.getReturnType();
assertEquals(Integer.class, returnType);
} |
public static CompositeData parseComposite(URI uri) throws URISyntaxException {
CompositeData rc = new CompositeData();
rc.scheme = uri.getScheme();
String ssp = stripPrefix(uri.getRawSchemeSpecificPart().trim(), "//").trim();
parseComposite(uri, rc, ssp);
rc.fragment = uri.getFragment();
return rc;
} | @Test
public void testEmptyCompositeWithParenthesisInParam() throws Exception {
URI uri = new URI("failover://()?updateURIsURL=file:/C:/Dir(1)/a.csv");
CompositeData data = URISupport.parseComposite(uri);
assertEquals(0, data.getComponents().length);
assertEquals(1, data.getParameters().size());
assertTrue(data.getParameters().containsKey("updateURIsURL"));
assertEquals("file:/C:/Dir(1)/a.csv", data.getParameters().get("updateURIsURL"));
} |
public String getPackageName() {
return this.context != null
? this.context.getPackageName()
: "";
} | @Test
public void getPackageName() {
assertThat(contextUtil.getPackageName(), is("com.github.tony19.logback.android.test"));
} |
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
try {
final IRODSFileSystemAO fs = session.getClient();
final int descriptor = fs.createFile(file.getAbsolute(),
DataObjInp.OpenFlags.WRITE_TRUNCATE, DataObjInp.DEFAULT_CREATE_MODE);
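// creating and immediately closing the descriptor is enough to materialize the empty file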
fs.fileClose(descriptor, false);
return file;
}
catch(JargonException e) {
throw new IRODSExceptionMappingService().map("Cannot create {0}", e, file);
}
} | @Test
public void testTouch() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
new IRODSTouchFeature(session).touch(test, new TransferStatus());
assertTrue(new IRODSFindFeature(session).find(test));
new IRODSDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(new IRODSFindFeature(session).find(test));
session.close();
} |
public void update(String attemptId, boolean isRetry, String topic, String group, int queueId, long popTime, long invisibleTime,
List<Long> msgQueueOffsetList, StringBuilder orderInfoBuilder) {
String key = buildKey(topic, group);
ConcurrentHashMap<Integer/*queueId*/, OrderInfo> qs = table.get(key);
if (qs == null) {
qs = new ConcurrentHashMap<>(16);
ConcurrentHashMap<Integer/*queueId*/, OrderInfo> old = table.putIfAbsent(key, qs);
if (old != null) {
qs = old;
}
}
OrderInfo orderInfo = qs.get(queueId);
if (orderInfo != null) {
OrderInfo newOrderInfo = new OrderInfo(attemptId, popTime, invisibleTime, msgQueueOffsetList, System.currentTimeMillis(), 0);
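// carry over per-offset consumed counts from the previous order info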
newOrderInfo.mergeOffsetConsumedCount(orderInfo.attemptId, orderInfo.offsetList, orderInfo.offsetConsumedCount);
orderInfo = newOrderInfo;
} else {
orderInfo = new OrderInfo(attemptId, popTime, invisibleTime, msgQueueOffsetList, System.currentTimeMillis(), 0);
}
qs.put(queueId, orderInfo);
Map<Long, Integer> offsetConsumedCount = orderInfo.offsetConsumedCount;
int minConsumedTimes = Integer.MAX_VALUE;
if (offsetConsumedCount != null) {
Set<Long> offsetSet = offsetConsumedCount.keySet();
for (Long offset : offsetSet) {
Integer consumedTimes = offsetConsumedCount.getOrDefault(offset, 0);
ExtraInfoUtil.buildQueueOffsetOrderCountInfo(orderInfoBuilder, topic, queueId, offset, consumedTimes);
minConsumedTimes = Math.min(minConsumedTimes, consumedTimes);
}
if (offsetConsumedCount.size() != orderInfo.offsetList.size()) {
// offsetConsumedCount only keeps messages whose consumed count is greater than 0
// if the sizes differ, some of the messages are new
minConsumedTimes = 0;
}
} else {
minConsumedTimes = 0;
}
// for compatibility:
// the old pop SDK uses queueId to look up consumedTimes in orderCountInfo
ExtraInfoUtil.buildQueueIdOrderCountInfo(orderInfoBuilder, topic, queueId, minConsumedTimes);
updateLockFreeTimestamp(topic, group, queueId, orderInfo);
} | @Test
public void testConsumedCountForMultiQueue() {
{
// consume two new messages
StringBuilder orderInfoBuilder = new StringBuilder();
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_0,
popTime,
3000,
Lists.newArrayList(0L),
orderInfoBuilder
);
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_1,
popTime,
3000,
Lists.newArrayList(0L),
orderInfoBuilder
);
assertEncodeAndDecode();
Map<String, Integer> orderInfoMap = ExtraInfoUtil.parseOrderCountInfo(orderInfoBuilder.toString());
assertEquals(2, orderInfoMap.size());
assertEquals(0, orderInfoMap.get(ExtraInfoUtil.getStartOffsetInfoMapKey(TOPIC, QUEUE_ID_0)).intValue());
assertEquals(0, orderInfoMap.get(ExtraInfoUtil.getStartOffsetInfoMapKey(TOPIC, QUEUE_ID_1)).intValue());
}
{
// reconsume the two messages
StringBuilder orderInfoBuilder = new StringBuilder();
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_0,
popTime,
3000,
Lists.newArrayList(0L),
orderInfoBuilder
);
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_1,
popTime,
3000,
Lists.newArrayList(0L),
orderInfoBuilder
);
assertEncodeAndDecode();
Map<String, Integer> orderInfoMap = ExtraInfoUtil.parseOrderCountInfo(orderInfoBuilder.toString());
assertEquals(4, orderInfoMap.size());
assertEquals(1, orderInfoMap.get(ExtraInfoUtil.getStartOffsetInfoMapKey(TOPIC, QUEUE_ID_0)).intValue());
assertEquals(1, orderInfoMap.get(ExtraInfoUtil.getStartOffsetInfoMapKey(TOPIC, QUEUE_ID_1)).intValue());
assertEquals(1, orderInfoMap.get(ExtraInfoUtil.getQueueOffsetMapKey(TOPIC, QUEUE_ID_0, 0L)).intValue());
assertEquals(1, orderInfoMap.get(ExtraInfoUtil.getQueueOffsetMapKey(TOPIC, QUEUE_ID_1, 0L)).intValue());
}
{
// reconsume with a new message
StringBuilder orderInfoBuilder = new StringBuilder();
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_0,
popTime,
3000,
Lists.newArrayList(0L, 1L),
orderInfoBuilder
);
consumerOrderInfoManager.update(
null,
false,
TOPIC,
GROUP,
QUEUE_ID_1,
popTime,
3000,
Lists.newArrayList(0L),
orderInfoBuilder
);
assertEncodeAndDecode();
Map<String, Integer> orderInfoMap = ExtraInfoUtil.parseOrderCountInfo(orderInfoBuilder.toString());
assertEquals(4, orderInfoMap.size());
assertEquals(0, orderInfoMap.get(ExtraInfoUtil.getStartOffsetInfoMapKey(TOPIC, QUEUE_ID_0)).intValue());
assertEquals(2, orderInfoMap.get(ExtraInfoUtil.getStartOffsetInfoMapKey(TOPIC, QUEUE_ID_1)).intValue());
assertEquals(2, orderInfoMap.get(ExtraInfoUtil.getQueueOffsetMapKey(TOPIC, QUEUE_ID_0, 0L)).intValue());
assertNull(orderInfoMap.get(ExtraInfoUtil.getQueueOffsetMapKey(TOPIC, QUEUE_ID_0, 1L)));
assertEquals(2, orderInfoMap.get(ExtraInfoUtil.getQueueOffsetMapKey(TOPIC, QUEUE_ID_1, 0L)).intValue());
}
} |
public String build() {
if (columnDefs.isEmpty()) {
throw new IllegalStateException("No column has been defined");
}
StringBuilder sql = new StringBuilder().append("ALTER TABLE ").append(tableName).append(" ");
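// the ADD COLUMN syntax differs per SQL dialect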
switch (dialect.getId()) {
case PostgreSql.ID:
addColumns(sql, "ADD COLUMN ");
break;
case MsSql.ID:
sql.append("ADD ");
addColumns(sql, "");
break;
default:
sql.append("ADD (");
addColumns(sql, "");
sql.append(")");
}
return sql.toString();
} | @Test
public void add_columns_on_postgresql() {
assertThat(createSampleBuilder(new PostgreSql()).build())
.isEqualTo("ALTER TABLE issues ADD COLUMN date_in_ms BIGINT NULL, ADD COLUMN name VARCHAR (10) NOT NULL, ADD COLUMN col_with_default BOOLEAN DEFAULT false NOT NULL, ADD COLUMN varchar_col_with_default VARCHAR (3) DEFAULT 'foo' NOT NULL");
} |
public static String formatBetween(Date beginDate, Date endDate, BetweenFormatter.Level level) {
return formatBetween(between(beginDate, endDate, DateUnit.MS), level);
} | @Test
public void formatBetweenTest() {
final String dateStr1 = "2017-03-01 22:34:23";
final Date date1 = DateUtil.parse(dateStr1);
final String dateStr2 = "2017-04-01 23:56:14";
final Date date2 = DateUtil.parse(dateStr2);
final long between = DateUtil.between(date1, date2, DateUnit.MS);
final String formatBetween = DateUtil.formatBetween(between, Level.MINUTE);
assertEquals("31天1小时21分", formatBetween);
} |
@Override
public Object cloneValueData( Object object ) throws KettleValueException {
Timestamp timestamp = getTimestamp( object );
if ( timestamp == null ) {
return null;
}
Timestamp clone = new Timestamp( timestamp.getTime() );
clone.setNanos( timestamp.getNanos() );
return clone;
} | @Test
public void testCloneValueData() throws KettleValueException {
ValueMetaTimestamp valueMetaTimestamp = new ValueMetaTimestamp();
Object clonedTimestamp = valueMetaTimestamp.cloneValueData( TIMESTAMP_WITH_NANOSECONDS );
assertEquals( TIMESTAMP_WITH_NANOSECONDS, clonedTimestamp );
} |
@ConstantFunction(name = "int_divide", argTypes = {TINYINT, TINYINT}, returnType = TINYINT)
public static ConstantOperator intDivideTinyInt(ConstantOperator first, ConstantOperator second) {
return ConstantOperator.createTinyInt((byte) (first.getTinyInt() / second.getTinyInt()));
} | @Test
public void intDivideTinyInt() {
assertEquals(1, ScalarOperatorFunctions.intDivideTinyInt(O_TI_10, O_TI_10).getTinyInt());
} |
public void sendRequests(Callback<None> callback)
{
LOG.info("Event Bus Requests throttler started for {} keys at a {} load rate",
_keysToFetch.size(), _maxConcurrentRequests);
if (_keysToFetch.size() == 0)
{
callback.onSuccess(None.none());
return;
}
_callback = callback;
makeRequests(_maxConcurrentRequests);
} | @Test(timeOut = 10000)
public void testThrottlingUnlimitedRequests() throws InterruptedException, ExecutionException, TimeoutException
{
TestSubscriber testSubscriber = new TestSubscriber();
TestEventBus testZkEventBus = new TestEventBus(testSubscriber, 50);
final int nRequests = 100;
int concurrentRequestsHugeNumber = 999999999;
int concurrentRequestsCheckHigher = PropertyEventBusRequestsThrottler.DEFAULT_MAX_CONCURRENT_REQUESTS;
PropertyEventBusRequestsThrottler<String> propertyEventBusRequestsThrottler =
new PropertyEventBusRequestsThrottler<>(testZkEventBus, testSubscriber, generateNKeys(nRequests),
concurrentRequestsHugeNumber, false);
FutureCallback<None> callback = new FutureCallback<>();
propertyEventBusRequestsThrottler.sendRequests(callback);
boolean triggeredAtLeastOnce = false;
while (!callback.isDone() && !triggeredAtLeastOnce)
{
int currentConcurrentRequests =
testZkEventBus.getRequestCount().get() - testSubscriber.getCompletedRequestCount().get();
if (currentConcurrentRequests > concurrentRequestsCheckHigher)
{
triggeredAtLeastOnce = true;
}
Thread.sleep(50);
}
callback.get(1000, TimeUnit.MILLISECONDS);
Assert.assertTrue(triggeredAtLeastOnce);
Assert.assertEquals(nRequests, testZkEventBus.getRequestCount().get());
Assert.assertEquals(nRequests, testSubscriber.getCompletedRequestCount().get());
} |
public List<GroupedDataRecord> group(final List<DataRecord> dataRecords) {
List<GroupedDataRecord> result = new ArrayList<>(100);
List<DataRecord> mergedDataRecords = dataRecords.get(0).getUniqueKeyValue().isEmpty() ? dataRecords : merge(dataRecords);
Map<String, List<DataRecord>> tableGroup = mergedDataRecords.stream().collect(Collectors.groupingBy(DataRecord::getTableName));
for (Entry<String, List<DataRecord>> entry : tableGroup.entrySet()) {
Map<PipelineSQLOperationType, List<DataRecord>> typeGroup = entry.getValue().stream().collect(Collectors.groupingBy(DataRecord::getType));
result.add(new GroupedDataRecord(entry.getKey(), typeGroup.getOrDefault(PipelineSQLOperationType.INSERT, Collections.emptyList()),
typeGroup.getOrDefault(PipelineSQLOperationType.UPDATE, Collections.emptyList()), typeGroup.getOrDefault(PipelineSQLOperationType.DELETE, Collections.emptyList())));
}
return result;
} | @Test
void assertGroup() {
List<DataRecord> dataRecords = mockDataRecords();
List<GroupedDataRecord> groupedDataRecords = groupEngine.group(dataRecords);
assertThat(groupedDataRecords.size(), is(2));
assertThat(groupedDataRecords.get(0).getTableName(), is("t1"));
assertThat(groupedDataRecords.get(1).getTableName(), is("t2"));
assertThat(groupedDataRecords.get(0).getInsertDataRecords().size(), is(1));
assertThat(groupedDataRecords.get(0).getUpdateDataRecords().size(), is(1));
assertThat(groupedDataRecords.get(0).getDeleteDataRecords().size(), is(1));
} |
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
try {
final IRODSFileSystemAO fs = session.getClient();
final IRODSFile f = fs.getIRODSFileFactory().instanceIRODSFile(folder.getAbsolute());
fs.mkdir(f, false);
return folder;
}
catch(JargonException e) {
throw new IRODSExceptionMappingService().map("Cannot create folder {0}", e, folder);
}
} | @Test
public void testMakeDirectory() throws Exception {
final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
final Profile profile = new ProfilePlistReader(factory).read(
this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
));
final IRODSSession session = new IRODSSession(host);
session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
new IRODSDirectoryFeature(session).mkdir(test, new TransferStatus());
assertTrue(session.getFeature(Find.class).find(test));
session.getFeature(Delete.class).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
assertFalse(session.getFeature(Find.class).find(test));
session.close();
} |
public Map<String, String> pukRequestAllowed(PukRequest request) throws PukRequestException {
final PenRequestStatus result = repository.findFirstByBsnAndDocTypeAndSequenceNoOrderByRequestDatetimeDesc(request.getBsn(), request.getDocType(), request.getSequenceNo());
checkExpirationDatePen(result);
return statusOK;
} | @Test
public void pukRequestAllowedIsNotAllowedAfter22Days() throws PukRequestException {
status.setPinResetValidDate(LocalDateTime.of(2019, 1, 1, 12, 34));
Exception exception = assertThrows(PukRequestException.class, () -> {
service.pukRequestAllowed(request);
});
assertEquals("DWS5", exception.getMessage());
} |
public static String addUserHomeDirectoryIfApplicable(String origPathStr, String user)
throws IOException, URISyntaxException {
if(origPathStr == null || origPathStr.isEmpty()) {
return "/user/" + user;
}
Path p = new Path(origPathStr);
if(p.isAbsolute()) {
return origPathStr;
}
if(p.toUri().getPath().isEmpty()) {
//origPathStr="hdfs://host:99" for example
return new Path(p.toUri().getScheme(), p.toUri().getAuthority(), "/user/" + user).toString();
}
//can't have relative path if there is scheme/authority
return "/user/" + user + "/" + origPathStr;
} | @Test
public void testConstructingUserHomeDirectory() throws Exception {
String[] sources = new String[] { "output+", "/user/hadoop/output",
"hdfs://container", "hdfs://container/", "hdfs://container/path",
"output#link", "hdfs://cointaner/output#link",
"hdfs://container@acc/test", "/user/webhcat/düsseldorf", "düsseldorf",
"䶴狝A﨩O", "hdfs://host:8080"};
String[] expectedResults = new String[] { "/user/webhcat/output+",
"/user/hadoop/output", "hdfs://container/user/webhcat",
"hdfs://container/", "hdfs://container/path",
"/user/webhcat/output#link", "hdfs://cointaner/output#link",
"hdfs://container@acc/test", "/user/webhcat/düsseldorf","/user/webhcat/düsseldorf",
"/user/webhcat/䶴狝A﨩O", "hdfs://host:8080/user/webhcat" };
for (int i = 0; i < sources.length; i++) {
String source = sources[i];
String expectedResult = expectedResults[i];
String result = TempletonUtils.addUserHomeDirectoryIfApplicable(source,
"webhcat");
Assert.assertEquals("i=" + i, expectedResult, result);
}
String badUri = "c:\\some\\path";
try {
TempletonUtils.addUserHomeDirectoryIfApplicable(badUri, "webhcat");
Assert.fail("addUserHomeDirectoryIfApplicable should fail for bad URI: "
+ badUri);
} catch (IllegalArgumentException ex) {
}
} |
@VisibleForTesting
static Map<String, AtomicInteger> getUnknownTablesWarningsMap() {
return unknownTablesWarnings;
} | @Test
public void unknownTableOrdering() throws Exception {
SpannerSchema.Builder builder = SpannerSchema.builder();
builder.addColumn("test1", "key", "INT64");
builder.addKeyPart("test1", "key", false);
SpannerSchema schema = builder.build();
// Verify that the encoded keys are ordered by table name and column values (as text).
List<Mutation> sortedMutations =
Arrays.asList(
Mutation.newInsertOrUpdateBuilder("test2")
.set("key")
.to("a")
.set("keydesc")
.to("a")
.build(),
Mutation.newInsertOrUpdateBuilder("test2")
.set("key")
.to("a")
.set("keydesc")
.to("b")
.build(),
Mutation.newInsertOrUpdateBuilder("test3")
.set("key")
.to("b")
// leave keydesc value unspecified --> maxvalue descending.
.build(),
Mutation.newInsertOrUpdateBuilder("test4")
.set("key")
.to("b")
.set("keydesc")
.to("a")
.build(),
Mutation.newInsertOrUpdateBuilder("test4")
// leave 'key' value unspecified -> maxvalue
.set("keydesc")
.to("a")
.build());
verifyEncodedOrdering(schema, sortedMutations);
Assert.assertEquals(3, MutationKeyEncoder.getUnknownTablesWarningsMap().size());
Assert.assertEquals(2, MutationKeyEncoder.getUnknownTablesWarningsMap().get("test2").get());
Assert.assertEquals(1, MutationKeyEncoder.getUnknownTablesWarningsMap().get("test3").get());
Assert.assertEquals(2, MutationKeyEncoder.getUnknownTablesWarningsMap().get("test4").get());
} |
public int nodeId() {
return nodeId;
} | @Test
public void testNoOpRecordWriteAfterTimeout() throws Throwable {
long maxIdleIntervalNs = 1_000;
long maxReplicationDelayMs = 60_000;
try (
LocalLogManagerTestEnv logEnv = new LocalLogManagerTestEnv.Builder(3).
build();
QuorumControllerTestEnv controlEnv = new QuorumControllerTestEnv.Builder(logEnv).
setControllerBuilderInitializer(controllerBuilder -> {
controllerBuilder.setConfigSchema(SCHEMA);
controllerBuilder.setMaxIdleIntervalNs(OptionalLong.of(maxIdleIntervalNs));
}).
build()
) {
ListenerCollection listeners = new ListenerCollection();
listeners.add(new Listener().setName("PLAINTEXT").setHost("localhost").setPort(9092));
QuorumController active = controlEnv.activeController();
LocalLogManager localLogManager = logEnv
.logManagers()
.stream()
.filter(logManager -> logManager.nodeId().equals(OptionalInt.of(active.nodeId())))
.findAny()
.get();
TestUtils.waitForCondition(
() -> localLogManager.highWatermark().isPresent(),
maxReplicationDelayMs,
"High watermark was not established"
);
final long firstHighWatermark = localLogManager.highWatermark().getAsLong();
TestUtils.waitForCondition(
() -> localLogManager.highWatermark().getAsLong() > firstHighWatermark,
maxReplicationDelayMs,
"Active controller didn't write NoOpRecord the first time"
);
// Do it again to make sure that we are not counting the leader change record
final long secondHighWatermark = localLogManager.highWatermark().getAsLong();
TestUtils.waitForCondition(
() -> localLogManager.highWatermark().getAsLong() > secondHighWatermark,
maxReplicationDelayMs,
"Active controller didn't write NoOpRecord the second time"
);
}
} |
public void deleteEtlOutputPath(String outputPath, BrokerDesc brokerDesc) {
try {
if (brokerDesc.hasBroker()) {
BrokerUtil.deletePath(outputPath, brokerDesc);
} else {
HdfsUtil.deletePath(outputPath, brokerDesc);
}
LOG.info("delete path success. path: {}", outputPath);
} catch (UserException e) {
LOG.warn("delete path failed. path: {}", outputPath, e);
}
} | @Test
public void testDeleteEtlOutputPath(@Mocked BrokerUtil brokerUtil) throws UserException {
new Expectations() {
{
BrokerUtil.deletePath(etlOutputPath, (BrokerDesc) any);
times = 1;
}
};
BrokerDesc brokerDesc = new BrokerDesc(broker, Maps.newHashMap());
SparkEtlJobHandler handler = new SparkEtlJobHandler();
try {
handler.deleteEtlOutputPath(etlOutputPath, brokerDesc);
} catch (Exception e) {
Assert.fail(e.getMessage());
}
} |
@VisibleForTesting
static Optional<String> findEntryClass(File jarFile) throws IOException {
return findFirstManifestAttribute(
jarFile,
PackagedProgram.MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS,
PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS);
} | @Test
void testFindEntryClassNoEntry() throws IOException {
File jarFile = createJarFileWithManifest(ImmutableMap.of());
Optional<String> entry = JarManifestParser.findEntryClass(jarFile);
assertThat(entry).isNotPresent();
} |
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster,
long now,
OperationProgress operationProgress)
throws NotEnoughValidWindowsException {
ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false);
return aggregate(cluster, -1L, now, requirements, operationProgress);
} | @Test
public void testAggregate() throws NotEnoughValidWindowsException {
KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
Metadata metadata = getMetadata(Collections.singleton(TP));
KafkaPartitionMetricSampleAggregator
metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);
populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
MetricSampleAggregationResult<String, PartitionEntity> result =
metricSampleAggregator.aggregate(metadata.fetch(), Long.MAX_VALUE, new OperationProgress());
Map<PartitionEntity, ValuesAndExtrapolations> valuesAndExtrapolations = result.valuesAndExtrapolations();
assertEquals("The windows should only have one partition", 1, valuesAndExtrapolations.size());
ValuesAndExtrapolations partitionValuesAndExtrapolations = valuesAndExtrapolations.get(PE);
assertNotNull(partitionValuesAndExtrapolations);
assertEquals(NUM_WINDOWS, partitionValuesAndExtrapolations.metricValues().length());
for (int i = 0; i < NUM_WINDOWS; i++) {
assertEquals((NUM_WINDOWS - i) * WINDOW_MS, result.valuesAndExtrapolations().get(PE).window(i));
for (Resource resource : Resource.cachedValues()) {
Collection<Short> metricIds = KafkaMetricDef.resourceToMetricIds(resource);
double expectedValue = (resource == Resource.DISK ? (NUM_WINDOWS - 1 - i) * 10 + MIN_SAMPLES_PER_WINDOW - 1
: (NUM_WINDOWS - 1 - i) * 10 + (MIN_SAMPLES_PER_WINDOW - 1) / 2.0)
/ (resource == Resource.CPU ? UNIT_INTERVAL_TO_PERCENTAGE : 1.0) * metricIds.size();
assertEquals("The utilization for " + resource + " should be " + expectedValue,
expectedValue, partitionValuesAndExtrapolations.metricValues().valuesForGroup(resource.name(),
KafkaMetricDef.commonMetricDef(),
true).get(i), 0.01);
}
}
// Verify the metric completeness checker state
MetadataClient.ClusterAndGeneration clusterAndGeneration =
new MetadataClient.ClusterAndGeneration(metadata.fetch(), 1);
assertEquals(NUM_WINDOWS, metricSampleAggregator.validWindows(clusterAndGeneration.cluster(), 1.0).size());
Map<Long, Float> monitoredPercentages = metricSampleAggregator.validPartitionRatioByWindows(clusterAndGeneration.cluster());
for (double percentage : monitoredPercentages.values()) {
assertEquals(1.0, percentage, 0.0);
}
assertEquals(NUM_WINDOWS, metricSampleAggregator.availableWindows().size());
} |
int startReconfiguration(final String nodeType, final String address)
throws IOException, InterruptedException {
return startReconfigurationUtil(nodeType, address, System.out, System.err);
} | @Test
public void testAllDatanodesReconfig()
throws IOException, InterruptedException, TimeoutException {
ReconfigurationUtil reconfigurationUtil = mock(ReconfigurationUtil.class);
cluster.getDataNodes().get(0).setReconfigurationUtil(reconfigurationUtil);
cluster.getDataNodes().get(1).setReconfigurationUtil(reconfigurationUtil);
List<ReconfigurationUtil.PropertyChange> changes = new ArrayList<>();
changes.add(new ReconfigurationUtil.PropertyChange(
DFS_DATANODE_PEER_STATS_ENABLED_KEY, "true",
datanode.getConf().get(DFS_DATANODE_PEER_STATS_ENABLED_KEY)));
when(reconfigurationUtil.parseChangedProperties(any(Configuration.class),
any(Configuration.class))).thenReturn(changes);
int result = admin.startReconfiguration("datanode", "livenodes");
Assertions.assertThat(result).isEqualTo(0);
final List<String> outsForStartReconf = new ArrayList<>();
final List<String> errsForStartReconf = new ArrayList<>();
reconfigurationOutErrFormatter("startReconfiguration", "datanode",
"livenodes", outsForStartReconf, errsForStartReconf);
String started = "Started reconfiguration task on node";
String starting =
"Starting of reconfiguration task successful on 2 nodes, failed on 0 nodes.";
Assertions.assertThat(outsForStartReconf).hasSize(3);
Assertions.assertThat(errsForStartReconf).hasSize(0);
Assertions.assertThat(outsForStartReconf.get(0)).startsWith(started);
Assertions.assertThat(outsForStartReconf.get(1)).startsWith(started);
Assertions.assertThat(outsForStartReconf.get(2)).startsWith(starting);
Thread.sleep(1000);
final List<String> outs = new ArrayList<>();
final List<String> errs = new ArrayList<>();
awaitReconfigurationFinished("datanode", "livenodes", outs, errs);
Assertions.assertThat(outs).hasSize(9);
Assertions.assertThat(errs).hasSize(0);
LOG.info("dfsadmin -status -livenodes output:");
outs.forEach(s -> LOG.info("{}", s));
Assertions.assertThat(outs.get(0)).startsWith("Reconfiguring status for node");
String success = "SUCCESS: Changed property dfs.datanode.peer.stats.enabled";
String from = "\tFrom: \"false\"";
String to = "\tTo: \"true\"";
String retrieval =
"Retrieval of reconfiguration status successful on 2 nodes, failed on 0 nodes.";
Assertions.assertThat(outs.subList(1, 5)).containsSubsequence(success, from, to);
Assertions.assertThat(outs.subList(5, 9)).containsSubsequence(success, from, to, retrieval);
} |
public void checkJavaVersion() {
Runtime.Version version = Runtime.version();
if (version.compareTo(Runtime.Version.parse("17")) < 0) {
LOG.warn(LOG_MESSAGE);
String documentationLink = documentationLinkGenerator.getDocumentationLink("/analyzing-source-code/scanner-environment");
analysisWarnings.addUnique(WARNING_MESSAGE_TEMPLATE.replace("{}", documentationLink));
}
} | @Test
public void given_runtime11_should_log_message() {
try (MockedStatic<Runtime> utilities = Mockito.mockStatic(Runtime.class)) {
Runtime.Version version = Runtime.Version.parse("11");
utilities.when(Runtime::version).thenReturn(version);
underTest.checkJavaVersion();
assertThat(logTester.getLogs(Level.WARN)).extracting(LogAndArguments::getRawMsg)
.anyMatch(s -> s.contains(RuntimeJavaVersion.LOG_MESSAGE));
}
} |
public void terminateCluster(final List<String> deleteTopicPatterns) {
terminatePersistentQueries();
deleteSinkTopics(deleteTopicPatterns);
deleteTopics(managedTopics);
ksqlEngine.close();
} | @Test
public void shouldCleanUpSchemasForExplicitTopicListAvro() throws Exception {
// Given:
givenTopicsExistInKafka("K_Foo");
givenSinkTopicsExistInMetastore(FormatFactory.AVRO, "K_Foo");
givenSchemasForTopicsExistInSchemaRegistry("K_Foo");
// When:
clusterTerminator.terminateCluster(ImmutableList.of("K_Foo"));
// Then:
verifySchemaDeletedForTopics("K_Foo");
} |
public Print() {
this("\n");
} | @Test
public void testPrint() throws IOException {
Print print = new Print();
PrintStream out = mock(PrintStream.class);
FindOptions options = new FindOptions();
options.setOut(out);
print.setOptions(options);
String filename = "/one/two/test";
PathData item = new PathData(filename, mockFs.getConf());
assertEquals(Result.PASS, print.apply(item, -1));
verify(out).print(filename + '\n');
verifyNoMoreInteractions(out);
} |
public static <T> Read<T> readAvrosWithBeamSchema(Class<T> clazz) {
if (clazz.equals(GenericRecord.class)) {
throw new IllegalArgumentException("For GenericRecord, please call readAvroGenericRecords");
}
AvroCoder<T> coder = AvroCoder.of(clazz);
org.apache.avro.Schema avroSchema = coder.getSchema();
Schema schema = AvroUtils.getSchema(clazz, avroSchema);
return Read.newBuilder(parsePayloadUsingCoder(coder))
.setCoder(
SchemaCoder.of(
schema,
TypeDescriptor.of(clazz),
AvroUtils.getToRowFunction(clazz, avroSchema),
AvroUtils.getFromRowFunction(clazz)))
.build();
} | @Test
public void testAvroPojo() {
AvroCoder<GenericClass> coder = AvroCoder.of(GenericClass.class);
List<GenericClass> inputs =
Lists.newArrayList(
new GenericClass(
1, "foo", new DateTime().withDate(2019, 10, 1).withZone(DateTimeZone.UTC)),
new GenericClass(
2, "bar", new DateTime().withDate(1986, 10, 1).withZone(DateTimeZone.UTC)));
setupTestClient(inputs, coder);
PCollection<GenericClass> read =
pipeline.apply(
PubsubIO.readAvrosWithBeamSchema(GenericClass.class)
.fromSubscription(SUBSCRIPTION.getPath())
.withClock(CLOCK)
.withClientFactory(clientFactory));
PAssert.that(read).containsInAnyOrder(inputs);
pipeline.run();
} |
@Override
public RestResponse<KsqlEntityList> makeKsqlRequest(
final URI serverEndPoint,
final String sql,
final Map<String, ?> requestProperties) {
final KsqlTarget target = sharedClient
.target(serverEndPoint);
return getTarget(target)
.postKsqlRequest(sql, requestProperties, Optional.empty());
} | @Test
public void shouldPostRequest() {
// When:
final RestResponse<KsqlEntityList> result = client.makeKsqlRequest(SERVER_ENDPOINT, "Sql", ImmutableMap.of());
// Then:
verify(target).postKsqlRequest("Sql", ImmutableMap.of(), Optional.empty());
assertThat(result, is(response));
} |
public static List<String> splitToWhiteSpaceSeparatedTokens(String input) {
if (input == null) {
return new ArrayList<>();
}
StringTokenizer tokenizer = new StringTokenizer(input.trim(), QUOTE_CHAR + WHITESPACE, true);
List<String> tokens = new ArrayList<>();
StringBuilder quotedText = new StringBuilder();
while (tokenizer.hasMoreTokens()) {
String token = tokenizer.nextToken();
if (QUOTE_CHAR.equals(token)) {
// if we have a quote, add the next tokens to the quoted text
// until the quoting has finished
quotedText.append(QUOTE_CHAR);
String buffer = quotedText.toString();
if (isSingleQuoted(buffer) || isDoubleQuoted(buffer)) {
tokens.add(buffer.substring(1, buffer.length() - 1));
quotedText = new StringBuilder();
}
} else if (WHITESPACE.equals(token)) {
// a white space, if in quote, add the white space, otherwise
// skip it
if (quotedText.length() > 0) {
quotedText.append(WHITESPACE);
}
} else {
if (quotedText.length() > 0) {
quotedText.append(token);
} else {
tokens.add(token);
}
}
}
if (quotedText.length() > 0) {
throw new IllegalArgumentException("Invalid quoting found in args " + quotedText);
}
return tokens;
} | @Test
public void testDoubleQuote() {
List<String> args = splitToWhiteSpaceSeparatedTokens("\"\"arg0\"\"");
assertEquals("\"arg0\"", args.get(0));
} |
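A note on the quoting rules above: unquoted input splits on whitespace, while a quoted run keeps its embedded spaces and loses the outer quote pair. The sketch below is a minimal illustration, assuming QUOTE_CHAR is the double-quote character (which the testDoubleQuote expectation implies) and that the method is statically imported as in the test above; the test name is hypothetical.

@Test
public void quotedArgumentKeepsEmbeddedWhitespaceSketch() {
    // "arg1" is a plain whitespace-separated token; "arg 2" stays together and the outer quotes are stripped.
    List<String> args = splitToWhiteSpaceSeparatedTokens("arg1 \"arg 2\"");
    assertEquals(2, args.size());
    assertEquals("arg1", args.get(0));
    assertEquals("arg 2", args.get(1));
}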
public boolean isAfterFlink114() {
return flinkInterpreter.getFlinkVersion().isAfterFlink114();
} | @Test
void testBatchIPyFlink() throws InterpreterException, IOException {
if (!flinkInnerInterpreter.getFlinkVersion().isAfterFlink114()) {
testBatchPyFlink(interpreter, flinkScalaInterpreter);
}
} |
public static String formatExpression(final Expression expression) {
return formatExpression(expression, FormatOptions.of(s -> false));
} | @Test
public void shouldFormatInListExpression() {
assertThat(ExpressionFormatter.formatExpression(new InListExpression(Collections.singletonList(new StringLiteral("a")))), equalTo("('a')"));
} |
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
checkNotNull(expectedMultimap, "expectedMultimap");
checkNotNull(actual);
ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
ListMultimap<?, ?> extra = difference(actual, expectedMultimap);
// TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
// the subject but not enough times. Similarly for unexpected extra items.
if (!missing.isEmpty()) {
if (!extra.isEmpty()) {
boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
// Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
// grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
// show the actual and expected multimaps in the standard format).
String missingDisplay =
addTypeInfo
? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
: countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
String extraDisplay =
addTypeInfo
? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
: countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
failWithActual(
fact("missing", missingDisplay),
fact("unexpected", extraDisplay),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
} else {
failWithActual(
fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
} else if (!extra.isEmpty()) {
failWithActual(
fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
simpleFact("---"),
fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
return ALREADY_FAILED;
}
return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
} | @Test
public void containsExactlyFailureMissing() {
ImmutableMultimap<Integer, String> expected =
ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
ListMultimap<Integer, String> actual = LinkedListMultimap.create(expected);
actual.remove(3, "six");
actual.remove(4, "five");
expectFailureWhenTestingThat(actual).containsExactlyEntriesIn(expected);
assertFailureKeys("missing", "---", "expected", "but was");
assertFailureValue("missing", "{3=[six], 4=[five]}");
} |
@Override
public void execute() throws Exception {
LOG.debug("Executing map task");
try (Closeable stateCloser = executionStateTracker.activate()) {
try {
// Start operations, in reverse-execution-order, so that a
// consumer is started before a producer might output to it.
// Starting a root operation such as a ReadOperation does the work
// of processing the input dataset.
LOG.debug("Starting operations");
ListIterator<Operation> iterator = operations.listIterator(operations.size());
while (iterator.hasPrevious()) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException("Worker aborted");
}
Operation op = iterator.previous();
op.start();
}
// Finish operations, in forward-execution-order, so that a
// producer finishes outputting to its consumers before those
// consumers are themselves finished.
LOG.debug("Finishing operations");
for (Operation op : operations) {
if (Thread.currentThread().isInterrupted()) {
throw new InterruptedException("Worker aborted");
}
op.finish();
}
} catch (Exception | Error exn) {
LOG.debug("Aborting operations", exn);
for (Operation op : operations) {
try {
op.abort();
} catch (Exception | Error exn2) {
exn.addSuppressed(exn2);
if (exn2 instanceof InterruptedException) {
Thread.currentThread().interrupt();
}
}
}
throw exn;
}
}
LOG.debug("Map task execution complete");
// TODO: support for success / failure ports?
} | @Test
@SuppressWarnings("unchecked")
/**
* This test makes sure that any metrics reported within an operation are part of the metric
* containers returned by {@code getMetricContainers}.
*/
public void testGetMetricContainers() throws Exception {
ExecutionStateTracker stateTracker =
new DataflowExecutionStateTracker(
ExecutionStateSampler.newForTest(),
new TestDataflowExecutionState(
NameContext.forStage("testStage"),
"other",
null /* requestingStepName */,
null /* sideInputIndex */,
null /* metricsContainer */,
NoopProfileScope.NOOP),
new CounterSet(),
PipelineOptionsFactory.create(),
"test-work-item-id");
final String o1 = "o1";
TestOperationContext context1 = createContext(o1, stateTracker);
final String o2 = "o2";
TestOperationContext context2 = createContext(o2, stateTracker);
final String o3 = "o3";
TestOperationContext context3 = createContext(o3, stateTracker);
List<Operation> operations =
Arrays.asList(
new Operation(new OutputReceiver[] {}, context1) {
@Override
public void start() throws Exception {
super.start();
try (Closeable scope = context.enterStart()) {
Metrics.counter("TestMetric", "MetricCounter").inc(1L);
}
}
},
new Operation(new OutputReceiver[] {}, context2) {
@Override
public void start() throws Exception {
super.start();
try (Closeable scope = context.enterStart()) {
Metrics.counter("TestMetric", "MetricCounter").inc(2L);
}
}
},
new Operation(new OutputReceiver[] {}, context3) {
@Override
public void start() throws Exception {
super.start();
try (Closeable scope = context.enterStart()) {
Metrics.counter("TestMetric", "MetricCounter").inc(3L);
}
}
});
try (MapTaskExecutor executor = new MapTaskExecutor(operations, counterSet, stateTracker)) {
// Call execute so that we run all the counters
executor.execute();
assertThat(
context1.metricsContainer().getUpdates().counterUpdates(),
contains(metricUpdate("TestMetric", "MetricCounter", o1, 1L)));
assertThat(
context2.metricsContainer().getUpdates().counterUpdates(),
contains(metricUpdate("TestMetric", "MetricCounter", o2, 2L)));
assertThat(
context3.metricsContainer().getUpdates().counterUpdates(),
contains(metricUpdate("TestMetric", "MetricCounter", o3, 3L)));
assertEquals(0, stateTracker.getMillisSinceBundleStart());
assertEquals(TimeUnit.MINUTES.toMillis(10), stateTracker.getNextBundleLullDurationReportMs());
}
} |
public String format() {
return dataSourceName + DELIMITER + tableName;
} | @Test
void assertFormatIncludeInstance() {
String expected = "ds_0.db_0.tbl_0";
DataNode dataNode = new DataNode(expected);
assertThat(dataNode.format(), is(expected));
} |
void publishLogDelta(
MetadataDelta delta,
MetadataImage newImage,
LogDeltaManifest manifest
) {
bytesSinceLastSnapshot += manifest.numBytes();
if (bytesSinceLastSnapshot >= maxBytesSinceLastSnapshot) {
if (eventQueue.isEmpty()) {
scheduleEmit("we have replayed at least " + maxBytesSinceLastSnapshot +
" bytes", newImage);
} else if (log.isTraceEnabled()) {
log.trace("Not scheduling bytes-based snapshot because event queue is not empty yet.");
}
} else if (maxTimeSinceLastSnapshotNs != 0 &&
(time.nanoseconds() - lastSnapshotTimeNs >= maxTimeSinceLastSnapshotNs)) {
if (eventQueue.isEmpty()) {
scheduleEmit("we have waited at least " +
TimeUnit.NANOSECONDS.toMinutes(maxTimeSinceLastSnapshotNs) + " minute(s)", newImage);
} else if (log.isTraceEnabled()) {
log.trace("Not scheduling time-based snapshot because event queue is not empty yet.");
}
} else if (log.isTraceEnabled()) {
log.trace("Neither time-based nor bytes-based criteria are met; not scheduling snapshot.");
}
} | @Test
public void testSnapshotsDisabled() throws Exception {
MockFaultHandler faultHandler = new MockFaultHandler("SnapshotGenerator");
MockEmitter emitter = new MockEmitter().setReady();
AtomicReference<String> disabledReason = new AtomicReference<>();
try (SnapshotGenerator generator = new SnapshotGenerator.Builder(emitter).
setFaultHandler(faultHandler).
setMaxBytesSinceLastSnapshot(1).
setMaxTimeSinceLastSnapshotNs(0).
setDisabledReason(disabledReason).
build()) {
disabledReason.compareAndSet(null, "we are testing disable()");
// No snapshots are generated because snapshots are disabled.
generator.publishLogDelta(TEST_DELTA, TEST_IMAGE, logDeltaManifestBuilder().build());
}
assertEquals(Collections.emptyList(), emitter.images());
faultHandler.maybeRethrowFirstException();
} |
@Override
protected CouchbaseEndpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
CouchbaseEndpoint endpoint = new CouchbaseEndpoint(uri, remaining, this);
setProperties(endpoint, parameters);
return endpoint;
} | @Test
public void testCouchbaseNullAdditionalHosts() throws Exception {
Map<String, Object> params = new HashMap<>();
params.put("additionalHosts", null);
params.put("bucket", "bucket");
String uri = "couchbase:http://localhost";
String remaining = "http://localhost";
CouchbaseEndpoint endpoint = new CouchbaseComponent(context).createEndpoint(uri, remaining, params);
URI[] endpointArray = endpoint.makeBootstrapURI();
assertEquals(1, endpointArray.length);
} |
@Override
public void updateNode(K8sNode node) {
checkNotNull(node, ERR_NULL_NODE);
K8sNode intNode;
K8sNode extNode;
K8sNode localNode;
K8sNode tunNode;
K8sNode existingNode = nodeStore.node(node.hostname());
checkNotNull(existingNode, ERR_NULL_NODE);
DeviceId existIntgBridge = nodeStore.node(node.hostname()).intgBridge();
if (node.intgBridge() == null) {
intNode = node.updateIntgBridge(existIntgBridge);
checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()),
NOT_DUPLICATED_MSG, intNode.intgBridge());
} else {
intNode = node;
checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()),
NOT_DUPLICATED_MSG, intNode.intgBridge());
}
DeviceId existExtBridge = nodeStore.node(node.hostname()).extBridge();
if (intNode.extBridge() == null) {
extNode = intNode.updateExtBridge(existExtBridge);
checkArgument(!hasExtBridge(extNode.extBridge(), extNode.hostname()),
NOT_DUPLICATED_MSG, extNode.extBridge());
} else {
extNode = intNode;
checkArgument(!hasExtBridge(extNode.extBridge(), extNode.hostname()),
NOT_DUPLICATED_MSG, extNode.extBridge());
}
DeviceId existLocalBridge = nodeStore.node(node.hostname()).localBridge();
if (extNode.localBridge() == null) {
localNode = extNode.updateLocalBridge(existLocalBridge);
checkArgument(!hasLocalBridge(localNode.localBridge(), localNode.hostname()),
NOT_DUPLICATED_MSG, localNode.localBridge());
} else {
localNode = extNode;
checkArgument(!hasLocalBridge(localNode.localBridge(), localNode.hostname()),
NOT_DUPLICATED_MSG, localNode.localBridge());
}
if (node.mode() == NORMAL) {
DeviceId existTunBridge = nodeStore.node(node.hostname()).tunBridge();
if (localNode.tunBridge() == null) {
tunNode = localNode.updateTunBridge(existTunBridge);
checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()),
NOT_DUPLICATED_MSG, tunNode.tunBridge());
} else {
tunNode = localNode;
checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()),
NOT_DUPLICATED_MSG, tunNode.tunBridge());
}
nodeStore.updateNode(tunNode);
} else {
nodeStore.updateNode(localNode);
}
log.info(String.format(MSG_NODE, extNode.hostname(), MSG_UPDATED));
} | @Test(expected = NullPointerException.class)
public void testUpdateNotExistingNode() {
target.updateNode(MINION_1);
} |
public static SessionBytesStoreSupplier persistentSessionStore(final String name,
final Duration retentionPeriod) {
Objects.requireNonNull(name, "name cannot be null");
final String msgPrefix = prepareMillisCheckFailMsgPrefix(retentionPeriod, "retentionPeriod");
final long retentionPeriodMs = validateMillisecondDuration(retentionPeriod, msgPrefix);
if (retentionPeriodMs < 0) {
throw new IllegalArgumentException("retentionPeriod cannot be negative");
}
return new RocksDbSessionBytesStoreSupplier(name, retentionPeriodMs);
} | @Test
public void shouldThrowIfIPersistentSessionStoreRetentionPeriodIsNegative() {
final Exception e = assertThrows(IllegalArgumentException.class, () -> Stores.persistentSessionStore("anyName", ofMillis(-1)));
assertEquals("retentionPeriod cannot be negative", e.getMessage());
} |
@PostMapping("/syncData")
@RequiresPermissions("system:authen:modify")
public ShenyuAdminResult syncData() {
return appAuthService.syncData();
} | @Test
public void testSyncData() throws Exception {
this.mockMvc.perform(MockMvcRequestBuilders.post("/appAuth/syncData"))
.andExpect(status().isOk())
.andReturn();
} |
/**
 * Parses the given text to transform it to the desired target type.
 * @param text The LLM output in string format.
 * @return The parsed output in the desired target type.
 */
@Override
public T convert(@NonNull String text) {
try {
// Remove leading and trailing whitespace
text = text.trim();
// Check for and remove triple backticks and "json" identifier
if (text.startsWith("```") && text.endsWith("```")) {
// Remove the first line if it contains "```json"
String[] lines = text.split("\n", 2);
if (lines[0].trim().equalsIgnoreCase("```json")) {
text = lines.length > 1 ? lines[1] : "";
}
else {
text = text.substring(3); // Remove leading ```
}
// Remove trailing ```
text = text.substring(0, text.length() - 3);
// Trim again to remove any potential whitespace
text = text.trim();
}
return (T) this.objectMapper.readValue(text, this.typeRef);
}
catch (JsonProcessingException e) {
logger.error("Could not parse the given text to the desired target type:" + text + " into " + this.typeRef);
throw new RuntimeException(e);
}
} | @Test
public void convertClassWithDateType() {
var converter = new BeanOutputConverter<>(TestClassWithDateProperty.class);
var testClass = converter.convert("{ \"someString\": \"2020-01-01\" }");
assertThat(testClass.getSomeString()).isEqualTo(LocalDate.of(2020, 1, 1));
} |
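Because the converter strips a leading ```json fence and the trailing ``` before parsing, a fenced LLM response should convert the same way as the bare JSON in convertClassWithDateType. The sketch below reuses the same TestClassWithDateProperty fixture and payload, only wrapped in a fence; the test name is hypothetical.

@Test
public void convertStripsMarkdownJsonFence() {
    var converter = new BeanOutputConverter<>(TestClassWithDateProperty.class);
    // Same payload as above, wrapped in the ```json fence the converter removes before parsing.
    var testClass = converter.convert("```json\n{ \"someString\": \"2020-01-01\" }\n```");
    assertThat(testClass.getSomeString()).isEqualTo(LocalDate.of(2020, 1, 1));
}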
public String readWPHexString(int numOfBytes) throws IOException {
StringBuilder b = new StringBuilder();
for (int i = 0; i < numOfBytes; i++) {
b.append(readWPHex());
}
return b.toString();
} | @Test
public void testReadHexString() throws Exception {
try (WPInputStream wpInputStream = emptyWPStream()) {
wpInputStream.readWPHexString(10);
fail("should have thrown EOF");
} catch (EOFException e) {
//swallow
}
} |
@Override
public BasicTypeDefine reconvert(Column column) {
BasicTypeDefine.BasicTypeDefineBuilder builder =
BasicTypeDefine.builder()
.name(column.getName())
.nullable(column.isNullable())
.comment(column.getComment())
.defaultValue(column.getDefaultValue());
switch (column.getDataType().getSqlType()) {
case BOOLEAN:
builder.columnType(SQLSERVER_BIT);
builder.dataType(SQLSERVER_BIT);
break;
case TINYINT:
builder.columnType(SQLSERVER_TINYINT);
builder.dataType(SQLSERVER_TINYINT);
break;
case SMALLINT:
builder.columnType(SQLSERVER_SMALLINT);
builder.dataType(SQLSERVER_SMALLINT);
break;
case INT:
builder.columnType(SQLSERVER_INT);
builder.dataType(SQLSERVER_INT);
break;
case BIGINT:
builder.columnType(SQLSERVER_BIGINT);
builder.dataType(SQLSERVER_BIGINT);
break;
case FLOAT:
builder.columnType(SQLSERVER_REAL);
builder.dataType(SQLSERVER_REAL);
break;
case DOUBLE:
builder.columnType(SQLSERVER_FLOAT);
builder.dataType(SQLSERVER_FLOAT);
break;
case DECIMAL:
DecimalType decimalType = (DecimalType) column.getDataType();
long precision = decimalType.getPrecision();
int scale = decimalType.getScale();
if (precision <= 0) {
precision = DEFAULT_PRECISION;
scale = DEFAULT_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is precision less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (precision > MAX_PRECISION) {
scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
precision = MAX_PRECISION;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum precision of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_PRECISION,
precision,
scale);
}
if (scale < 0) {
scale = 0;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which is scale less than 0, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
precision,
scale);
} else if (scale > MAX_SCALE) {
scale = MAX_SCALE;
log.warn(
"The decimal column {} type decimal({},{}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to decimal({},{})",
column.getName(),
decimalType.getPrecision(),
decimalType.getScale(),
MAX_SCALE,
precision,
scale);
}
builder.columnType(String.format("%s(%s,%s)", SQLSERVER_DECIMAL, precision, scale));
builder.dataType(SQLSERVER_DECIMAL);
builder.precision(precision);
builder.scale(scale);
break;
case STRING:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(MAX_NVARCHAR);
builder.dataType(MAX_NVARCHAR);
} else if (column.getColumnLength() <= MAX_NVARCHAR_LENGTH) {
builder.columnType(
String.format("%s(%s)", SQLSERVER_NVARCHAR, column.getColumnLength()));
builder.dataType(SQLSERVER_NVARCHAR);
builder.length(column.getColumnLength());
} else {
builder.columnType(MAX_NVARCHAR);
builder.dataType(MAX_NVARCHAR);
builder.length(column.getColumnLength());
}
break;
case BYTES:
if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
builder.columnType(MAX_VARBINARY);
builder.dataType(SQLSERVER_VARBINARY);
} else if (column.getColumnLength() <= MAX_BINARY_LENGTH) {
builder.columnType(
String.format("%s(%s)", SQLSERVER_VARBINARY, column.getColumnLength()));
builder.dataType(SQLSERVER_VARBINARY);
builder.length(column.getColumnLength());
} else {
builder.columnType(MAX_VARBINARY);
builder.dataType(SQLSERVER_VARBINARY);
builder.length(column.getColumnLength());
}
break;
case DATE:
builder.columnType(SQLSERVER_DATE);
builder.dataType(SQLSERVER_DATE);
break;
case TIME:
if (column.getScale() != null && column.getScale() > 0) {
int timeScale = column.getScale();
if (timeScale > MAX_TIME_SCALE) {
timeScale = MAX_TIME_SCALE;
log.warn(
"The time column {} type time({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to time({})",
column.getName(),
column.getScale(),
MAX_TIME_SCALE,
timeScale);
}
builder.columnType(String.format("%s(%s)", SQLSERVER_TIME, timeScale));
builder.scale(timeScale);
} else {
builder.columnType(SQLSERVER_TIME);
}
builder.dataType(SQLSERVER_TIME);
break;
case TIMESTAMP:
if (column.getScale() != null && column.getScale() > 0) {
int timestampScale = column.getScale();
if (timestampScale > MAX_TIMESTAMP_SCALE) {
timestampScale = MAX_TIMESTAMP_SCALE;
log.warn(
"The timestamp column {} type timestamp({}) is out of range, "
+ "which exceeds the maximum scale of {}, "
+ "it will be converted to timestamp({})",
column.getName(),
column.getScale(),
MAX_TIMESTAMP_SCALE,
timestampScale);
}
builder.columnType(
String.format("%s(%s)", SQLSERVER_DATETIME2, timestampScale));
builder.scale(timestampScale);
} else {
builder.columnType(SQLSERVER_DATETIME2);
}
builder.dataType(SQLSERVER_DATETIME2);
break;
default:
throw CommonError.convertToConnectorTypeError(
DatabaseIdentifier.SQLSERVER,
column.getDataType().getSqlType().name(),
column.getName());
}
return builder.build();
} | @Test
public void testReconvertDouble() {
Column column =
PhysicalColumn.builder().name("test").dataType(BasicType.DOUBLE_TYPE).build();
BasicTypeDefine typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column);
Assertions.assertEquals(column.getName(), typeDefine.getName());
Assertions.assertEquals(SqlServerTypeConverter.SQLSERVER_FLOAT, typeDefine.getColumnType());
Assertions.assertEquals(SqlServerTypeConverter.SQLSERVER_FLOAT, typeDefine.getDataType());
} |
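For the STRING branch above, a bounded column length is expected to become a sized NVARCHAR with the length carried through. This is a hedged sketch, assuming the PhysicalColumn builder exposes columnLength (as the getter used by reconvert suggests), that 255 is within the converter's MAX_NVARCHAR_LENGTH, and that SQLSERVER_NVARCHAR is accessible like SQLSERVER_FLOAT is in testReconvertDouble; the test name is hypothetical.

@Test
public void reconvertBoundedStringSketch() {
    Column column = PhysicalColumn.builder()
            .name("test")
            .dataType(BasicType.STRING_TYPE)
            .columnLength(255L)
            .build();
    BasicTypeDefine typeDefine = SqlServerTypeConverter.INSTANCE.reconvert(column);
    // Expected: a sized NVARCHAR column type, the NVARCHAR data type, and the original length preserved.
    Assertions.assertEquals(
            String.format("%s(%s)", SqlServerTypeConverter.SQLSERVER_NVARCHAR, 255),
            typeDefine.getColumnType());
    Assertions.assertEquals(SqlServerTypeConverter.SQLSERVER_NVARCHAR, typeDefine.getDataType());
    Assertions.assertEquals(Long.valueOf(255L), typeDefine.getLength());
}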
public static String getJPushSDKName(byte whichPushSDK) {
String name;
switch (whichPushSDK) {
case 0:
name = "Jpush";
break;
case 1:
name = "Xiaomi";
break;
case 2:
name = "HUAWEI";
break;
case 3:
name = "Meizu";
break;
case 4:
name = "OPPO";
break;
case 5:
name = "vivo";
break;
case 6:
name = "Asus";
break;
case 8:
name = "fcm";
break;
default:
name = "Jpush";
}
return name;
} | @Test
public void getJPushSDKName() {
Assert.assertEquals("Jpush", PushUtils.getJPushSDKName((byte) 0));
Assert.assertEquals("Xiaomi", PushUtils.getJPushSDKName((byte) 1));
Assert.assertEquals("HUAWEI", PushUtils.getJPushSDKName((byte) 2));
Assert.assertEquals("Meizu", PushUtils.getJPushSDKName((byte) 3));
Assert.assertEquals("OPPO", PushUtils.getJPushSDKName((byte) 4));
Assert.assertEquals("vivo", PushUtils.getJPushSDKName((byte) 5));
Assert.assertEquals("Asus", PushUtils.getJPushSDKName((byte) 6));
Assert.assertEquals("fcm", PushUtils.getJPushSDKName((byte) 8));
Assert.assertEquals("Jpush", PushUtils.getJPushSDKName((byte) 100));
} |
public static <N> ImmutableGraph<N> singletonUndirectedGraph(N node) {
final MutableGraph<N> graph = GraphBuilder.undirected().build();
graph.addNode(node);
return ImmutableGraph.copyOf(graph);
} | @Test
public void singletonUndirectedGraph() {
final ImmutableGraph<String> singletonGraph = Graphs.singletonUndirectedGraph("Test");
assertThat(singletonGraph.isDirected()).isFalse();
assertThat(singletonGraph.nodes()).containsExactly("Test");
assertThat(singletonGraph.edges()).isEmpty();
} |
public String view(TableIdentifier ident) {
return SLASH.join(
"v1",
prefix,
"namespaces",
RESTUtil.encodeNamespace(ident.namespace()),
"views",
RESTUtil.encodeString(ident.name()));
} | @Test
public void view() {
TableIdentifier ident = TableIdentifier.of("ns", "view-name");
assertThat(withPrefix.view(ident)).isEqualTo("v1/ws/catalog/namespaces/ns/views/view-name");
assertThat(withoutPrefix.view(ident)).isEqualTo("v1/namespaces/ns/views/view-name");
} |
@Override
public void report(SortedMap<MetricName, Gauge> gauges,
SortedMap<MetricName, Counter> counters,
SortedMap<MetricName, Histogram> histograms,
SortedMap<MetricName, Meter> meters,
SortedMap<MetricName, Timer> timers) {
final String dateTime = dateFormat.format(new Date(clock.getTime()));
printWithBanner(dateTime, '=');
output.println();
if (!gauges.isEmpty()) {
printWithBanner("-- Gauges", '-');
for (Map.Entry<MetricName, Gauge> entry : gauges.entrySet()) {
output.println(entry.getKey());
printGauge(entry);
}
output.println();
}
if (!counters.isEmpty()) {
printWithBanner("-- Counters", '-');
for (Map.Entry<MetricName, Counter> entry : counters.entrySet()) {
output.println(entry.getKey());
printCounter(entry);
}
output.println();
}
if (!histograms.isEmpty()) {
printWithBanner("-- Histograms", '-');
for (Map.Entry<MetricName, Histogram> entry : histograms.entrySet()) {
output.println(entry.getKey());
printHistogram(entry.getValue());
}
output.println();
}
if (!meters.isEmpty()) {
printWithBanner("-- Meters", '-');
for (Map.Entry<MetricName, Meter> entry : meters.entrySet()) {
output.println(entry.getKey());
printMeter(entry.getValue());
}
output.println();
}
if (!timers.isEmpty()) {
printWithBanner("-- Timers", '-');
for (Map.Entry<MetricName, Timer> entry : timers.entrySet()) {
output.println(entry.getKey());
printTimer(entry.getValue());
}
output.println();
}
output.println();
output.flush();
} | @Test
public void reportsHistogramValues() throws Exception {
final Histogram histogram = mock(Histogram.class);
when(histogram.getCount()).thenReturn(1L);
final Snapshot snapshot = mock(Snapshot.class);
when(snapshot.getMax()).thenReturn(2L);
when(snapshot.getMean()).thenReturn(3.0);
when(snapshot.getMin()).thenReturn(4L);
when(snapshot.getStdDev()).thenReturn(5.0);
when(snapshot.getMedian()).thenReturn(6.0);
when(snapshot.get75thPercentile()).thenReturn(7.0);
when(snapshot.get95thPercentile()).thenReturn(8.0);
when(snapshot.get98thPercentile()).thenReturn(9.0);
when(snapshot.get99thPercentile()).thenReturn(10.0);
when(snapshot.get999thPercentile()).thenReturn(11.0);
when(histogram.getSnapshot()).thenReturn(snapshot);
reporter.report(this.map(),
this.map(),
map("test.histogram", histogram),
this.map(),
this.map());
assertThat(consoleOutput())
.isEqualTo(lines(
"3/17/13, 6:04:36 PM ============================================================",
"",
"-- Histograms ------------------------------------------------------------------",
"test.histogram",
" count = 1",
" min = 4",
" max = 2",
" mean = 3.00",
" stddev = 5.00",
" median = 6.00",
" 75% <= 7.00",
" 95% <= 8.00",
" 98% <= 9.00",
" 99% <= 10.00",
" 99.9% <= 11.00",
"",
""
));
} |
@Override
public String getMethod() {
return PATH;
} | @Test
public void testAnswerWebAppQueryWithEmptyId() {
AnswerWebAppQuery answerWebAppQuery = AnswerWebAppQuery
.builder()
.webAppQueryId("")
.queryResult(InlineQueryResultArticle
.builder()
.id("MyId")
.title("Text")
.inputMessageContent(InputTextMessageContent
.builder()
.messageText("My own text")
.build())
.build())
.build();
assertEquals("answerWebAppQuery", answerWebAppQuery.getMethod());
Throwable thrown = assertThrows(TelegramApiValidationException.class, answerWebAppQuery::validate);
assertEquals("WebAppQueryId can't be empty", thrown.getMessage());
} |
@Override
protected List<SegmentConversionResult> convert(PinotTaskConfig pinotTaskConfig, List<File> segmentDirs,
File workingDir)
throws Exception {
int numInputSegments = segmentDirs.size();
_eventObserver.notifyProgress(pinotTaskConfig, "Converting segments: " + numInputSegments);
String taskType = pinotTaskConfig.getTaskType();
Map<String, String> configs = pinotTaskConfig.getConfigs();
LOGGER.info("Starting task: {} with configs: {}", taskType, configs);
long startMillis = System.currentTimeMillis();
String realtimeTableName = configs.get(MinionConstants.TABLE_NAME_KEY);
String rawTableName = TableNameBuilder.extractRawTableName(realtimeTableName);
String offlineTableName = TableNameBuilder.OFFLINE.tableNameWithType(rawTableName);
TableConfig tableConfig = getTableConfig(offlineTableName);
Schema schema = getSchema(offlineTableName);
SegmentProcessorConfig.Builder segmentProcessorConfigBuilder =
new SegmentProcessorConfig.Builder().setTableConfig(tableConfig).setSchema(schema);
// Time handler config
segmentProcessorConfigBuilder
.setTimeHandlerConfig(MergeTaskUtils.getTimeHandlerConfig(tableConfig, schema, configs));
// Partitioner config
segmentProcessorConfigBuilder
.setPartitionerConfigs(MergeTaskUtils.getPartitionerConfigs(tableConfig, schema, configs));
// Merge type
MergeType mergeType = MergeTaskUtils.getMergeType(configs);
// Handle legacy key
if (mergeType == null) {
String legacyMergeTypeStr = configs.get(RealtimeToOfflineSegmentsTask.COLLECTOR_TYPE_KEY);
if (legacyMergeTypeStr != null) {
mergeType = MergeType.valueOf(legacyMergeTypeStr.toUpperCase());
}
}
segmentProcessorConfigBuilder.setMergeType(mergeType);
// Aggregation types
segmentProcessorConfigBuilder.setAggregationTypes(MergeTaskUtils.getAggregationTypes(configs));
// Segment config
segmentProcessorConfigBuilder.setSegmentConfig(MergeTaskUtils.getSegmentConfig(configs));
// Progress observer
segmentProcessorConfigBuilder.setProgressObserver(p -> _eventObserver.notifyProgress(_pinotTaskConfig, p));
SegmentProcessorConfig segmentProcessorConfig = segmentProcessorConfigBuilder.build();
List<RecordReader> recordReaders = new ArrayList<>(numInputSegments);
int count = 1;
for (File segmentDir : segmentDirs) {
_eventObserver.notifyProgress(_pinotTaskConfig,
String.format("Creating RecordReader for: %s (%d out of %d)", segmentDir, count++, numInputSegments));
PinotSegmentRecordReader recordReader = new PinotSegmentRecordReader();
// NOTE: Do not fill null field with default value to be consistent with other record readers
recordReader.init(segmentDir, null, null, true);
recordReaders.add(recordReader);
}
List<File> outputSegmentDirs;
try {
_eventObserver.notifyProgress(_pinotTaskConfig, "Generating segments");
outputSegmentDirs = new SegmentProcessorFramework(recordReaders, segmentProcessorConfig, workingDir).process();
} finally {
for (RecordReader recordReader : recordReaders) {
recordReader.close();
}
}
long endMillis = System.currentTimeMillis();
LOGGER.info("Finished task: {} with configs: {}. Total time: {}ms", taskType, configs, (endMillis - startMillis));
List<SegmentConversionResult> results = new ArrayList<>();
for (File outputSegmentDir : outputSegmentDirs) {
String outputSegmentName = outputSegmentDir.getName();
results.add(new SegmentConversionResult.Builder().setFile(outputSegmentDir).setSegmentName(outputSegmentName)
.setTableNameWithType(offlineTableName).build());
}
return results;
} | @Test
public void testConcat()
throws Exception {
FileUtils.deleteQuietly(WORKING_DIR);
RealtimeToOfflineSegmentsTaskExecutor realtimeToOfflineSegmentsTaskExecutor =
new RealtimeToOfflineSegmentsTaskExecutor(null, null);
realtimeToOfflineSegmentsTaskExecutor.setMinionEventObserver(new MinionProgressObserver());
Map<String, String> configs = new HashMap<>();
configs.put(MinionConstants.TABLE_NAME_KEY, "testTable_OFFLINE");
configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_START_MS_KEY, "1600473600000");
configs.put(MinionConstants.RealtimeToOfflineSegmentsTask.WINDOW_END_MS_KEY, "1600560000000");
PinotTaskConfig pinotTaskConfig =
new PinotTaskConfig(MinionConstants.RealtimeToOfflineSegmentsTask.TASK_TYPE, configs);
List<SegmentConversionResult> conversionResults =
realtimeToOfflineSegmentsTaskExecutor.convert(pinotTaskConfig, _segmentIndexDirList, WORKING_DIR);
assertEquals(conversionResults.size(), 1);
File resultingSegment = conversionResults.get(0).getFile();
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(resultingSegment);
assertEquals(segmentMetadata.getTotalDocs(), 30);
ColumnMetadata columnMetadataForT = segmentMetadata.getColumnMetadataFor(T);
assertEquals(columnMetadataForT.getCardinality(), 3);
assertTrue((long) columnMetadataForT.getMinValue() >= 1600473600000L);
assertTrue((long) columnMetadataForT.getMaxValue() < 1600560000000L);
} |
public static String toSanitizedString(Expression expr) {
return ExpressionVisitors.visit(expr, new StringSanitizer());
} | @Test
public void testSanitizeStringFallback() {
Pattern filterPattern = Pattern.compile("^test = \\(hash-[0-9a-fA-F]{8}\\)$");
for (String filter :
Lists.newArrayList(
"2022-20-29",
"2022-04-29T40:49:51.123456",
"2022-04-29T23:70:51-07:00",
"2022-04-29T23:49:51.123456+100:00")) {
String sanitizedFilter = ExpressionUtil.toSanitizedString(Expressions.equal("test", filter));
assertThat(filterPattern.matcher(sanitizedFilter)).matches();
}
} |
public static Optional<ConnectorPageSource> createHivePageSource(
Set<HiveRecordCursorProvider> cursorProviders,
Set<HiveBatchPageSourceFactory> pageSourceFactories,
Configuration configuration,
ConnectorSession session,
HiveFileSplit fileSplit,
OptionalInt tableBucketNumber,
Storage storage,
TupleDomain<HiveColumnHandle> effectivePredicate,
List<HiveColumnHandle> hiveColumns,
Map<String, HiveColumnHandle> predicateColumns,
List<HivePartitionKey> partitionKeys,
DateTimeZone hiveStorageTimeZone,
TypeManager typeManager,
SchemaTableName tableName,
List<HiveColumnHandle> partitionKeyColumnHandles,
List<Column> tableDataColumns,
Map<String, String> tableParameters,
int partitionDataColumnCount,
TableToPartitionMapping tableToPartitionMapping,
Optional<BucketConversion> bucketConversion,
boolean s3SelectPushdownEnabled,
HiveFileContext hiveFileContext,
RowExpression remainingPredicate,
boolean isPushdownFilterEnabled,
RowExpressionService rowExpressionService,
Optional<EncryptionInformation> encryptionInformation,
Optional<byte[]> rowIdPartitionComponent)
{
List<HiveColumnHandle> allColumns;
if (isPushdownFilterEnabled) {
Set<String> columnNames = hiveColumns.stream().map(HiveColumnHandle::getName).collect(toImmutableSet());
List<HiveColumnHandle> additionalColumns = predicateColumns.values().stream()
.filter(column -> !columnNames.contains(column.getName()))
.collect(toImmutableList());
allColumns = ImmutableList.<HiveColumnHandle>builder()
.addAll(hiveColumns)
.addAll(additionalColumns)
.build();
}
else {
allColumns = hiveColumns;
}
List<ColumnMapping> columnMappings = ColumnMapping.buildColumnMappings(
partitionKeys,
allColumns,
bucketConversion.map(BucketConversion::getBucketColumnHandles).orElse(ImmutableList.of()),
tableToPartitionMapping,
fileSplit,
tableBucketNumber);
Set<Integer> outputIndices = hiveColumns.stream()
.map(HiveColumnHandle::getHiveColumnIndex)
.collect(toImmutableSet());
// Finds the non-synthetic columns.
List<ColumnMapping> regularAndInterimColumnMappings = ColumnMapping.extractRegularAndInterimColumnMappings(columnMappings);
Optional<BucketAdaptation> bucketAdaptation = bucketConversion.map(conversion -> toBucketAdaptation(
conversion,
regularAndInterimColumnMappings,
tableBucketNumber,
ColumnMapping::getIndex,
isLegacyTimestampBucketing(session)));
if (isUseRecordPageSourceForCustomSplit(session) && shouldUseRecordReaderFromInputFormat(configuration, storage, fileSplit.getCustomSplitInfo())) {
return getPageSourceFromCursorProvider(
cursorProviders,
configuration,
session,
fileSplit,
storage,
effectivePredicate,
hiveStorageTimeZone,
typeManager,
tableName,
partitionKeyColumnHandles,
tableDataColumns,
tableParameters,
partitionDataColumnCount,
tableToPartitionMapping,
s3SelectPushdownEnabled,
remainingPredicate,
isPushdownFilterEnabled,
rowExpressionService,
allColumns,
columnMappings,
outputIndices,
regularAndInterimColumnMappings,
bucketAdaptation);
}
for (HiveBatchPageSourceFactory pageSourceFactory : pageSourceFactories) {
Optional<? extends ConnectorPageSource> pageSource = pageSourceFactory.createPageSource(
configuration,
session,
fileSplit,
storage,
tableName,
tableParameters,
toColumnHandles(regularAndInterimColumnMappings, true),
effectivePredicate,
hiveStorageTimeZone,
hiveFileContext,
encryptionInformation,
rowIdPartitionComponent);
if (pageSource.isPresent()) {
HivePageSource hivePageSource = new HivePageSource(
columnMappings,
bucketAdaptation,
hiveStorageTimeZone,
typeManager,
pageSource.get(),
fileSplit.getPath(),
rowIdPartitionComponent);
if (isPushdownFilterEnabled) {
return Optional.of(new FilteringPageSource(
columnMappings,
effectivePredicate,
remainingPredicate,
typeManager,
rowExpressionService,
session,
outputIndices,
hivePageSource));
}
return Optional.of(hivePageSource);
}
}
return getPageSourceFromCursorProvider(
cursorProviders,
configuration,
session,
fileSplit,
storage,
effectivePredicate,
hiveStorageTimeZone,
typeManager,
tableName,
partitionKeyColumnHandles,
tableDataColumns,
tableParameters,
partitionDataColumnCount,
tableToPartitionMapping,
s3SelectPushdownEnabled,
remainingPredicate,
isPushdownFilterEnabled,
rowExpressionService,
allColumns,
columnMappings,
outputIndices,
regularAndInterimColumnMappings,
bucketAdaptation);
} | @Test
public void testNotUseRecordReaderWithInputFormatAnnotationWithoutCustomSplit()
{
StorageFormat storageFormat = StorageFormat.create(ParquetHiveSerDe.class.getName(), HoodieParquetInputFormat.class.getName(), "");
Storage storage = new Storage(storageFormat, "test", Optional.empty(), true, ImmutableMap.of(), ImmutableMap.of());
HiveRecordCursorProvider recordCursorProvider = new MockHiveRecordCursorProvider();
HiveBatchPageSourceFactory hiveBatchPageSourceFactory = new MockHiveBatchPageSourceFactory();
HiveFileSplit fileSplit = new HiveFileSplit(
"/test/",
0,
100,
200,
Instant.now().toEpochMilli(),
Optional.empty(),
ImmutableMap.of(),
0);
Optional<ConnectorPageSource> pageSource = HivePageSourceProvider.createHivePageSource(
ImmutableSet.of(recordCursorProvider),
ImmutableSet.of(hiveBatchPageSourceFactory),
new Configuration(),
new TestingConnectorSession(new HiveSessionProperties(
new HiveClientConfig().setUseRecordPageSourceForCustomSplit(true),
new OrcFileWriterConfig(),
new ParquetFileWriterConfig(),
new CacheConfig()).getSessionProperties()),
fileSplit,
OptionalInt.empty(),
storage,
TupleDomain.none(),
ImmutableList.of(),
ImmutableMap.of(),
ImmutableList.of(),
DateTimeZone.UTC,
new TestingTypeManager(),
new SchemaTableName("test", "test"),
ImmutableList.of(),
ImmutableList.of(),
ImmutableMap.of(),
0,
TableToPartitionMapping.empty(),
Optional.empty(),
false,
null,
null,
false,
null,
Optional.empty(),
Optional.empty());
assertTrue(pageSource.isPresent());
assertTrue(pageSource.get() instanceof HivePageSource);
} |
@Override
public void describe(SensorDescriptor descriptor) {
descriptor
.name("Xoo Highlighting Sensor")
.onlyOnLanguages(Xoo.KEY);
} | @Test
public void testDescriptor() {
sensor.describe(new DefaultSensorDescriptor());
} |
@Override
public double score(int[] truth, int[] prediction) {
return of(truth, prediction, strategy);
} | @Test
public void testMacro() {
System.out.println("Macro-Recall");
int[] truth = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5
};
int[] prediction = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 2, 2, 3, 1, 3, 3, 3, 4, 5, 4, 4, 4, 4, 1, 5, 5
};
Recall instance = new Recall(Averaging.Macro);
double expResult = 0.8157;
double result = instance.score(truth, prediction);
assertEquals(expResult, result, 1E-4);
} |
@Override
public Column convert(BasicTypeDefine typeDefine) {
PhysicalColumn.PhysicalColumnBuilder builder =
PhysicalColumn.builder()
.name(typeDefine.getName())
.sourceType(typeDefine.getColumnType())
.nullable(typeDefine.isNullable())
.defaultValue(typeDefine.getDefaultValue())
.comment(typeDefine.getComment());
String xuguDataType = typeDefine.getDataType().toUpperCase();
switch (xuguDataType) {
case XUGU_BOOLEAN:
case XUGU_BOOL:
builder.dataType(BasicType.BOOLEAN_TYPE);
break;
case XUGU_TINYINT:
builder.dataType(BasicType.BYTE_TYPE);
break;
case XUGU_SMALLINT:
builder.dataType(BasicType.SHORT_TYPE);
break;
case XUGU_INT:
case XUGU_INTEGER:
builder.dataType(BasicType.INT_TYPE);
break;
case XUGU_BIGINT:
builder.dataType(BasicType.LONG_TYPE);
break;
case XUGU_FLOAT:
builder.dataType(BasicType.FLOAT_TYPE);
break;
case XUGU_DOUBLE:
builder.dataType(BasicType.DOUBLE_TYPE);
break;
case XUGU_NUMBER:
case XUGU_DECIMAL:
case XUGU_NUMERIC:
DecimalType decimalType;
if (typeDefine.getPrecision() != null && typeDefine.getPrecision() > 0) {
decimalType =
new DecimalType(
typeDefine.getPrecision().intValue(), typeDefine.getScale());
} else {
decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
}
builder.dataType(decimalType);
builder.columnLength(Long.valueOf(decimalType.getPrecision()));
builder.scale(decimalType.getScale());
break;
case XUGU_CHAR:
case XUGU_NCHAR:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
} else {
builder.columnLength(typeDefine.getLength());
}
break;
case XUGU_VARCHAR:
case XUGU_VARCHAR2:
builder.dataType(BasicType.STRING_TYPE);
if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
builder.columnLength(TypeDefineUtils.charTo4ByteLength(MAX_VARCHAR_LENGTH));
} else {
builder.columnLength(typeDefine.getLength());
}
break;
case XUGU_CLOB:
builder.dataType(BasicType.STRING_TYPE);
builder.columnLength(BYTES_2GB - 1);
break;
case XUGU_JSON:
case XUGU_GUID:
builder.dataType(BasicType.STRING_TYPE);
break;
case XUGU_BINARY:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(MAX_BINARY_LENGTH);
break;
case XUGU_BLOB:
builder.dataType(PrimitiveByteArrayType.INSTANCE);
builder.columnLength(BYTES_2GB - 1);
break;
case XUGU_DATE:
builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
break;
case XUGU_TIME:
case XUGU_TIME_WITH_TIME_ZONE:
builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
break;
case XUGU_DATETIME:
case XUGU_DATETIME_WITH_TIME_ZONE:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
break;
case XUGU_TIMESTAMP:
case XUGU_TIMESTAMP_WITH_TIME_ZONE:
builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
if (typeDefine.getScale() == null) {
builder.scale(TIMESTAMP_DEFAULT_SCALE);
} else {
builder.scale(typeDefine.getScale());
}
break;
default:
throw CommonError.convertToSeaTunnelTypeError(
DatabaseIdentifier.XUGU, xuguDataType, typeDefine.getName());
}
return builder.build();
} | @Test
public void testConvertBinary() {
BasicTypeDefine<Object> typeDefine =
BasicTypeDefine.builder().name("test").columnType("blob").dataType("blob").build();
Column column = XuguTypeConverter.INSTANCE.convert(typeDefine);
Assertions.assertEquals(typeDefine.getName(), column.getName());
Assertions.assertEquals(PrimitiveByteArrayType.INSTANCE, column.getDataType());
Assertions.assertEquals(BYTES_2GB - 1, column.getColumnLength());
Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
} |
@VisibleForTesting
void dropPartitionsInternal(String catName, String dbName, String tblName,
List<String> partNames, boolean allowSql, boolean allowJdo)
throws MetaException, NoSuchObjectException {
if (CollectionUtils.isEmpty(partNames)) {
return;
}
new GetListHelper<Void>(catName, dbName, tblName, allowSql, allowJdo) {
@Override
protected List<Void> getSqlResult(GetHelper<List<Void>> ctx) throws MetaException {
directSql.dropPartitionsViaSqlFilter(catName, dbName, tblName, partNames);
return Collections.emptyList();
}
@Override
protected List<Void> getJdoResult(GetHelper<List<Void>> ctx) throws MetaException {
dropPartitionsViaJdo(catName, dbName, tblName, partNames);
return Collections.emptyList();
}
}.run(false);
} | @Test
public void testDirectSQLDropPartitionsCleanup() throws Exception {
createPartitionedTable(true, true);
// Check, that every table in the expected state before the drop
checkBackendTableSize("PARTITIONS", 3);
checkBackendTableSize("PART_PRIVS", 3);
checkBackendTableSize("PART_COL_PRIVS", 3);
checkBackendTableSize("PART_COL_STATS", 3);
checkBackendTableSize("PARTITION_PARAMS", 3);
checkBackendTableSize("PARTITION_KEY_VALS", 3);
checkBackendTableSize("SD_PARAMS", 3);
checkBackendTableSize("BUCKETING_COLS", 3);
checkBackendTableSize("SKEWED_COL_NAMES", 3);
checkBackendTableSize("SDS", 4); // Table has an SDS
checkBackendTableSize("SORT_COLS", 3);
checkBackendTableSize("SERDE_PARAMS", 3);
checkBackendTableSize("SERDES", 4); // Table has a serde
// drop the partitions
try (AutoCloseable c = deadline()) {
objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, false);
}
// Check, if every data is dropped connected to the partitions
checkBackendTableSize("PARTITIONS", 0);
checkBackendTableSize("PART_PRIVS", 0);
checkBackendTableSize("PART_COL_PRIVS", 0);
checkBackendTableSize("PART_COL_STATS", 0);
checkBackendTableSize("PARTITION_PARAMS", 0);
checkBackendTableSize("PARTITION_KEY_VALS", 0);
checkBackendTableSize("SD_PARAMS", 0);
checkBackendTableSize("BUCKETING_COLS", 0);
checkBackendTableSize("SKEWED_COL_NAMES", 0);
checkBackendTableSize("SDS", 1); // Table has an SDS
checkBackendTableSize("SORT_COLS", 0);
checkBackendTableSize("SERDE_PARAMS", 0);
checkBackendTableSize("SERDES", 1); // Table has a serde
} |
public List<ChangeStreamRecord> toChangeStreamRecords(
PartitionMetadata partition,
ChangeStreamResultSet resultSet,
ChangeStreamResultSetMetadata resultSetMetadata) {
if (this.isPostgres()) {
    // In PostgreSQL, change stream records are returned as JsonB.
return Collections.singletonList(
toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
}
// In GoogleSQL, change stream records are returned as an array of structs.
return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
.flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
.collect(Collectors.toList());
} | @Test
public void testMappingStructRowToHeartbeatRecord() {
final HeartbeatRecord heartbeatRecord =
new HeartbeatRecord(Timestamp.ofTimeSecondsAndNanos(10L, 20), null);
final Struct struct = recordsToStructWithStrings(heartbeatRecord);
ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
when(resultSet.getCurrentRowAsStruct()).thenReturn(struct);
assertEquals(
Collections.singletonList(heartbeatRecord),
mapper.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
} |
@Override
public String getFieldDefinition( ValueMetaInterface v, String tk, String pk, boolean useAutoinc,
boolean addFieldName, boolean addCr ) {
String retval = "";
String fieldname = v.getName();
int length = v.getLength();
int precision = v.getPrecision();
if ( addFieldName ) {
retval += fieldname + " ";
}
int type = v.getType();
switch ( type ) {
case ValueMetaInterface.TYPE_TIMESTAMP:
case ValueMetaInterface.TYPE_DATE:
retval += "DATETIME NULL";
break;
case ValueMetaInterface.TYPE_BOOLEAN:
if ( supportsBooleanDataType() ) {
retval += "BOOLEAN";
} else {
retval += "CHAR(1)";
}
break;
case ValueMetaInterface.TYPE_NUMBER:
case ValueMetaInterface.TYPE_INTEGER:
case ValueMetaInterface.TYPE_BIGNUMBER:
if ( fieldname.equalsIgnoreCase( tk ) || // Technical key: auto increment field!
fieldname.equalsIgnoreCase( pk ) // Primary key
) {
if ( useAutoinc ) {
retval += "INTEGER IDENTITY NOT NULL";
} else {
retval += "INTEGER NOT NULL PRIMARY KEY";
}
} else {
if ( precision != 0 || ( precision == 0 && length > 9 ) ) {
if ( precision > 0 && length > 0 ) {
retval += "DECIMAL(" + length + ", " + precision + ") NULL";
} else {
retval += "DOUBLE PRECISION NULL";
}
} else {
// Precision == 0 && length<=9
if ( length < 3 ) {
retval += "TINYINT NULL";
} else if ( length < 5 ) {
retval += "SMALLINT NULL";
} else {
retval += "INTEGER NULL";
}
}
}
break;
case ValueMetaInterface.TYPE_STRING:
if ( length >= 2048 ) {
retval += "TEXT NULL";
} else {
retval += "VARCHAR";
if ( length > 0 ) {
retval += "(" + length + ")";
}
retval += " NULL";
}
break;
default:
retval += " UNKNOWN";
break;
}
if ( addCr ) {
retval += Const.CR;
}
return retval;
} | @Test
public void testGetFieldDefinition() {
assertEquals( "FOO DATETIME NULL",
nativeMeta.getFieldDefinition( new ValueMetaTimestamp( "FOO" ), "", "", false, true, false ) );
assertEquals( "DATETIME NULL",
nativeMeta.getFieldDefinition( new ValueMetaDate( "FOO" ), "", "", false, false, false ) );
assertEquals( "CHAR(1)",
nativeMeta.getFieldDefinition( new ValueMetaBoolean( "FOO" ), "", "", false, false, false ) );
assertEquals( "INTEGER NOT NULL PRIMARY KEY",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO" ), "FOO", "", false, false, false ) );
assertEquals( "INTEGER NOT NULL PRIMARY KEY",
nativeMeta.getFieldDefinition( new ValueMetaInteger( "FOO" ), "", "FOO", false, false, false ) );
assertEquals( "DOUBLE PRECISION NULL",
nativeMeta.getFieldDefinition( new ValueMetaNumber( "FOO" ), "", "", false, false, false ) );
assertEquals( "DECIMAL(11, 3) NULL",
nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 11, 3 ), "", "", false, false, false ) );
assertEquals( "TINYINT NULL",
nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 2, 0 ), "", "", false, false, false ) );
assertEquals( "SMALLINT NULL",
nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 3, 0 ), "", "", false, false, false ) );
assertEquals( "SMALLINT NULL",
nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 4, 0 ), "", "", false, false, false ) );
assertEquals( "INTEGER NULL",
nativeMeta.getFieldDefinition( new ValueMetaBigNumber( "FOO", 5, 0 ), "", "", false, false, false ) );
assertEquals( "VARCHAR(15) NULL",
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 15, 0 ), "", "", false, false, false ) );
assertEquals( "TEXT NULL",
nativeMeta.getFieldDefinition( new ValueMetaString( "FOO", 2050, 0 ), "", "", false, false, false ) );
assertEquals( " UNKNOWN",
nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, false ) );
assertEquals( " UNKNOWN",
nativeMeta.getFieldDefinition( new ValueMetaBinary( "FOO" ), "", "", false, false, false ) );
assertEquals( " UNKNOWN" + System.getProperty( "line.separator" ),
nativeMeta.getFieldDefinition( new ValueMetaInternetAddress( "FOO" ), "", "", false, false, true ) );
} |
@Override
public void handle(final RoutingContext routingContext) {
// We must set it to allow chunked encoding if we're using http1.1
if (routingContext.request().version() == HttpVersion.HTTP_1_1) {
routingContext.response().putHeader(TRANSFER_ENCODING, CHUNKED_ENCODING);
} else if (routingContext.request().version() == HttpVersion.HTTP_2) {
// Nothing required
} else {
routingContext.fail(BAD_REQUEST.code(),
new KsqlApiException("This endpoint is only available when using HTTP1.1 or HTTP2",
ERROR_CODE_BAD_REQUEST));
}
final CommonRequest request = getRequest(routingContext);
if (request == null) {
return;
}
final Optional<Boolean> internalRequest = ServerVerticle.isInternalRequest(routingContext);
final MetricsCallbackHolder metricsCallbackHolder = new MetricsCallbackHolder();
final long startTimeNanos = Time.SYSTEM.nanoseconds();
endpoints.createQueryPublisher(
request.sql, request.configOverrides, request.sessionProperties,
request.requestProperties,
context, server.getWorkerExecutor(),
DefaultApiSecurityContext.create(routingContext, server), metricsCallbackHolder,
internalRequest)
.thenAccept(publisher -> {
if (publisher instanceof BlockingPrintPublisher) {
handlePrintPublisher(
routingContext,
(BlockingPrintPublisher) publisher);
} else {
handleQueryPublisher(
routingContext,
(QueryPublisher) publisher,
metricsCallbackHolder,
startTimeNanos);
}
})
.exceptionally(t ->
ServerUtils.handleEndpointException(t, routingContext, "Failed to execute query"));
} | @Test
public void shouldSucceed_pushQuery() {
// Given:
when(publisher.isPullQuery()).thenReturn(false);
final QueryStreamArgs req = new QueryStreamArgs("select * from foo emit changes;",
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
givenRequest(req);
when(connectionQueryManager.createApiQuery(any(), any())).thenReturn(pushQueryHolder);
// When:
handler.handle(routingContext);
endHandler.getValue().handle(null);
// Then:
assertThat(subscriber.getValue(), notNullValue());
verify(pushQueryHolder).close();
} |
public static FactoryBuilder newFactoryBuilder() {
return new FactoryBuilder();
} | @Test void injectKindFormats_cantBeSame() {
assertThatThrownBy(() -> B3Propagation.newFactoryBuilder()
.injectFormats(Span.Kind.CLIENT, Format.MULTI, Format.MULTI))
.isInstanceOf(IllegalArgumentException.class);
} |
public Set<LongPair> items() {
Set<LongPair> items = new HashSet<>(this.size);
forEach((item1, item2) -> items.add(new LongPair(item1, item2)));
return items;
} | @Test
public void testItems() {
GrowablePriorityLongPairQueue queue = new GrowablePriorityLongPairQueue();
int n = 100;
int limit = 10;
for (int i = 0; i < n; i++) {
queue.add(i, i);
}
Set<LongPair> items = queue.items();
Set<LongPair> limitItems = queue.items(limit);
assertEquals(items.size(), n);
assertEquals(limitItems.size(), limit);
int totalRemovedItems = queue.removeIf((first, second) -> limitItems.contains((new LongPair(first, second))));
assertEquals(limitItems.size(), totalRemovedItems);
assertEquals(queue.size(), n - limit);
} |
public static RestSettingBuilder get(final String id) {
return get(eq(checkId(id)));
} | @Test
public void should_throw_exception_for_resource_name_with_space() {
assertThrows(IllegalArgumentException.class, () ->
server.resource("hello world", get().response("hello")));
} |
@Override
public void registerHints(@NonNull RuntimeHints hints, @Nullable ClassLoader classLoader) {
var chatTypes = Set.of(AbstractMessage.class, AssistantMessage.class, ToolResponseMessage.class, Message.class,
MessageType.class, UserMessage.class, SystemMessage.class, FunctionCallbackContext.class,
FunctionCallback.class, FunctionCallbackWrapper.class);
for (var c : chatTypes) {
hints.reflection().registerType(c);
}
Method getDescription = ReflectionUtils.findMethod(FunctionCallback.class, "getDescription");
hints.reflection().registerMethod(getDescription, ExecutableMode.INVOKE);
Method getInputTypeSchema = ReflectionUtils.findMethod(FunctionCallback.class, "getInputTypeSchema");
hints.reflection().registerMethod(getInputTypeSchema, ExecutableMode.INVOKE);
Method getName = ReflectionUtils.findMethod(FunctionCallback.class, "getName");
hints.reflection().registerMethod(getName, ExecutableMode.INVOKE);
for (var r : Set.of("antlr4/org/springframework/ai/vectorstore/filter/antlr4/Filters.g4",
"embedding/embedding-model-dimensions.properties"))
hints.resources().registerResource(new ClassPathResource(r));
} | @Test
void core() {
var runtimeHints = new RuntimeHints();
var springAiCore = new SpringAiCoreRuntimeHints();
springAiCore.registerHints(runtimeHints, null);
assertThat(runtimeHints).matches(resource().forResource("embedding/embedding-model-dimensions.properties"));
assertThat(runtimeHints).matches(reflection().onMethod(FunctionCallback.class, "getDescription"));
assertThat(runtimeHints).matches(reflection().onMethod(FunctionCallback.class, "getInputTypeSchema"));
assertThat(runtimeHints).matches(reflection().onMethod(FunctionCallback.class, "getName"));
} |
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter,
MetricsRecorder metricsRecorder,
BufferSupplier bufferSupplier) {
if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) {
// check the magic value
if (!records.hasMatchingMagic(toMagic))
return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder);
else
// Do in-place validation, offset assignment and maybe set timestamp
return assignOffsetsNonCompressed(offsetCounter, metricsRecorder);
} else
return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier);
} | @Test
public void testRelativeOffsetAssignmentNonCompressedV1() {
long now = System.currentTimeMillis();
MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V1, now, Compression.NONE);
long offset = 1234567;
checkOffsets(records, 0);
MemoryRecords messageWithOffset = new LogValidator(
records,
new TopicPartition("topic", 0),
time,
CompressionType.NONE,
Compression.NONE,
false,
RecordBatch.MAGIC_VALUE_V1,
TimestampType.CREATE_TIME,
5000L,
5000L,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
AppendOrigin.CLIENT,
MetadataVersion.latestTesting()
).validateMessagesAndAssignOffsets(
PrimitiveRef.ofLong(offset),
metricsRecorder,
RequestLocal.withThreadConfinedCaching().bufferSupplier()
).validatedRecords;
checkOffsets(messageWithOffset, offset);
} |
public static List<Group> enumerateFrom(Group root) {
List<Group> leaves = new ArrayList<>();
visitNode(root, leaves);
return leaves;
} | @Test
void singleLeafIsEnumerated() throws Exception {
Group g = new Group(0, "donkeykong", dummyDistribution());
Group child = new Group(1, "mario");
g.addSubGroup(child);
List<Group> leaves = LeafGroups.enumerateFrom(g);
assertThat(leaves.size(), is(1));
assertThat(leaves.get(0).getName(), is("mario"));
} |
public static WebServiceClient getWebServiceClient() {
return instance;
} | @Test
void testGetWebServiceClient() throws Exception {
Configuration conf = new Configuration();
conf.set(YarnConfiguration.YARN_HTTP_POLICY_KEY, "HTTPS_ONLY");
WebServiceClient.initialize(conf);
WebServiceClient client = WebServiceClient.getWebServiceClient();
assertNotNull(client.getSSLFactory());
WebServiceClient.destroy();
} |
Future<Boolean> canRollController(int nodeId) {
LOGGER.debugCr(reconciliation, "Determining whether controller pod {} can be rolled", nodeId);
return describeMetadataQuorum().map(info -> {
boolean canRoll = isQuorumHealthyWithoutNode(nodeId, info);
if (!canRoll) {
LOGGER.debugCr(reconciliation, "Not restarting controller pod {}. Restart would affect the quorum health", nodeId);
}
return canRoll;
}).recover(error -> {
LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart controller pod {}", nodeId, error);
return Future.failedFuture(error);
});
} | @Test
public void canRollActiveControllerOddSizedCluster(VertxTestContext context) {
Map<Integer, OptionalLong> controllers = new HashMap<>();
controllers.put(1, OptionalLong.of(10000L));
controllers.put(2, OptionalLong.of(9500L));
controllers.put(3, OptionalLong.of(9700L));
Admin admin = setUpMocks(1, controllers);
KafkaQuorumCheck quorumCheck = new KafkaQuorumCheck(Reconciliation.DUMMY_RECONCILIATION, admin, vertx, CONTROLLER_QUORUM_FETCH_TIMEOUT_MS);
quorumCheck.canRollController(1).onComplete(context.succeeding(result -> {
context.verify(() -> assertTrue(result));
context.completeNow();
}));
} |
public SqlType getExpressionSqlType(final Expression expression) {
return getExpressionSqlType(expression, Collections.emptyMap());
} | @Test
public void shouldProcessTimeLiteral() {
assertThat(expressionTypeManager.getExpressionSqlType(new TimeLiteral(new Time(1000))), is(SqlTypes.TIME));
} |
@Override
public void setParallelism(int parallelism) {
checkParallelism(parallelism);
Preconditions.checkArgument(
parallelism <= maxParallelism,
"Vertex's parallelism should be smaller than or equal to vertex's max parallelism.");
Preconditions.checkState(
this.parallelism == ExecutionConfig.PARALLELISM_DEFAULT,
"Vertex's parallelism can be set only if the vertex's parallelism was not decided yet.");
this.parallelism = parallelism;
} | @Test
void testSetParallelism() {
DefaultVertexParallelismInfo info =
new DefaultVertexParallelismInfo(
ExecutionConfig.PARALLELISM_DEFAULT, 10, ALWAYS_VALID);
// test set negative value
assertThatThrownBy(() -> info.setParallelism(-1))
.withFailMessage("parallelism is not in valid bounds")
.isInstanceOf(IllegalArgumentException.class);
// test parallelism larger than max parallelism
assertThatThrownBy(() -> info.setParallelism(11))
.withFailMessage(
"Vertex's parallelism should be smaller than or equal to vertex's max parallelism.")
.isInstanceOf(IllegalArgumentException.class);
// set valid value.
info.setParallelism(5);
// test set parallelism for vertex whose parallelism was decided.
assertThatThrownBy(() -> info.setParallelism(5))
.withFailMessage(
"Vertex's parallelism can be set only if the vertex's parallelism was not decided yet.")
.isInstanceOf(IllegalStateException.class);
} |
@VisibleForTesting
ImmutableList<EventWithContext> eventsFromAggregationResult(EventFactory eventFactory, AggregationEventProcessorParameters parameters, AggregationResult result)
throws EventProcessorException {
final ImmutableList.Builder<EventWithContext> eventsWithContext = ImmutableList.builder();
final Set<String> sourceStreams = eventStreamService.buildEventSourceStreams(getStreams(parameters),
result.sourceStreams());
for (final AggregationKeyResult keyResult : result.keyResults()) {
if (!satisfiesConditions(keyResult)) {
LOG.debug("Skipping result <{}> because the conditions <{}> don't match", keyResult, config.conditions());
continue;
}
final String keyString = String.join("|", keyResult.key());
final String eventMessage = createEventMessageString(keyString, keyResult);
// Extract event time and range from the key result or use query time range as fallback.
// These can be different, e.g. during catch up processing.
final DateTime eventTime = keyResult.timestamp().orElse(result.effectiveTimerange().to());
final Event event = eventFactory.createEvent(eventDefinition, eventTime, eventMessage);
// The keyResult timestamp is set to the end of the range
event.setTimerangeStart(keyResult.timestamp().map(t -> t.minus(config.searchWithinMs())).orElse(parameters.timerange().getFrom()));
event.setTimerangeEnd(keyResult.timestamp().orElse(parameters.timerange().getTo()));
event.setReplayInfo(EventReplayInfo.builder()
.timerangeStart(event.getTimerangeStart())
.timerangeEnd(event.getTimerangeEnd())
.query(config.query())
.streams(sourceStreams)
.filters(config.filters())
.build());
sourceStreams.forEach(event::addSourceStream);
final Map<String, Object> fields = new HashMap<>();
// Each group value will be a separate field in the message to make it usable as event fields.
//
// Example result:
// groupBy=["application_name", "username"]
// result-key=["sshd", "jane"]
//
// Message fields:
// application_name=sshd
// username=jane
for (int i = 0; i < config.groupBy().size(); i++) {
try {
fields.put(config.groupBy().get(i), keyResult.key().get(i));
} catch (IndexOutOfBoundsException e) {
throw new EventProcessorException(
"Couldn't create events for: " + eventDefinition.title() + " (possibly due to non-existing grouping fields)",
false, eventDefinition.id(), eventDefinition, e);
}
}
// Group By fields need to be saved on the event so they are available to the subsequent notification events
event.setGroupByFields(fields.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString())));
// The field name for the series value is composed of the series function and field. We don't take the
// series ID into account because it would be very hard to use for the user. That means a series with
// the same function and field but different ID would overwrite a previous one.
// This shouldn't be a problem though, because the same function and field will always compute the same
// value.
//
// Examples:
// aggregation_value_count_source=42
// aggregation_value_card_anonid=23
for (AggregationSeriesValue seriesValue : keyResult.seriesValues()) {
final String function = seriesValue.series().type().toLowerCase(Locale.ROOT);
final Optional<String> field = fieldFromSeries(seriesValue.series());
final String fieldName = field.map(f -> String.format(Locale.ROOT, "aggregation_value_%s_%s", function, f))
.orElseGet(() -> String.format(Locale.ROOT, "aggregation_value_%s", function));
fields.put(fieldName, seriesValue.value());
}
// This is the concatenated key value
fields.put("aggregation_key", keyString);
// TODO: Can we find a useful source value?
final Message message = messageFactory.createMessage(eventMessage, "", result.effectiveTimerange().to());
message.addFields(fields);
// Ask any event query modifier for its state and collect it into the event modifier state
final Map<String, Object> eventModifierState = eventQueryModifiers.stream()
.flatMap(modifier -> modifier.eventModifierData(result.additionalResults()).entrySet().stream())
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
LOG.debug("Creating event {}/{} - {} {} ({})", eventDefinition.title(), eventDefinition.id(), keyResult.key(), seriesString(keyResult), fields);
eventsWithContext.add(EventWithContext.builder()
.event(event)
.messageContext(message)
.eventModifierState(eventModifierState)
.build());
}
return eventsWithContext.build();
} | @Test
public void testEventsFromAggregationResultWithEmptyResultAndNoConfiguredStreamsUsesAllStreamsAsSourceStreams() throws EventProcessorException {
final DateTime now = DateTime.now(DateTimeZone.UTC);
final AbsoluteRange timerange = AbsoluteRange.create(now.minusHours(1), now.minusHours(1).plusMillis(SEARCH_WINDOW_MS));
// We expect to get the end of the aggregation timerange as event time
final TestEvent event1 = new TestEvent(timerange.to());
final TestEvent event2 = new TestEvent(timerange.to());
when(eventFactory.createEvent(any(EventDefinition.class), any(DateTime.class), anyString()))
.thenReturn(event1) // first invocation return value
.thenReturn(event2); // second invocation return value
final EventDefinitionDto eventDefinitionDto = buildEventDefinitionDto(ImmutableSet.of(), ImmutableList.of(), null, emptyList());
final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder()
.timerange(timerange)
.build();
final AggregationEventProcessor eventProcessor = new AggregationEventProcessor(eventDefinitionDto, searchFactory, eventProcessorDependencyCheck, stateService, moreSearch,
eventStreamService, messages, notificationService, permittedStreams, Set.of(), messageFactory);
final AggregationResult result = buildAggregationResult(timerange, timerange.to(), ImmutableList.of("one", "two"));
final ImmutableList<EventWithContext> eventsWithContext = eventProcessor.eventsFromAggregationResult(eventFactory, parameters, result);
assertThat(eventsWithContext).hasSize(1);
assertThat(eventsWithContext.get(0)).satisfies(eventWithContext -> {
final Event event = eventWithContext.event();
assertThat(event.getId()).isEqualTo(event1.getId());
assertThat(event.getMessage()).isEqualTo(event1.getMessage());
assertThat(event.getEventTimestamp()).isEqualTo(timerange.to());
assertThat(event.getTimerangeStart()).isEqualTo(timerange.from());
assertThat(event.getTimerangeEnd()).isEqualTo(timerange.to());
// Must contain all existing streams but the default event streams!
assertThat(event.getSourceStreams()).containsOnly(
"stream-1",
"stream-2",
"stream-3",
StreamImpl.DEFAULT_STREAM_ID
);
final Message message = eventWithContext.messageContext().orElse(null);
assertThat(message).isNotNull();
assertThat(message.getField("group_field_one")).isEqualTo("one");
assertThat(message.getField("group_field_two")).isEqualTo("two");
assertThat(message.getField("aggregation_key")).isEqualTo("one|two");
assertThat(message.getField("aggregation_value_count")).isEqualTo(0.0d);
});
} |
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
GuardedByExpression expr = BINDER.visit(exp, context);
checkGuardedBy(expr != null, String.valueOf(exp));
checkGuardedBy(expr.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
return expr;
} | @Test
public void explicitThisOuterClass() {
assertThat(
bind(
"Inner",
"this.lock",
forSourceLines(
"threadsafety/Test.java",
"package threadsafety;",
"class Outer {",
" Object lock;",
" class Inner {",
" int x;",
" }",
"}")))
.isEqualTo("(SELECT (SELECT (THIS) outer$threadsafety.Outer) lock)");
} |
public static CidrBlock fromString(String cidr) {
String[] cidrParts = cidr.split("/");
if (cidrParts.length != 2)
throw new IllegalArgumentException("Invalid CIDR block, expected format to be " +
"'<ip address>/<prefix size>', but was '" + cidr + "'");
InetAddress inetAddress = InetAddresses.forString(cidrParts[0]);
int prefixSize = Integer.parseInt(cidrParts[1]);
return new CidrBlock(inetAddress, prefixSize);
} | @Test
public void parse_from_string_test() {
assertEquals(new CidrBlock(InetAddresses.forString("10.0.0.1"), 12), CidrBlock.fromString("10.0.0.1/12"));
assertEquals(new CidrBlock(InetAddresses.forString("1234:5678::1"), 64), CidrBlock.fromString("1234:5678:0000::1/64"));
} |
public static Destination convertToJcsmpDestination(Solace.Destination destination) {
if (destination.getType().equals(Solace.DestinationType.TOPIC)) {
return topicFromName(checkNotNull(destination.getName()));
} else if (destination.getType().equals(Solace.DestinationType.QUEUE)) {
return queueFromName(checkNotNull(destination.getName()));
} else {
throw new IllegalArgumentException(
"SolaceIO.Write: Unknown destination type: " + destination.getType());
}
} | @Test
public void testTopicEncoding() {
MockSessionService mockClientService =
new MockSessionService(
index -> {
List<BytesXMLMessage> messages =
ImmutableList.of(
SolaceDataUtils.getBytesXmlMessage("payload_test0", "450"),
SolaceDataUtils.getBytesXmlMessage("payload_test1", "451"),
SolaceDataUtils.getBytesXmlMessage("payload_test2", "452"));
return getOrNull(index, messages);
},
3);
SessionServiceFactory fakeSessionServiceFactory =
new MockSessionServiceFactory(mockClientService);
// Run
PCollection<Solace.Record> events =
pipeline.apply(
"Read from Solace",
getDefaultRead().withSessionServiceFactory(fakeSessionServiceFactory));
// Run the pipeline
PCollection<Boolean> destAreTopics =
events.apply(
MapElements.into(TypeDescriptors.booleans())
.via(
r -> {
Destination dest = SolaceIO.convertToJcsmpDestination(r.getDestination());
return dest instanceof Topic;
}));
List<Boolean> expected = ImmutableList.of(true, true, true);
// Assert results
PAssert.that(destAreTopics).containsInAnyOrder(expected);
pipeline.run();
} |
@Override
public boolean process(HttpServletRequest request, HttpServletResponse response, FilterChain chain)
throws IOException, ServletException {
if (TokenReloadAction.tokenReloadEnabled()) {
String pathInfo = request.getPathInfo();
if (pathInfo != null && pathInfo.equals("/" + TokenReloadAction.URL_NAME + "/")) {
chain.doFilter(request, response);
return true;
}
}
return false;
} | @Test
public void crumbExclusionChecksRequestPath() throws Exception {
System.setProperty("casc.reload.token", "someSecretValue");
TokenReloadCrumbExclusion crumbExclusion = new TokenReloadCrumbExclusion();
assertFalse(crumbExclusion.process(new MockHttpServletRequest("/reload-configuration-as-code/2"), null, null));
} |