focal_method | test_case |
---|---|
@Override
public AnalysisPhase getAnalysisPhase() {
return ANALYSIS_PHASE;
}
|
@Test
public void testGetAnalysisPhase() {
HintAnalyzer instance = new HintAnalyzer();
AnalysisPhase expResult = AnalysisPhase.POST_INFORMATION_COLLECTION2;
AnalysisPhase result = instance.getAnalysisPhase();
assertEquals(expResult, result);
}
|
public int getDeleteReservationFailedRetrieved() {
return numDeleteReservationFailedRetrieved.value();
}
|
@Test
public void testGetDeleteReservationFailedRetrieved() {
long totalBadBefore = metrics.getDeleteReservationFailedRetrieved();
badSubCluster.getDeleteReservationFailed();
Assert.assertEquals(totalBadBefore + 1,
metrics.getDeleteReservationFailedRetrieved());
}
|
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowDistVariableStatement sqlStatement, final ContextManager contextManager) {
ShardingSphereMetaData metaData = contextManager.getMetaDataContexts().getMetaData();
String variableName = sqlStatement.getName();
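// Resolve the variable in priority order: persisted configuration keys first, then
// temporary configuration keys, otherwise fall back to runtime values such as CACHED_CONNECTIONS.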
if (isConfigurationKey(variableName)) {
return Collections.singleton(new LocalDataQueryResultRow(variableName.toLowerCase(), getConfigurationValue(metaData, variableName)));
}
if (isTemporaryConfigurationKey(variableName)) {
return Collections.singleton(new LocalDataQueryResultRow(variableName.toLowerCase(), getTemporaryConfigurationValue(metaData, variableName)));
}
return Collections.singleton(new LocalDataQueryResultRow(variableName.toLowerCase(), getConnectionSize(variableName)));
}
|
@Test
void assertShowCachedConnections() {
ShowDistVariableExecutor executor = new ShowDistVariableExecutor();
executor.setConnectionContext(new DistSQLConnectionContext(mock(QueryContext.class), 1,
mock(DatabaseType.class), mock(DatabaseConnectionManager.class), mock(ExecutorStatementManager.class)));
Collection<LocalDataQueryResultRow> actual = executor.getRows(new ShowDistVariableStatement("CACHED_CONNECTIONS"), contextManager);
assertThat(actual.size(), is(1));
LocalDataQueryResultRow row = actual.iterator().next();
assertThat(row.getCell(1), is("cached_connections"));
assertThat(row.getCell(2), is("1"));
}
|
@GET
@Path("/entity-uid/{uid}/")
@Produces(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8)
public TimelineEntity getEntity(
@Context HttpServletRequest req,
@Context HttpServletResponse res,
@PathParam("uid") String uId,
@QueryParam("confstoretrieve") String confsToRetrieve,
@QueryParam("metricstoretrieve") String metricsToRetrieve,
@QueryParam("fields") String fields,
@QueryParam("metricslimit") String metricsLimit,
@QueryParam("metricstimestart") String metricsTimeStart,
@QueryParam("metricstimeend") String metricsTimeEnd) {
String url = req.getRequestURI() +
(req.getQueryString() == null ? "" :
QUERY_STRING_SEP + req.getQueryString());
UserGroupInformation callerUGI =
TimelineReaderWebServicesUtils.getUser(req);
LOG.info("Received URL {} from user {}",
url, TimelineReaderWebServicesUtils.getUserName(callerUGI));
long startTime = Time.monotonicNow();
boolean succeeded = false;
init(res);
TimelineReaderManager timelineReaderManager = getTimelineReaderManager();
TimelineEntity entity = null;
try {
TimelineReaderContext context =
TimelineUIDConverter.GENERIC_ENTITY_UID.decodeUID(uId);
if (context == null) {
throw new BadRequestException("Incorrect UID " + uId);
}
entity = timelineReaderManager.getEntity(context,
TimelineReaderWebServicesUtils.createTimelineDataToRetrieve(
confsToRetrieve, metricsToRetrieve, fields, metricsLimit,
metricsTimeStart, metricsTimeEnd));
checkAccessForGenericEntity(entity, callerUGI);
succeeded = true;
} catch (Exception e) {
handleException(e, url, startTime, "Either metricslimit or metricstime"
+ " start/end");
} finally {
long latency = Time.monotonicNow() - startTime;
METRICS.addGetEntitiesLatency(latency, succeeded);
LOG.info("Processed URL {} (Took {} ms.)", url, latency);
}
if (entity == null) {
LOG.info("Processed URL {} but entity not found" + " (Took {} ms.)",
url, (Time.monotonicNow() - startTime));
throw new NotFoundException("Timeline entity with uid: " + uId +
" is not found");
}
return entity;
}
|
@Test
void testGetEntitiesBasedOnCreatedTime() throws Exception {
Client client = createClient();
try {
URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
"timeline/clusters/cluster1/apps/app1/entities/app?" +
"createdtimestart=1425016502030&createdtimeend=1425016502060");
ClientResponse resp = getResponse(client, uri);
Set<TimelineEntity> entities =
resp.getEntity(new GenericType<Set<TimelineEntity>>(){
});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(1, entities.size());
assertTrue(entities.contains(newEntity("app", "id_4")),
"Entity with id_4 should have been present in response.");
uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
"clusters/cluster1/apps/app1/entities/app?createdtimeend" +
"=1425016502010");
resp = getResponse(client, uri);
entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){
});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(3, entities.size());
assertFalse(entities.contains(newEntity("app", "id_4")),
"Entity with id_4 should not have been present in response.");
uri = URI.create("http://localhost:" + serverPort + "/ws/v2/timeline/" +
"clusters/cluster1/apps/app1/entities/app?createdtimestart=" +
"1425016502010");
resp = getResponse(client, uri);
entities = resp.getEntity(new GenericType<Set<TimelineEntity>>(){
});
assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
resp.getType().toString());
assertNotNull(entities);
assertEquals(1, entities.size());
assertTrue(entities.contains(newEntity("app", "id_4")),
"Entity with id_4 should have been present in response.");
} finally {
client.destroy();
}
}
|
@Override
public T add(K name, V value) {
throw new UnsupportedOperationException("read only");
}
|
@Test
public void testAddStringValues() {
assertThrows(UnsupportedOperationException.class, new Executable() {
@Override
public void execute() {
HEADERS.add("name", "value1", "value2");
}
});
}
|
@Override
public boolean shouldCopy(Path path) {
return true;
}
|
@Test
public void testShouldCopy() {
Assert.assertTrue(new TrueCopyFilter().shouldCopy(new Path("fake")));
}
|
@Override
public void afterChannelInitialized(final Channel channel) {
if (eventBus == null) {
throw new IllegalStateException("Event bus must be set before channel customization can occur");
}
final ChannelCircuitBreakerHandler channelCircuitBreakerHandler = new ChannelCircuitBreakerHandler(clusterName,
circuitBreakerConfig, upstreamAddresses, eventBus, scheduler);
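// Locate the Lettuce CommandHandler by scanning the pipeline, then splice the circuit
// breaker handler in immediately ahead of it.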
final String commandHandlerName = StreamSupport.stream(channel.pipeline().spliterator(), false)
.filter(entry -> entry.getValue() instanceof CommandHandler)
.map(Map.Entry::getKey)
.findFirst()
.orElseThrow();
channel.pipeline().addBefore(commandHandlerName, null, channelCircuitBreakerHandler);
}
|
@Test
void testAfterChannelInitialized() {
final LettuceShardCircuitBreaker lettuceShardCircuitBreaker = new LettuceShardCircuitBreaker("test",
new CircuitBreakerConfiguration().toCircuitBreakerConfig(), Schedulers.immediate());
lettuceShardCircuitBreaker.setEventBus(eventBus);
final Channel channel = new EmbeddedChannel(
new CommandHandler(ClientOptions.create(), ClientResources.create(), mock(Endpoint.class)));
lettuceShardCircuitBreaker.afterChannelInitialized(channel);
final AtomicBoolean foundCommandHandler = new AtomicBoolean(false);
final AtomicBoolean foundChannelCircuitBreakerHandler = new AtomicBoolean(false);
StreamSupport.stream(channel.pipeline().spliterator(), false)
.forEach(nameAndHandler -> {
if (nameAndHandler.getValue() instanceof CommandHandler) {
foundCommandHandler.set(true);
}
if (nameAndHandler.getValue() instanceof LettuceShardCircuitBreaker.ChannelCircuitBreakerHandler) {
foundChannelCircuitBreakerHandler.set(true);
}
if (foundCommandHandler.get()) {
assertTrue(foundChannelCircuitBreakerHandler.get(),
"circuit breaker handler should be before the command handler");
}
});
assertTrue(foundChannelCircuitBreakerHandler.get());
assertTrue(foundCommandHandler.get());
}
|
public RegistryBuilder appendParameter(String key, String value) {
this.parameters = appendParameter(parameters, key, value);
return getThis();
}
|
@Test
void appendParameter() {
RegistryBuilder builder = new RegistryBuilder();
builder.appendParameter("default.num", "one").appendParameter("num", "ONE");
Map<String, String> parameters = builder.build().getParameters();
Assertions.assertTrue(parameters.containsKey("default.num"));
Assertions.assertEquals("ONE", parameters.get("num"));
}
|
private MergeSortedPages() {}
|
@Test
public void testSimpleTwoStreams()
throws Exception
{
List<Type> types = ImmutableList.of(INTEGER);
MaterializedResult actual = mergeSortedPages(
types,
ImmutableList.of(0),
ImmutableList.of(ASC_NULLS_FIRST),
ImmutableList.of(
rowPagesBuilder(types)
.row(1)
.row(3)
.pageBreak()
.row(5)
.row(7)
.build(),
rowPagesBuilder(types)
.row(2)
.row(4)
.pageBreak()
.row(6)
.row(8)
.build()));
MaterializedResult expected = resultBuilder(TEST_SESSION, types)
.row(1)
.row(2)
.row(3)
.row(4)
.row(5)
.row(6)
.row(7)
.row(8)
.build();
assertEquals(actual, expected);
}
|
@Override
protected SchemaTransform from(Configuration configuration) {
return new JavaFilterTransform(configuration);
}
|
@Test
@Category(NeedsRunner.class)
public void testFilter() {
Schema inputSchema =
Schema.of(
Schema.Field.of("a", Schema.FieldType.STRING),
Schema.Field.of("b", Schema.FieldType.INT32),
Schema.Field.of("c", Schema.FieldType.DOUBLE));
PCollection<Row> input =
pipeline
.apply(
Create.of(
Row.withSchema(inputSchema).addValues("foo", 2, 0.5).build(),
Row.withSchema(inputSchema).addValues("bar", 4, 0.25).build()))
.setRowSchema(inputSchema);
PCollection<Row> renamed =
PCollectionRowTuple.of(JavaFilterTransformProvider.INPUT_ROWS_TAG, input)
.apply(
new JavaFilterTransformProvider()
.from(
JavaFilterTransformProvider.Configuration.builder()
.setKeep(
JavaRowUdf.Configuration.builder()
.setExpression("b + c > 3")
.build())
.build()))
.get(JavaFilterTransformProvider.OUTPUT_ROWS_TAG);
PAssert.that(renamed)
.containsInAnyOrder(
Row.withSchema(inputSchema)
.withFieldValue("a", "bar")
.withFieldValue("b", 4)
.withFieldValue("c", 0.25)
.build());
pipeline.run();
}
|
public boolean tryToMoveTo(State to) {
AtomicReference<State> lastFrom = new AtomicReference<>();
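// Atomically apply the transition: keep the current state unless TRANSITIONS permits
// moving to 'to'. Recording the observed 'from' state is safe even if updateAndGet
// retries, because the final attempt wins.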
State newState = this.state.updateAndGet(from -> {
lastFrom.set(from);
if (TRANSITIONS.get(from).contains(to)) {
return to;
}
return from;
});
boolean updated = newState == to && lastFrom.get() != to;
LOG.trace("tryToMoveTo from {} to {} => {}", lastFrom.get(), to, updated);
return updated;
}
|
@Test
public void can_move_to_HARD_STOPPING_from_any_state_but_INIT_HARD_STOPPING_and_STOPPED() {
for (State state : values()) {
boolean tryToMoveTo = newLifeCycle(state).tryToMoveTo(HARD_STOPPING);
if (state == INIT || state == STOPPED || state == HARD_STOPPING) {
assertThat(tryToMoveTo).describedAs("from state " + state).isFalse();
} else {
assertThat(tryToMoveTo).describedAs("from state " + state).isTrue();
}
}
}
|
@Override
public void putTaskConfigs(final String connName, final List<Map<String, String>> configs, final Callback<Void> callback, InternalRequestSignature requestSignature) {
log.trace("Submitting put task configuration request {}", connName);
if (requestNotSignedProperly(requestSignature, callback)) {
return;
}
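// Task configurations may only be written by the leader; non-leaders fail the callback
// with a NotLeaderException carrying the leader URL so the caller can forward the request.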
addRequest(
() -> {
if (!isLeader())
callback.onCompletion(new NotLeaderException("Only the leader may write task configurations.", leaderUrl()), null);
else if (!configState.contains(connName))
callback.onCompletion(new NotFoundException("Connector " + connName + " not found"), null);
else {
writeTaskConfigs(connName, configs);
callback.onCompletion(null, null);
}
return null;
},
forwardErrorAndTickThreadStages(callback)
);
}
|
@Test
public void testPutTaskConfigsInvalidSignature() {
when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2);
InternalRequestSignature signature = mock(InternalRequestSignature.class);
when(signature.keyAlgorithm()).thenReturn("HmacSHA256");
when(signature.isValid(any())).thenReturn(false);
SessionKey sessionKey = mock(SessionKey.class);
SecretKey secretKey = mock(SecretKey.class);
when(sessionKey.key()).thenReturn(secretKey);
when(sessionKey.creationTimestamp()).thenReturn(time.milliseconds());
// Read a new session key from the config topic
configUpdateListener.onSessionKeyUpdate(sessionKey);
Callback<Void> taskConfigCb = mock(Callback.class);
herder.putTaskConfigs(CONN1, TASK_CONFIGS, taskConfigCb, signature);
ArgumentCaptor<Throwable> errorCapture = ArgumentCaptor.forClass(Throwable.class);
verify(taskConfigCb).onCompletion(errorCapture.capture(), isNull());
assertInstanceOf(ConnectRestException.class, errorCapture.getValue());
assertEquals(FORBIDDEN.getStatusCode(), ((ConnectRestException) errorCapture.getValue()).statusCode());
verifyNoMoreInteractions(member, taskConfigCb);
}
|
public Document run() throws ParserConfigurationException, IOException, SAXException, TransformerException {
DocumentBuilder docBuilder = XML.getDocumentBuilder();
Document document = docBuilder.parse(new InputSource(xmlInput));
return execute(document);
}
|
@Test
public void testPreProcessing() throws Exception {
String expectedDev =
"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
<admin version="2.0">
<adminserver hostalias="node0"/>
</admin>
<content id="foo" version="1.0">
<redundancy>1</redundancy>
<documents>
<document mode="index" type="music.sd"/>
</documents>
<nodes>
<node distribution-key="0" hostalias="node0"/>
</nodes>
</content>
<container id="stateless" version="1.0">
<search/>
<component bundle="foobundle" class="MyFoo" id="foo"/>
<component bundle="foobundle" class="TestBar" id="bar"/>
<nodes>
<node hostalias="node0" baseport="5000"/>
</nodes>
</container>
</services>""";
TestBase.assertDocument(expectedDev,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.dev,
RegionName.defaultName(),
Cloud.defaultCloud().name(),
Tags.empty()).run());
// Difference from dev: no TestBar
String expectedStaging =
"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
<admin version="2.0">
<adminserver hostalias="node0"/>
</admin>
<content id="foo" version="1.0">
<redundancy>1</redundancy>
<documents>
<document mode="index" type="music.sd"/>
</documents>
<nodes>
<node distribution-key="0" hostalias="node0"/>
</nodes>
</content>
<container id="stateless" version="1.0">
<search/>
<component bundle="foobundle" class="MyFoo" id="foo"/>
<nodes>
<node hostalias="node0" baseport="5000"/>
</nodes>
</container>
</services>""";
TestBase.assertDocument(expectedStaging,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.staging,
RegionName.defaultName(),
Cloud.defaultCloud().name(),
Tags.empty()).run());
String expectedPerfUsWest =
"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
<admin version="2.0">
<adminserver hostalias="node0"/>
</admin>
<content id="foo" version="1.0">
<redundancy>1</redundancy>
<documents>
<document mode="index" type="music.sd"/>
</documents>
<nodes>
<node distribution-key="0" hostalias="node0"/>
</nodes>
</content>
<container id="stateless" version="1.0">
<search/>
<component bundle="foobundle" class="MyFoo" id="foo"/>
<nodes>
<node hostalias="node0" baseport="5000"/>
</nodes>
</container>
</services>""";
TestBase.assertDocument(expectedPerfUsWest,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.perf,
RegionName.from("us-west"),
Cloud.defaultCloud().name(), Tags.empty()).run());
String expectedPerfUsEastAndCentral =
"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
<admin version="2.0">
<adminserver hostalias="node0"/>
</admin>
<content id="foo" version="1.0">
<thread count="128"/>
<redundancy>1</redundancy>
<documents>
<document mode="index" type="music.sd"/>
</documents>
<nodes>
<node distribution-key="0" hostalias="node0"/>
</nodes>
</content>
<container id="stateless" version="1.0">
<search/>
<component bundle="foobundle" class="MyFoo" id="foo"/>
<nodes>
<node hostalias="node0" baseport="5000"/>
</nodes>
</container>
</services>""";
TestBase.assertDocument(expectedPerfUsEastAndCentral,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.perf,
RegionName.from("us-east"),
Cloud.defaultCloud().name(), Tags.empty()).run());
TestBase.assertDocument(expectedPerfUsEastAndCentral,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.perf,
RegionName.from("us-central"),
Cloud.defaultCloud().name(), Tags.empty()).run());
String expectedProdUsWest =
"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
<admin version="2.0">
<adminserver hostalias="node0"/>
</admin>
<content id="foo" version="1.0">
<redundancy>1</redundancy>
<documents>
<document mode="index" type="music.sd"/>
</documents>
<nodes>
<node distribution-key="0" hostalias="node0"/>
<node distribution-key="1" hostalias="node1"/>
<node distribution-key="2" hostalias="node2"/>
</nodes>
</content>
<container id="stateless" version="1.0">
<search>
<chain id="common">
<searcher id="MySearcher1"/>
<searcher id="MySearcher2"/>
</chain>
</search>
<component bundle="foobundle" class="MyFoo" id="foo"/>
<component bundle="foobundle" class="ProdBar" id="bar"/>
<component bundle="foobundle" class="ProdBaz" id="baz"/>
<nodes>
<node hostalias="node0" baseport="5001"/>
</nodes>
</container>
</services>""";
TestBase.assertDocument(expectedProdUsWest,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.prod,
RegionName.from("us-west"),
Cloud.defaultCloud().name(), Tags.empty()).run());
String expectedProdUsEastAndCentral =
"""
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<services xmlns:deploy="vespa" xmlns:preprocess="properties" version="1.0">
<admin version="2.0">
<adminserver hostalias="node1"/>
</admin>
<content id="foo" version="1.0">
<thread count="128"/>
<redundancy>1</redundancy>
<documents>
<document mode="index" type="music.sd"/>
</documents>
<nodes>
<node distribution-key="0" hostalias="node0"/>
<node distribution-key="1" hostalias="node1"/>
</nodes>
</content>
<container id="stateless" version="1.0">
<search>
<chain id="common">
<searcher id="MySearcher1"/>
<searcher id="MySearcher2"/>
</chain>
</search>
<component bundle="foobundle" class="MyFoo" id="foo"/>
<component bundle="foobundle" class="ProdBar" id="bar"/>
<component bundle="foobundle" class="ProdBaz" id="baz"/>
<nodes>
<node hostalias="node0" baseport="5002"/>
</nodes>
</container>
</services>""";
TestBase.assertDocument(expectedProdUsEastAndCentral,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.prod,
RegionName.from("us-east"),
Cloud.defaultCloud().name(), Tags.empty()).run());
TestBase.assertDocument(expectedProdUsEastAndCentral,
new XmlPreProcessor(appDir,
services,
InstanceName.defaultName(),
Environment.prod,
RegionName.from("us-central"),
Cloud.defaultCloud().name(), Tags.empty()).run());
}
|
static DeduplicationResult ensureSingleProducer(
QueryablePipeline pipeline,
Collection<ExecutableStage> stages,
Collection<PipelineNode.PTransformNode> unfusedTransforms) {
RunnerApi.Components.Builder unzippedComponents = pipeline.getComponents().toBuilder();
Multimap<PipelineNode.PCollectionNode, StageOrTransform> pcollectionProducers =
getProducers(pipeline, stages, unfusedTransforms);
Multimap<StageOrTransform, PipelineNode.PCollectionNode> requiresNewOutput =
HashMultimap.create();
// Create a synthetic PCollection for each of these nodes. The transforms in the runner
// portion of the graph that creates them should be replaced in the result components. The
// ExecutableStage must also be rewritten to have updated outputs and transforms.
for (Map.Entry<PipelineNode.PCollectionNode, Collection<StageOrTransform>> collectionProducer :
pcollectionProducers.asMap().entrySet()) {
if (collectionProducer.getValue().size() > 1) {
for (StageOrTransform producer : collectionProducer.getValue()) {
requiresNewOutput.put(producer, collectionProducer.getKey());
}
}
}
Map<ExecutableStage, ExecutableStage> updatedStages = new LinkedHashMap<>();
Map<String, PipelineNode.PTransformNode> updatedTransforms = new LinkedHashMap<>();
Multimap<String, PipelineNode.PCollectionNode> originalToPartial = HashMultimap.create();
for (Map.Entry<StageOrTransform, Collection<PipelineNode.PCollectionNode>>
deduplicationTargets : requiresNewOutput.asMap().entrySet()) {
if (deduplicationTargets.getKey().getStage() != null) {
StageDeduplication deduplication =
deduplicatePCollections(
deduplicationTargets.getKey().getStage(),
deduplicationTargets.getValue(),
unzippedComponents::containsPcollections);
for (Entry<String, PipelineNode.PCollectionNode> originalToPartialReplacement :
deduplication.getOriginalToPartialPCollections().entrySet()) {
originalToPartial.put(
originalToPartialReplacement.getKey(), originalToPartialReplacement.getValue());
unzippedComponents.putPcollections(
originalToPartialReplacement.getValue().getId(),
originalToPartialReplacement.getValue().getPCollection());
}
updatedStages.put(
deduplicationTargets.getKey().getStage(), deduplication.getUpdatedStage());
} else if (deduplicationTargets.getKey().getTransform() != null) {
PTransformDeduplication deduplication =
deduplicatePCollections(
deduplicationTargets.getKey().getTransform(),
deduplicationTargets.getValue(),
unzippedComponents::containsPcollections);
for (Entry<String, PipelineNode.PCollectionNode> originalToPartialReplacement :
deduplication.getOriginalToPartialPCollections().entrySet()) {
originalToPartial.put(
originalToPartialReplacement.getKey(), originalToPartialReplacement.getValue());
unzippedComponents.putPcollections(
originalToPartialReplacement.getValue().getId(),
originalToPartialReplacement.getValue().getPCollection());
}
updatedTransforms.put(
deduplicationTargets.getKey().getTransform().getId(),
deduplication.getUpdatedTransform());
} else {
throw new IllegalStateException(
String.format(
"%s with no %s or %s",
StageOrTransform.class.getSimpleName(),
ExecutableStage.class.getSimpleName(),
PipelineNode.PTransformNode.class.getSimpleName()));
}
}
Set<PipelineNode.PTransformNode> introducedFlattens = new LinkedHashSet<>();
for (Map.Entry<String, Collection<PipelineNode.PCollectionNode>> partialFlattenTargets :
originalToPartial.asMap().entrySet()) {
String flattenId =
SyntheticComponents.uniqueId("unzipped_flatten", unzippedComponents::containsTransforms);
PTransform flattenPartialPCollections =
createFlattenOfPartials(
flattenId, partialFlattenTargets.getKey(), partialFlattenTargets.getValue());
unzippedComponents.putTransforms(flattenId, flattenPartialPCollections);
introducedFlattens.add(PipelineNode.pTransform(flattenId, flattenPartialPCollections));
}
Components components = unzippedComponents.build();
return DeduplicationResult.of(components, introducedFlattens, updatedStages, updatedTransforms);
}
|
@Test
public void duplicateOverStagesAndTransforms() {
/* When both a stage and a runner-executed transform produce a PCollection, all should be
* replaced with synthetic flattens.
* original graph:
* --> one -> .out \
* red -> .out | -> shared -> .out
* --------------> /
*
* fused graph:
* --> [one -> .out -> shared ->] .out
* red -> .out |
* ------------------> shared --> .out
*
* deduplicated graph:
* --> [one -> .out -> shared ->] .out:0 \
* red -> .out | -> shared -> .out
* -----------------> shared:0 -> .out:1 /
*/
PCollection redOut = PCollection.newBuilder().setUniqueName("red.out").build();
PTransform red =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putOutputs("out", redOut.getUniqueName())
.build();
PCollection oneOut = PCollection.newBuilder().setUniqueName("one.out").build();
PTransform one =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putInputs("in", redOut.getUniqueName())
.putOutputs("out", oneOut.getUniqueName())
.build();
PCollection sharedOut = PCollection.newBuilder().setUniqueName("shared.out").build();
PTransform shared =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putInputs("one", oneOut.getUniqueName())
.putInputs("red", redOut.getUniqueName())
.putOutputs("shared", sharedOut.getUniqueName())
.build();
PCollection blueOut = PCollection.newBuilder().setUniqueName("blue.out").build();
PTransform blue =
PTransform.newBuilder()
.setSpec(
FunctionSpec.newBuilder()
.setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
.build())
.putInputs("in", sharedOut.getUniqueName())
.putOutputs("out", blueOut.getUniqueName())
.build();
RunnerApi.Components components =
Components.newBuilder()
.putTransforms("one", one)
.putPcollections(oneOut.getUniqueName(), oneOut)
.putTransforms("red", red)
.putPcollections(redOut.getUniqueName(), redOut)
.putTransforms("shared", shared)
.putPcollections(sharedOut.getUniqueName(), sharedOut)
.putTransforms("blue", blue)
.putPcollections(blueOut.getUniqueName(), blueOut)
.build();
PTransformNode sharedTransform = PipelineNode.pTransform("shared", shared);
ExecutableStage oneStage =
ImmutableExecutableStage.of(
components,
Environment.getDefaultInstance(),
PipelineNode.pCollection(redOut.getUniqueName(), redOut),
ImmutableList.of(),
ImmutableList.of(),
ImmutableList.of(),
ImmutableList.of(PipelineNode.pTransform("one", one), sharedTransform),
ImmutableList.of(PipelineNode.pCollection(sharedOut.getUniqueName(), sharedOut)),
DEFAULT_WIRE_CODER_SETTINGS);
PTransformNode redTransform = PipelineNode.pTransform("red", red);
PTransformNode blueTransform = PipelineNode.pTransform("blue", blue);
QueryablePipeline pipeline = QueryablePipeline.forPrimitivesIn(components);
OutputDeduplicator.DeduplicationResult result =
OutputDeduplicator.ensureSingleProducer(
pipeline,
ImmutableList.of(oneStage),
ImmutableList.of(redTransform, blueTransform, sharedTransform));
assertThat(result.getIntroducedTransforms(), hasSize(1));
PTransformNode introduced = getOnlyElement(result.getIntroducedTransforms());
assertThat(introduced.getTransform().getOutputsMap().size(), equalTo(1));
assertThat(
getOnlyElement(introduced.getTransform().getOutputsMap().values()),
equalTo(sharedOut.getUniqueName()));
assertThat(
result.getDeduplicatedComponents().getPcollectionsMap().keySet(),
hasItems(introduced.getTransform().getInputsMap().values().toArray(new String[0])));
assertThat(result.getDeduplicatedStages().keySet(), hasSize(1));
assertThat(result.getDeduplicatedTransforms().keySet(), containsInAnyOrder("shared"));
List<String> introducedOutputs = new ArrayList<>();
introducedOutputs.addAll(
result.getDeduplicatedTransforms().get("shared").getTransform().getOutputsMap().values());
introducedOutputs.addAll(
result.getDeduplicatedStages().get(oneStage).getOutputPCollections().stream()
.map(PCollectionNode::getId)
.collect(Collectors.toList()));
assertThat(
introduced.getTransform().getInputsMap().values(),
containsInAnyOrder(introducedOutputs.toArray(new String[0])));
assertThat(
result.getDeduplicatedComponents().getPcollectionsMap().keySet(),
hasItems(introducedOutputs.toArray(new String[0])));
assertThat(
result.getDeduplicatedComponents().getTransformsMap(),
hasEntry(introduced.getId(), introduced.getTransform()));
}
|
public static List<Endpoint> listenerListToEndPoints(
String input,
Map<ListenerName, SecurityProtocol> nameToSecurityProto
) {
return listenerListToEndPoints(input, n -> {
SecurityProtocol result = nameToSecurityProto.get(n);
if (result == null) {
throw new IllegalArgumentException("No security protocol defined for listener " + n.value());
}
return result;
});
}
|
@Test
public void testListenerListToEndPointsWithWildcard() {
assertEquals(Arrays.asList(
new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, null, 8080)),
SocketServerConfigs.listenerListToEndPoints("PLAINTEXT://:8080",
SocketServerConfigs.DEFAULT_NAME_TO_SECURITY_PROTO));
}
|
public static List<URI> parseXrdLinkReferencesFor(XmlPullParser parser, String relation) throws IOException, XmlPullParserException, URISyntaxException {
ParserUtils.forwardToStartElement(parser);
List<URI> uriList = new ArrayList<>();
int initialDepth = parser.getDepth();
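// Remember the depth of the enclosing element so the loop terminates at its matching
// end tag rather than at the end of a nested element.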
loop: while (true) {
XmlPullParser.TagEvent tag = parser.nextTag();
switch (tag) {
case START_ELEMENT:
String name = parser.getName();
String namespace = parser.getNamespace();
String rel = parser.getAttributeValue("rel");
if (!namespace.equals(XRD_NAMESPACE) || !name.equals("Link") || !rel.equals(relation)) {
continue loop;
}
String endpointUri = parser.getAttributeValue("href");
URI uri = new URI(endpointUri);
uriList.add(uri);
break;
case END_ELEMENT:
if (parser.getDepth() == initialDepth) {
break loop;
}
break;
}
}
return uriList;
}
|
@Test
public void parseXrdLinkReferencesForWebsockets() throws XmppStringprepException, IOException, XmlPullParserException, URISyntaxException {
List<URI> expectedEndpoints = new ArrayList<>();
expectedEndpoints.add(new URI("wss://xmpp.igniterealtime.org:7483/ws/"));
expectedEndpoints.add(new URI("ws://xmpp.igniterealtime.org:7070/ws/"));
List<URI> actualEndpoints = HttpLookupMethod.parseXrdLinkReferencesFor(PacketParserUtils.getParserFor(XRD_XML), LinkRelation.WEBSOCKET);
assertEquals(expectedEndpoints, actualEndpoints);
}
|
@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf in, final List<Object> out) {
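// Each replication event packet carries a one-byte status prefix ahead of the binlog
// event header, so only attempt decoding once at least that many bytes are readable.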
while (in.readableBytes() >= 1 + MySQLBinlogEventHeader.MYSQL_BINLOG_EVENT_HEADER_LENGTH) {
in.markReaderIndex();
MySQLPacketPayload payload = new MySQLPacketPayload(in, ctx.channel().attr(CommonConstants.CHARSET_ATTRIBUTE_KEY).get());
checkPayload(payload);
MySQLBinlogEventHeader binlogEventHeader = new MySQLBinlogEventHeader(payload, binlogContext.getChecksumLength());
if (!checkEventIntegrity(in, binlogEventHeader)) {
return;
}
Optional<MySQLBaseBinlogEvent> binlogEvent = decodeEvent(binlogEventHeader, payload);
if (!binlogEvent.isPresent()) {
skipChecksum(binlogEventHeader.getEventType(), in);
return;
}
if (binlogEvent.get() instanceof PlaceholderBinlogEvent) {
out.add(binlogEvent.get());
skipChecksum(binlogEventHeader.getEventType(), in);
return;
}
if (decodeWithTX) {
processEventWithTX(binlogEvent.get(), out);
} else {
processEventIgnoreTX(binlogEvent.get(), out);
}
skipChecksum(binlogEventHeader.getEventType(), in);
}
}
|
@Test
void assertDecodeRotateEvent() {
ByteBuf byteBuf = Unpooled.buffer();
byteBuf.writeBytes(StringUtil.decodeHexDump("000000000004010000002c0000000000000020001a9100000000000062696e6c6f672e3030303032394af65c24"));
List<Object> decodedEvents = new LinkedList<>();
binlogEventPacketDecoder.decode(channelHandlerContext, byteBuf, decodedEvents);
assertTrue(decodedEvents.isEmpty());
assertThat(binlogContext.getFileName(), is("binlog.000029"));
}
|
@Override
public WindowStoreIterator<V> backwardFetch(final K key,
final Instant timeFrom,
final Instant timeTo) throws IllegalArgumentException {
Objects.requireNonNull(key, "key can't be null");
final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
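// Probe each underlying store in turn, closing and skipping empty iterators so the
// first store holding data for the key and time range wins.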
for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
try {
final WindowStoreIterator<V> result = windowStore.backwardFetch(key, timeFrom, timeTo);
if (!result.hasNext()) {
result.close();
} else {
return result;
}
} catch (final InvalidStateStoreException e) {
throw new InvalidStateStoreException(
"State store is not available anymore and may have been migrated to another instance; " +
"please re-discover its location from the state metadata.");
}
}
return KeyValueIterators.emptyWindowStoreIterator();
}
|
@Test
public void shouldBackwardFetchKeyRangeAcrossStores() {
final ReadOnlyWindowStoreStub<String, String> secondUnderlying = new ReadOnlyWindowStoreStub<>(WINDOW_SIZE);
stubProviderTwo.addStore(storeName, secondUnderlying);
underlyingWindowStore.put("a", "a", 0L);
secondUnderlying.put("b", "b", 10L);
final List<KeyValue<Windowed<String>, String>> results =
StreamsTestUtils.toList(windowStore.backwardFetch("a", "b", ofEpochMilli(0), ofEpochMilli(10)));
assertThat(results, equalTo(Arrays.asList(
KeyValue.pair(new Windowed<>("a", new TimeWindow(0, WINDOW_SIZE)), "a"),
KeyValue.pair(new Windowed<>("b", new TimeWindow(10, 10 + WINDOW_SIZE)), "b"))));
}
|
SyncableFileSystemView getPreferredView() {
return preferredView;
}
|
@Test
public void testGetPreferredView() {
assertEquals(primary, fsView.getPreferredView());
}
|
@VisibleForTesting
void initializeForeachArtifactRollup(
ForeachStepOverview foreachOverview,
ForeachStepOverview prevForeachOverview,
String foreachWorkflowId) {
Set<Long> iterationsToRunInNewRun =
foreachOverview.getIterationsToRunFromDetails(prevForeachOverview);
WorkflowRollupOverview aggregatedRollupsPrevRun =
getAggregatedRollupFromIterations(foreachWorkflowId, iterationsToRunInNewRun);
foreachOverview.initiateStepRollup(prevForeachOverview.getRollup(), aggregatedRollupsPrevRun);
}
|
@Test
public void testGetAggregatedRollupFromIterationsNull() {
doReturn(Collections.singletonList(new WorkflowRollupOverview()))
.when(workflowInstanceDao)
.getBatchForeachLatestRunRollupForIterations(anyString(), any());
ForeachStepOverview stepOverview = mock(ForeachStepOverview.class);
ForeachStepOverview prevStepOverview = new ForeachStepOverview();
doReturn(null).when(stepOverview).getIterationsToRunFromDetails(any());
foreachStepRuntime.initializeForeachArtifactRollup(
stepOverview, prevStepOverview, "myworkflowid");
assertNull(stepOverview.getRollup());
Mockito.verify(workflowInstanceDao, times(0))
.getBatchForeachLatestRunRollupForIterations(eq("myworkflowid"), any());
}
|
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
return api.send(request);
}
|
@Test
public void editMessageReplyMarkup() {
String text = "Update" + System.currentTimeMillis();
InlineKeyboardMarkup keyboard = new InlineKeyboardMarkup(new InlineKeyboardButton(text).url("https://google.com"));
InlineKeyboardMarkup gameKeyboard = new InlineKeyboardMarkup(new InlineKeyboardButton(text).callbackGame("test game"));
BaseResponse response = bot.execute(new EditMessageReplyMarkup(chatId, 8124).replyMarkup(keyboard));
assertTrue(response.isOk());
response = bot.execute(new EditMessageReplyMarkup(channelName, 511).replyMarkup(keyboard));
assertTrue(response.isOk());
response = bot.execute(new EditMessageReplyMarkup("AgAAAPrwAQCj_Q4D2s-51_8jsuU").replyMarkup(gameKeyboard));
if (!response.isOk()) {
assertEquals(400, response.errorCode());
assertEquals("Bad Request: MESSAGE_ID_INVALID", response.description());
}
}
|
@Override
public void execute(Exchange exchange) throws SmppException {
SubmitSm[] submitSms = createSubmitSm(exchange);
List<String> messageIDs = new ArrayList<>(submitSms.length);
String messageID = null;
for (int i = 0; i < submitSms.length; i++) {
SubmitSm submitSm = submitSms[i];
messageID = null;
if (log.isDebugEnabled()) {
log.debug("Sending short message {} for exchange id '{}'...", i, exchange.getExchangeId());
}
try {
SubmitSmResult result = session.submitShortMessage(
submitSm.getServiceType(),
TypeOfNumber.valueOf(submitSm.getSourceAddrTon()),
NumberingPlanIndicator.valueOf(submitSm.getSourceAddrNpi()),
submitSm.getSourceAddr(),
TypeOfNumber.valueOf(submitSm.getDestAddrTon()),
NumberingPlanIndicator.valueOf(submitSm.getDestAddrNpi()),
submitSm.getDestAddress(),
new ESMClass(submitSm.getEsmClass()),
submitSm.getProtocolId(),
submitSm.getPriorityFlag(),
submitSm.getScheduleDeliveryTime(),
submitSm.getValidityPeriod(),
new RegisteredDelivery(submitSm.getRegisteredDelivery()),
submitSm.getReplaceIfPresent(),
DataCodings.newInstance(submitSm.getDataCoding()),
(byte) 0,
submitSm.getShortMessage(),
submitSm.getOptionalParameters());
if (result != null) {
messageID = result.getMessageId();
}
} catch (Exception e) {
throw new SmppException(e);
}
if (messageID != null) {
messageIDs.add(messageID);
}
}
if (log.isDebugEnabled()) {
log.debug("Sent short message for exchange id '{}' and received message ids '{}'",
exchange.getExchangeId(), messageIDs);
}
Message message = ExchangeHelper.getResultMessage(exchange);
message.setHeader(SmppConstants.ID, messageIDs);
message.setHeader(SmppConstants.SENT_MESSAGE_COUNT, messageIDs.size());
}
|
@Test
public void singleDlrRequestOverridesDeliveryReceiptFlag() throws Exception {
String longSms = "123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" +
"12345678901234567890123456789012345678901234567890123456789012345678901";
Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
exchange.getIn().setHeader(SmppConstants.COMMAND, "SubmitSm");
exchange.getIn().setHeader(SmppConstants.SINGLE_DLR, "true");
exchange.getIn().setBody(longSms.getBytes());
when(session.submitShortMessage(eq("CMT"),
eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1717"),
eq(new ESMClass((byte) 64)), eq((byte) 0), eq((byte) 1),
(String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.DEFAULT)),
eq(ReplaceIfPresentFlag.DEFAULT.value()),
eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), any(byte[].class)))
.thenReturn(new SubmitSmResult(new MessageId("1"), null));
when(session.submitShortMessage(eq("CMT"),
eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1616"),
eq(TypeOfNumber.UNKNOWN), eq(NumberingPlanIndicator.UNKNOWN), eq("1717"),
eq(new ESMClass((byte) 64)), eq((byte) 0), eq((byte) 1),
(String) isNull(), (String) isNull(), eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
eq(ReplaceIfPresentFlag.DEFAULT.value()),
eq(DataCodings.newInstance((byte) 0)), eq((byte) 0), any(byte[].class)))
.thenReturn(new SubmitSmResult(new MessageId("2"), null));
command.execute(exchange);
assertEquals(Arrays.asList("1", "2"), exchange.getMessage().getHeader(SmppConstants.ID));
assertEquals(2, exchange.getMessage().getHeader(SmppConstants.SENT_MESSAGE_COUNT));
}
|
@Override
public ProtobufSystemInfo.Section toProtobuf() {
ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder();
protobuf.setName("System");
setAttribute(protobuf, "Server ID", server.getId());
setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel());
setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode());
setAttribute(protobuf, "Container", containerSupport.isRunningInContainer());
setAttribute(protobuf, "High Availability", true);
setAttribute(protobuf, "External Users and Groups Provisioning",
commonSystemInformation.getManagedInstanceProviderName());
setAttribute(protobuf, "External User Authentication",
commonSystemInformation.getExternalUserAuthentication());
addIfNotEmpty(protobuf, "Accepted external identity providers",
commonSystemInformation.getEnabledIdentityProviders());
addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up",
commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders());
setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication());
return protobuf.build();
}
|
@Test
public void return_Lines_of_Code_from_StatisticsSupport() {
when(statisticsSupport.getLinesOfCode()).thenReturn(17752L);
ProtobufSystemInfo.Section protobuf = underTest.toProtobuf();
assertThatAttributeIs(protobuf, "Lines of Code", 17752L);
}
|
public static BufferedImage fillImage(final BufferedImage image, final Color color)
{
final BufferedImage filledImage = new BufferedImage(image.getWidth(), image.getHeight(), BufferedImage.TYPE_INT_ARGB);
for (int x = 0; x < filledImage.getWidth(); x++)
{
for (int y = 0; y < filledImage.getHeight(); y++)
{
int pixel = image.getRGB(x, y);
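// The top eight bits of an ARGB pixel hold the alpha channel; skip fully transparent
// pixels so the fill preserves the original shape.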
int a = pixel >>> 24;
if (a == 0)
{
continue;
}
filledImage.setRGB(x, y, color.getRGB());
}
}
return filledImage;
}
|
@Test
public void fillImage()
{
// fillImage(BufferedImage image, Color color)
assertTrue(bufferedImagesEqual(centeredPixel(GRAY), ImageUtil.fillImage(centeredPixel(BLACK), GRAY)));
assertTrue(bufferedImagesEqual(solidColor(3, 3, GREEN), ImageUtil.fillImage(solidColor(3, 3, BLACK), GREEN)));
assertTrue(bufferedImagesEqual(oneByOne(BLACK_TRANSPARENT), ImageUtil.fillImage(oneByOne(BLACK_TRANSPARENT), WHITE)));
}
|
@Override
public void monitor(RedisServer master) {
connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(),
master.getPort().intValue(), master.getQuorum().intValue());
}
|
@Test
public void testMonitor() {
Collection<RedisServer> masters = connection.masters();
RedisServer master = masters.iterator().next();
master.setName(master.getName() + ":");
connection.monitor(master);
}
|
@Override
public JreInfoRestResponse getJreMetadata(String id) {
return jresHandler.getJreMetadata(id);
}
|
@Test
void getJre_shouldDownloadJre_whenHeaderIsOctetStream() throws Exception {
String anyId = "anyId";
String anyFilename = "anyFilename";
JreInfoRestResponse jreInfoRestResponse = new JreInfoRestResponse(anyId, anyFilename, "sha256", "javaPath", "os", "arch");
when(jresHandler.getJreMetadata(anyId)).thenReturn(jreInfoRestResponse);
byte[] anyBinary = {1, 2, 3};
when(jresHandler.getJreBinary(anyFilename)).thenReturn(new ByteArrayInputStream(anyBinary));
mockMvc.perform(get(JRE_ENDPOINT + "/" + anyId)
.header("Accept", APPLICATION_OCTET_STREAM_VALUE))
.andExpect(status().isOk())
.andExpect(content().contentType(APPLICATION_OCTET_STREAM))
.andExpect(content().bytes(anyBinary));
}
|
public static int findAvailablePort(String portRange) throws IOException {
// ':' is the default value which means no constraints on the portRange
if (StringUtils.isBlank(portRange) || portRange.equals(":")) {
try (ServerSocket socket = new ServerSocket(0)) {
return socket.getLocalPort();
} catch (IOException e) {
throw new IOException("Failed to allocate a automatic port", e);
}
}
// valid user registered port https://en.wikipedia.org/wiki/Registered_port
int start = 1024;
int end = 65535;
String[] ports = portRange.split(":", -1);
if (!ports[0].isEmpty()) {
start = Integer.parseInt(ports[0]);
}
if (!ports[1].isEmpty()) {
end = Integer.parseInt(ports[1]);
}
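// Probe each port in the requested range by attempting to bind; the first successful
// bind determines the returned port.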
for (int i = start; i <= end; ++i) {
try (ServerSocket socket = new ServerSocket(i)) {
return socket.getLocalPort();
} catch (IOException e) {
// ignore this
}
}
throw new IOException("No available port in the portRange: " + portRange);
}
|
@Test
void testFindAvailablePort() throws IOException {
assertTrue(RemoteInterpreterUtils.findAvailablePort(":") > 0);
String portRange = ":30000";
assertTrue(RemoteInterpreterUtils.findAvailablePort(portRange) <= 30000);
portRange = "30000:";
assertTrue(RemoteInterpreterUtils.findAvailablePort(portRange) >= 30000);
portRange = "30000:40000";
int port = RemoteInterpreterUtils.findAvailablePort(portRange);
assertTrue(port >= 30000 && port <= 40000);
}
|
@Override
@SuppressWarnings("rawtypes")
public void report(SortedMap<String, Gauge> gauges,
SortedMap<String, Counter> counters,
SortedMap<String, Histogram> histograms,
SortedMap<String, Meter> meters,
SortedMap<String, Timer> timers) {
final long timestamp = TimeUnit.MILLISECONDS.toSeconds(clock.getTime());
for (Map.Entry<String, Gauge> entry : gauges.entrySet()) {
reportGauge(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Counter> entry : counters.entrySet()) {
reportCounter(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Histogram> entry : histograms.entrySet()) {
reportHistogram(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Meter> entry : meters.entrySet()) {
reportMeter(timestamp, entry.getKey(), entry.getValue());
}
for (Map.Entry<String, Timer> entry : timers.entrySet()) {
reportTimer(timestamp, entry.getKey(), entry.getValue());
}
}
|
@Test
public void reportsCounterValues() throws Exception {
final Counter counter = mock(Counter.class);
when(counter.getCount()).thenReturn(100L);
reporter.report(map(),
map("test.counter", counter),
map(),
map(),
map());
assertThat(fileContents("test.counter.csv"))
.isEqualTo(csv(
"t,count",
"19910191,100"
));
}
|
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
if (request instanceof HttpServletRequest httpRequest) {
HttpServletResponse httpResponse = (HttpServletResponse) response;
try {
chain.doFilter(new ServletRequestWrapper(httpRequest), httpResponse);
} catch (Throwable e) {
if (httpResponse.isCommitted()) {
// Request has been aborted by the client, nothing can be done as Tomcat has committed the response
LOGGER.debug(format("Processing of request %s failed", toUrl(httpRequest)), e);
return;
}
LOGGER.error(format("Processing of request %s failed", toUrl(httpRequest)), e);
httpResponse.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
} else {
// Not an HTTP request, not profiled
chain.doFilter(request, response);
}
}
|
@Test
public void request_used_in_chain_do_filter_is_a_servlet_wrapper_when_static_resource() throws Exception {
underTest.doFilter(request("GET", "/context/static/image.png", null), mock(HttpServletResponse.class), chain);
ArgumentCaptor<ServletRequest> requestArgumentCaptor = ArgumentCaptor.forClass(ServletRequest.class);
verify(chain).doFilter(requestArgumentCaptor.capture(), any(HttpServletResponse.class));
assertThat(requestArgumentCaptor.getValue()).isInstanceOf(RootFilter.ServletRequestWrapper.class);
}
|
@Override
public boolean offerFirst(T t)
{
addFirstNode(t);
return true;
}
|
@Test
public void testOfferFirst()
{
List<Integer> control = new ArrayList<>(Arrays.asList(1, 2, 3));
LinkedDeque<Integer> q = new LinkedDeque<>(control);
control.add(0, 99);
Assert.assertTrue(q.offerFirst(99));
Assert.assertEquals(q, control);
}
|
@Override
public ConnectResponse<ConfigInfos> validate(
final String plugin,
final Map<String, String> config) {
try {
final Map<String, String> maskedConfig = QueryMask.getMaskedConnectConfig(config);
LOG.debug("Issuing validate request to Kafka Connect at URI {} for plugin {} and config {}",
connectUri,
plugin,
maskedConfig);
final ConnectResponse<ConfigInfos> connectResponse = withRetries(() -> Request
.put(resolveUri(String.format(VALIDATE_CONNECTOR, plugin)))
.setHeaders(requestHeaders)
.responseTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
.connectTimeout(Timeout.ofMilliseconds(requestTimeoutMs))
.bodyString(MAPPER.writeValueAsString(config), ContentType.APPLICATION_JSON)
.execute(httpClient)
.handleResponse(
createHandler(HttpStatus.SC_OK, new TypeReference<ConfigInfos>() {},
Function.identity())));
connectResponse.error()
.ifPresent(error ->
LOG.warn("Did not VALIDATE connector configuration for plugin {} and config {}: {}",
plugin, maskedConfig, error));
return connectResponse;
} catch (final Exception e) {
throw new KsqlServerException(e);
}
}
|
@Test
public void testValidateWithError() throws JsonProcessingException {
// Given:
final String plugin = SAMPLE_PLUGIN.getClassName();
final String url = String.format(pathPrefix + "/connector-plugins/%s/config/validate", plugin);
WireMock.stubFor(
WireMock.put(WireMock.urlEqualTo(url))
.withHeader(AUTHORIZATION.toString(), new EqualToPattern(AUTH_HEADER))
.willReturn(WireMock.aResponse()
.withStatus(HttpStatus.SC_INTERNAL_SERVER_ERROR)
.withBody("Oh no!"))
);
// When:
final ConnectResponse<ConfigInfos> response =
client.validate(plugin, ImmutableMap.of());
// Then:
assertThat("Expected no datum!", !response.datum().isPresent());
assertThat(response.error(), OptionalMatchers.of(is("Oh no!")));
}
|
public String getKVConfigValue(final String namespace, final String key, final long timeoutMillis)
throws RemotingException, MQClientException, InterruptedException {
GetKVConfigRequestHeader requestHeader = new GetKVConfigRequestHeader();
requestHeader.setNamespace(namespace);
requestHeader.setKey(key);
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_KV_CONFIG, requestHeader);
RemotingCommand response = this.remotingClient.invokeSync(null, request, timeoutMillis);
assert response != null;
switch (response.getCode()) {
case ResponseCode.SUCCESS: {
GetKVConfigResponseHeader responseHeader =
(GetKVConfigResponseHeader) response.decodeCommandCustomHeader(GetKVConfigResponseHeader.class);
return responseHeader.getValue();
}
default:
break;
}
throw new MQClientException(response.getCode(), response.getRemark());
}
|
@Test
public void assertGetKVConfigValue() throws RemotingException, InterruptedException, MQClientException {
mockInvokeSync();
GetKVConfigResponseHeader responseHeader = mock(GetKVConfigResponseHeader.class);
when(responseHeader.getValue()).thenReturn("value");
setResponseHeader(responseHeader);
assertEquals("value", mqClientAPI.getKVConfigValue("", "", defaultTimeout));
}
|
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
try {
final AttributedList<Path> buckets = new AttributedList<>();
Buckets response;
String page = null;
do {
final Storage.Buckets.List request = session.getClient().buckets().list(session.getHost().getCredentials().getUsername())
.setMaxResults(new HostPreferences(session.getHost()).getLong("googlestorage.listing.chunksize"))
.setPageToken(page);
response = request.execute();
if(null != response.getItems()) {
for(Bucket item : response.getItems()) {
final Path bucket = new Path(PathNormalizer.normalize(item.getName()), EnumSet.of(Path.Type.volume, Path.Type.directory),
attributes.toAttributes(item)
);
buckets.add(bucket);
listener.chunk(directory, buckets);
}
}
page = response.getNextPageToken();
}
while(page != null);
return buckets;
}
catch(IOException e) {
throw new GoogleStorageExceptionMappingService().map("Listing directory {0} failed", e, directory);
}
}
|
@Test
public void testListContainers() throws Exception {
final Path container = new Path("/", EnumSet.of(Path.Type.directory));
final AttributedList<Path> list = new GoogleStorageBucketListService(session).list(container, new DisabledListProgressListener());
assertFalse(list.isEmpty());
for(Path bucket : list) {
assertEquals(bucket.attributes(), new GoogleStorageAttributesFinderFeature(session).find(bucket, new DisabledListProgressListener()));
}
}
|
@Override
public void removeLogListener(@Nonnull LogListener logListener) {
throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void testRemoveLogListener() {
loggingService.removeLogListener(logListener);
}
|
@SuppressWarnings("unchecked")
public static <K, V> CowMap<K, V> emptyMap()
{
return (CowMap<K,V>) EMPTY_MAP;
}
|
@Test
public void testEmptyMap()
{
final CowMap<String, String> map = CowUtil.emptyMap();
Assert.assertTrue(map.isEmpty());
Assert.assertTrue(map.isReadOnly());
try
{
mutateMap(map);
Assert.fail("Should have thrown UnsupportedOperationException");
}
catch (UnsupportedOperationException e)
{
// Expected case
}
Assert.assertTrue(CowUtil.<Object, Object>emptyMap().isEmpty());
}
|
@Override
public Map<SubClusterId, List<ResourceRequest>> splitResourceRequests(
List<ResourceRequest> resourceRequests,
Set<SubClusterId> timedOutSubClusters) throws YarnException {
// object used to accumulate statistics about the answer, initialize with
// active subclusters. Create a new instance per call because this method
// can be called concurrently.
AllocationBookkeeper bookkeeper = new AllocationBookkeeper();
bookkeeper.reinitialize(getActiveSubclusters(), timedOutSubClusters, conf);
List<ResourceRequest> nonLocalizedRequests = new ArrayList<>();
SubClusterId targetId = null;
Set<SubClusterId> targetIds = null;
// if the RR is resolved to a local subcluster add it directly (node and
// resolvable racks)
for (ResourceRequest rr : resourceRequests) {
targetId = null;
targetIds = null;
// Handle: ANY (accumulated for later)
if (ResourceRequest.isAnyLocation(rr.getResourceName())) {
nonLocalizedRequests.add(rr);
continue;
}
// Handle "node" requests
try {
targetId = resolver.getSubClusterForNode(rr.getResourceName());
// If needed, re-route node requests based on SC load
boolean loadBasedSCSelectorEnabled =
conf.getBoolean(LOAD_BASED_SC_SELECTOR_ENABLED, DEFAULT_LOAD_BASED_SC_SELECTOR_ENABLED);
if (loadBasedSCSelectorEnabled) {
int maxPendingThreshold = conf.getInt(LOAD_BASED_SC_SELECTOR_THRESHOLD,
DEFAULT_LOAD_BASED_SC_SELECTOR_THRESHOLD);
targetId = routeNodeRequestIfNeeded(targetId, maxPendingThreshold,
bookkeeper.getActiveAndEnabledSC());
}
LOG.debug("Node request {}", rr.getResourceName());
} catch (YarnException e) {
// this might happen as we can't differentiate node from rack names
// we log altogether later
}
if (bookkeeper.isActiveAndEnabled(targetId)) {
bookkeeper.addLocalizedNodeRR(targetId, rr);
continue;
}
// Handle "rack" requests
try {
targetIds = resolver.getSubClustersForRack(rr.getResourceName());
} catch (YarnException e) {
// this might happen as we can't differentiate node from rack names
// we log altogether later
}
if (targetIds != null && targetIds.size() > 0) {
boolean hasActive = false;
for (SubClusterId tid : targetIds) {
if (bookkeeper.isActiveAndEnabled(tid)) {
bookkeeper.addRackRR(tid, rr);
hasActive = true;
}
}
if (hasActive) {
continue;
}
}
// Handle node/rack requests that the SubClusterResolver cannot map to
// any cluster. Pick a random sub-cluster from active and enabled ones.
targetId = getSubClusterForUnResolvedRequest(bookkeeper,
rr.getAllocationRequestId());
LOG.debug("ERROR resolving sub-cluster for resourceName: {}, picked a "
+ "random subcluster to forward:{}", rr.getResourceName(), targetId);
if (targetIds != null && targetIds.size() > 0) {
bookkeeper.addRackRR(targetId, rr);
} else {
bookkeeper.addLocalizedNodeRR(targetId, rr);
}
}
// handle all non-localized requests (ANY)
splitAnyRequests(nonLocalizedRequests, bookkeeper);
// Take the split result, feed into the askBalancer
Map<SubClusterId, List<ResourceRequest>> answer = bookkeeper.getAnswer();
LOG.info("Before split {} RRs: {}", resourceRequests.size(),
prettyPrintRequests(resourceRequests, this.printRRMax));
for (Map.Entry<SubClusterId, List<ResourceRequest>> entry : bookkeeper.getAnswer().entrySet()) {
LOG.info("After split {} has {} RRs: {}", entry.getKey(), entry.getValue().size(),
prettyPrintRequests(entry.getValue(), this.printRRMax));
}
return answer;
}
|
@Test(timeout = 5000)
public void testStressPolicy() throws Exception {
// Tests how the headroom info are used to split based on the capacity
// each RM claims to give us.
// Configure policy to be 100% headroom based
getPolicyInfo().setHeadroomAlpha(1.0f);
initializePolicy();
addHomeSubClusterAsActive();
int numRR = 1000;
List<ResourceRequest> resourceRequests = createLargeRandomList(numRR);
prepPolicyWithHeadroom(true);
int numIterations = 1000;
long tstart = System.currentTimeMillis();
for (int i = 0; i < numIterations; i++) {
Map<SubClusterId, List<ResourceRequest>> response =
((FederationAMRMProxyPolicy) getPolicy()).splitResourceRequests(
resourceRequests, new HashSet<SubClusterId>());
validateSplit(response, resourceRequests);
}
long tend = System.currentTimeMillis();
LOG.info("Performed " + numIterations + " policy invocations (and "
+ "validations) in " + (tend - tstart) + "ms");
}
|
@Override
public boolean supportsMultipleResultSets() {
return false;
}
|
@Test
void assertSupportsMultipleResultSets() {
assertFalse(metaData.supportsMultipleResultSets());
}
|
public static KTableHolder<GenericKey> build(
final KGroupedTableHolder groupedTable,
final TableAggregate aggregate,
final RuntimeBuildContext buildContext,
final MaterializedFactory materializedFactory) {
return build(
groupedTable,
aggregate,
buildContext,
materializedFactory,
new AggregateParamsFactory()
);
}
|
@Test
public void shouldBuildValueSerdeCorrectlyForAggregate() {
// When:
aggregate.build(planBuilder, planInfo);
// Then:
verify(buildContext).buildValueSerde(
VALUE_FORMAT,
PHYSICAL_AGGREGATE_SCHEMA,
MATERIALIZE_CTX
);
}
|
@SuppressWarnings({"rawtypes", "unchecked"})
public <T extends Gauge> T gauge(String name) {
return (T) getOrAdd(name, MetricBuilder.GAUGES);
}
|
@Test
public void settableGaugeIsTreatedLikeAGauge() {
final MetricRegistry.MetricSupplier<SettableGauge<String>> supplier = () -> settableGauge;
final SettableGauge<String> gauge1 = registry.gauge("thing", supplier);
final SettableGauge<String> gauge2 = registry.gauge("thing", supplier);
assertThat(gauge1)
.isSameAs(gauge2);
verify(listener).onGaugeAdded("thing", gauge1);
}
|
public static boolean arePrefixColumns(List<FieldSchema> p, List<FieldSchema> s) {
if (p == s) {
return true;
}
if (p == null || s == null || p.size() > s.size()) {
return false;
}
return areSameColumns(p, s.subList(0, p.size()));
}
|
@Test
public void testPrefixColumns() {
FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
FieldSchema Col1 = new FieldSchema("Col1", "string", "col1 comment");
FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
Assert.assertTrue(MetaStoreServerUtils.arePrefixColumns(null, null));
Assert.assertFalse(MetaStoreServerUtils.arePrefixColumns(Arrays.asList(col1), null));
Assert.assertFalse(MetaStoreServerUtils.arePrefixColumns(null, Arrays.asList(col1)));
Assert.assertTrue(MetaStoreServerUtils.arePrefixColumns(Arrays.asList(col1), Arrays.asList(col1)));
Assert.assertTrue(MetaStoreServerUtils.arePrefixColumns(Arrays.asList(col1, col2), Arrays.asList(col1, col2, col3)));
Assert.assertTrue(MetaStoreServerUtils.arePrefixColumns(Arrays.asList(Col1, col2), Arrays.asList(col1, col2, col3)));
Assert.assertFalse(MetaStoreServerUtils.arePrefixColumns(Arrays.asList(col1, col2, col3), Arrays.asList(col1, col2)));
}
|
public static org.apache.pinot.common.utils.regex.Matcher matcher(Pattern pattern, CharSequence input) {
if (pattern instanceof Re2jPattern) {
return new Re2jMatcher(pattern, input);
} else {
return new JavaUtilMatcher(pattern, input);
}
}
|
@Test
public void testRe2jMatcherFactory() {
Re2jPattern re2jPattern = new Re2jPattern("pattern");
Matcher matcher = MatcherFactory.matcher(re2jPattern, "");
Assert.assertTrue(matcher instanceof Re2jMatcher);
}
|
@Override
protected String getFolderSuffix() {
return FOLDER_SUFFIX;
}
|
@Test
public void testGetFolderSuffix() {
Assert.assertEquals("/", mOBSUnderFileSystem.getFolderSuffix());
}
|
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
final String storeName = storeQueryParameters.storeName();
final QueryableStoreType<T> queryableStoreType = storeQueryParameters.queryableStoreType();
final List<T> globalStore = globalStoreProvider.stores(storeName, queryableStoreType);
if (!globalStore.isEmpty()) {
return queryableStoreType.create(globalStoreProvider, storeName);
}
return queryableStoreType.create(
new WrappingStoreProvider(storeProviders.values(), storeQueryParameters),
storeName
);
}
|
@Test
public void shouldReturnWindowStoreWithPartitionWhenItExists() {
assertNotNull(storeProvider.getStore(StoreQueryParameters.fromNameAndType(windowStore, QueryableStoreTypes.windowStore()).withPartition(numStateStorePartitions - 1)));
}
|
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) {
if ( list == null ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null"));
}
boolean result = true;
boolean containsNull = false;
// Spec. definition: return false if any item is false, else true if all items are true, else null
for ( final Object element : list ) {
if (element != null && !(element instanceof Boolean)) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean"));
} else {
if (element != null) {
result &= (Boolean) element;
} else if (!containsNull) {
containsNull = true;
}
}
}
if (containsNull && result) {
return FEELFnResult.ofResult( null );
} else {
return FEELFnResult.ofResult( result );
}
}
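The ternary logic above implements the quoted spec rule: false dominates, true requires every item to be true, and null surfaces only when all non-null elements are true. A standalone sketch of the same rule over plain Java Booleans (FEELFnResult and event reporting omitted):
import java.util.Arrays;
import java.util.List;

public class AllSemanticsSketch {
    // Mirrors the spec rule: false if any item is false, true if all are true, else null.
    static Boolean all(List<Boolean> list) {
        boolean containsNull = false;
        boolean result = true;
        for (Boolean b : list) {
            if (b == null) {
                containsNull = true;
            } else {
                result &= b;
            }
        }
        return (containsNull && result) ? null : result;
    }

    public static void main(String[] args) {
        System.out.println(all(Arrays.asList(true, true)));   // true
        System.out.println(all(Arrays.asList(true, null)));   // null
        System.out.println(all(Arrays.asList(false, null)));  // false
    }
}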
|
@Test
void invokeArrayParamTypeHeterogenousArray() {
FunctionTestUtil.assertResultError(allFunction.invoke(new Object[]{Boolean.TRUE, 1}),
InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(allFunction.invoke(new Object[]{Boolean.FALSE, 1}),
InvalidParametersEvent.class);
FunctionTestUtil.assertResultError(allFunction.invoke(new Object[]{Boolean.TRUE, null, 1}),
InvalidParametersEvent.class);
}
|
int calculatePrice(Integer basePrice, Integer percent, Integer fixedPrice) {
    // 1. Prefer the fixed commission when one is set
if (fixedPrice != null && fixedPrice > 0) {
return ObjectUtil.defaultIfNull(fixedPrice, 0);
}
    // 2. Otherwise compute the commission from the base price and percentage
if (basePrice != null && basePrice > 0 && percent != null && percent > 0) {
return MoneyUtils.calculateRatePriceFloor(basePrice, Double.valueOf(percent));
}
return 0;
}
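A quick sketch of the precedence above: a positive fixed commission wins outright, and the percentage path applies only when both base price and percent are positive. The calculateRatePriceFloor stand-in below is an assumption (base * percent / 100, rounded down); the real MoneyUtils helper may round differently:
public class CommissionSketch {
    // Hypothetical stand-in for MoneyUtils.calculateRatePriceFloor; percent given as 0-100.
    static int calculateRatePriceFloor(int basePrice, double percent) {
        return (int) Math.floor(basePrice * percent / 100.0d);
    }

    static int calculatePrice(Integer basePrice, Integer percent, Integer fixedPrice) {
        if (fixedPrice != null && fixedPrice > 0) {
            return fixedPrice; // fixed commission takes precedence
        }
        if (basePrice != null && basePrice > 0 && percent != null && percent > 0) {
            return calculateRatePriceFloor(basePrice, percent);
        }
        return 0;
    }

    public static void main(String[] args) {
        System.out.println(calculatePrice(10_000, 5, 30));   // 30: fixed commission wins
        System.out.println(calculatePrice(10_000, 5, null)); // 500: 10_000 * 5%
    }
}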
|
@Test
public void testCalculatePrice_useFixedPrice() {
    // mock data
    Integer payPrice = randomInteger();
    Integer percent = randomInt(1, 101);
    Integer fixedPrice = randomInt();
    // invoke
    int brokerage = brokerageRecordService.calculatePrice(payPrice, percent, fixedPrice);
    // assert: the fixed commission wins
    assertEquals(fixedPrice, brokerage);
}
|
public static boolean useGlobalDirectorySequenceId(SegmentNameGeneratorSpec spec) {
if (spec == null || spec.getConfigs() == null) {
return false;
}
String useGlobalDirectorySequenceId =
spec.getConfigs().get(SegmentGenerationTaskRunner.USE_GLOBAL_DIRECTORY_SEQUENCE_ID);
if (useGlobalDirectorySequenceId == null) {
String useLocalDirectorySequenceId =
spec.getConfigs().get(SegmentGenerationTaskRunner.DEPRECATED_USE_LOCAL_DIRECTORY_SEQUENCE_ID);
if (useLocalDirectorySequenceId != null) {
return !Boolean.parseBoolean(useLocalDirectorySequenceId);
}
}
return Boolean.parseBoolean(useGlobalDirectorySequenceId);
}
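Resolution order above: the new global key wins when present; otherwise the deprecated local key is consulted with its meaning inverted; otherwise the result is false. The test below confirms the literal key strings; here is a compact sketch of the same resolution over a plain map:
import java.util.Map;

public class SequenceIdPrecedenceSketch {
    // Same two keys as the focal method, per the literals used in the test below.
    static boolean effective(Map<String, String> configs) {
        String global = configs.get("use.global.directory.sequence.id");
        if (global == null) {
            String local = configs.get("local.directory.sequence.id"); // deprecated, inverted
            if (local != null) {
                return !Boolean.parseBoolean(local);
            }
        }
        return Boolean.parseBoolean(global); // parseBoolean(null) is false
    }

    public static void main(String[] args) {
        System.out.println(effective(Map.of("local.directory.sequence.id", "false")));    // true
        System.out.println(effective(Map.of("use.global.directory.sequence.id", "true"))); // true
        System.out.println(effective(Map.of()));                                           // false
    }
}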
|
@Test
public void testUseGlobalDirectorySequenceId() {
Assert.assertFalse(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(null));
SegmentNameGeneratorSpec spec = new SegmentNameGeneratorSpec();
Assert.assertFalse(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
spec.setConfigs(new HashMap<>());
Assert.assertFalse(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
spec.setConfigs(ImmutableMap.of("use.global.directory.sequence.id", "false"));
Assert.assertFalse(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
spec.setConfigs(ImmutableMap.of("use.global.directory.sequence.id", "FALSE"));
Assert.assertFalse(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
spec.setConfigs(ImmutableMap.of("use.global.directory.sequence.id", "True"));
Assert.assertTrue(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
spec.setConfigs(ImmutableMap.of("local.directory.sequence.id", "true"));
Assert.assertFalse(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
spec.setConfigs(ImmutableMap.of("local.directory.sequence.id", "TRUE"));
Assert.assertFalse(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
spec.setConfigs(ImmutableMap.of("local.directory.sequence.id", "False"));
Assert.assertTrue(SegmentGenerationJobUtils.useGlobalDirectorySequenceId(spec));
}
|
@Override
public void failSlot(
final ResourceID taskManagerId,
final AllocationID allocationId,
final Exception cause) {
if (registeredTaskManagers.containsKey(taskManagerId)) {
internalFailAllocation(taskManagerId, allocationId, cause);
} else {
log.warn(
"Cannot fail slot "
+ allocationId
+ " because the TaskManager "
+ taskManagerId
+ " is unknown.");
}
}
|
@Test
void testReleasingTaskExecutorIfNoMoreSlotsRegistered() throws Exception {
final JobGraph jobGraph = createSingleVertexJobWithRestartStrategy();
try (final JobMaster jobMaster =
new JobMasterBuilder(jobGraph, rpcService)
.withConfiguration(configuration)
.withHighAvailabilityServices(haServices)
.withHeartbeatServices(heartbeatServices)
.createJobMaster()) {
final CompletableFuture<JobID> disconnectTaskExecutorFuture = new CompletableFuture<>();
final CompletableFuture<AllocationID> freedSlotFuture = new CompletableFuture<>();
final TestingTaskExecutorGateway testingTaskExecutorGateway =
new TestingTaskExecutorGatewayBuilder()
.setFreeSlotFunction(
(allocationID, throwable) -> {
freedSlotFuture.complete(allocationID);
return CompletableFuture.completedFuture(Acknowledge.get());
})
.setDisconnectJobManagerConsumer(
(jobID, throwable) ->
disconnectTaskExecutorFuture.complete(jobID))
.createTestingTaskExecutorGateway();
final LocalUnresolvedTaskManagerLocation taskManagerLocation =
new LocalUnresolvedTaskManagerLocation();
jobMaster.start();
final JobMasterGateway jobMasterGateway =
jobMaster.getSelfGateway(JobMasterGateway.class);
final Collection<SlotOffer> slotOffers =
registerSlotsAtJobMaster(
1,
jobMasterGateway,
jobGraph.getJobID(),
testingTaskExecutorGateway,
taskManagerLocation);
// check that we accepted the offered slot
assertThat(slotOffers).hasSize(1);
final AllocationID allocationId = slotOffers.iterator().next().getAllocationId();
// now fail the allocation and check that we close the connection to the TaskExecutor
jobMasterGateway.failSlot(
taskManagerLocation.getResourceID(),
allocationId,
new FlinkException("Fail allocation test exception"));
            // we should free the slot and then disconnect from the TaskExecutor
            // because we no longer use any slots from it
assertThat(freedSlotFuture.get()).isEqualTo(allocationId);
assertThat(disconnectTaskExecutorFuture.get()).isEqualTo(jobGraph.getJobID());
}
}
|
public CruiseConfig deserializeConfig(String content) throws Exception {
String md5 = md5Hex(content);
Element element = parseInputStream(new ByteArrayInputStream(content.getBytes()));
LOGGER.debug("[Config Save] Updating config cache with new XML");
CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
setMd5(configForEdit, md5);
configForEdit.setOrigins(new FileConfigOrigin());
return configForEdit;
}
|
@Test
void shouldRetainArtifactSourceThatIsNotWhitespace() throws Exception {
CruiseConfig cruiseConfig = xmlLoader.deserializeConfig(goConfigMigration.upgradeIfNecessary(configWithArtifactSourceAs("t ")));
JobConfig plan = cruiseConfig.jobConfigByName("pipeline", "stage", "job", true);
assertThat(plan.artifactTypeConfigs().getBuiltInArtifactConfigs().get(0).getSource()).isEqualTo("t ");
}
|
public CompletableFuture<E> subscribe(String entryName, String channelName) {
AsyncSemaphore semaphore = service.getSemaphore(new ChannelName(channelName));
CompletableFuture<E> newPromise = new CompletableFuture<>();
semaphore.acquire().thenAccept(c -> {
if (newPromise.isDone()) {
semaphore.release();
return;
}
E entry = entries.get(entryName);
if (entry != null) {
entry.acquire();
semaphore.release();
entry.getPromise().whenComplete((r, e) -> {
if (e != null) {
newPromise.completeExceptionally(e);
return;
}
newPromise.complete(r);
});
return;
}
E value = createEntry(newPromise);
value.acquire();
E oldValue = entries.putIfAbsent(entryName, value);
if (oldValue != null) {
oldValue.acquire();
semaphore.release();
oldValue.getPromise().whenComplete((r, e) -> {
if (e != null) {
newPromise.completeExceptionally(e);
return;
}
newPromise.complete(r);
});
return;
}
RedisPubSubListener<Object> listener = createListener(channelName, value);
CompletableFuture<PubSubConnectionEntry> s = service.subscribeNoTimeout(LongCodec.INSTANCE, channelName, semaphore, listener);
newPromise.whenComplete((r, e) -> {
if (e != null) {
s.completeExceptionally(e);
}
});
s.whenComplete((r, e) -> {
if (e != null) {
entries.remove(entryName);
value.getPromise().completeExceptionally(e);
return;
}
if (!value.getPromise().complete(value)) {
if (value.getPromise().isCompletedExceptionally()) {
entries.remove(entryName);
}
}
});
});
return newPromise;
}
|
@Test
public void testSubscribeForRaceCondition() throws InterruptedException {
AtomicReference<CompletableFuture<PubSubConnectionEntry>> sRef = new AtomicReference<>();
new MockUp<PublishSubscribeService>() {
@Mock
AsyncSemaphore getSemaphore(ChannelName channelName) {
return new AsyncSemaphore(1);
}
@Mock
CompletableFuture<PubSubConnectionEntry> subscribeNoTimeout(
Codec codec, String channelName,
AsyncSemaphore semaphore, RedisPubSubListener<?>... listeners) {
sRef.set(new CompletableFuture<>());
return sRef.get();
}
};
CompletableFuture<RedissonLockEntry> newPromise = lockPubSub.subscribe(
"test", "redisson_lock__channel__test"
);
sRef.get().whenComplete((r, e) -> {
try {
Thread.sleep(1);
} catch (InterruptedException ex) {
throw new RuntimeException(ex);
}
});
Thread thread1 = new Thread(() -> sRef.get().complete(null));
Thread thread2 = new Thread(() -> newPromise.completeExceptionally(new RedisTimeoutException("test")));
thread1.start();
thread2.start();
thread1.join();
thread2.join();
assertTrue(newPromise.isCompletedExceptionally());
assertTrue(sRef.get().isDone());
assertFalse(sRef.get().isCompletedExceptionally());
CompletableFuture<RedissonLockEntry> secondPromise = lockPubSub.subscribe(
"test", "redisson_lock__channel__test"
);
Thread thread3 = new Thread(() -> secondPromise.complete(null));
thread3.start();
thread3.join();
assertTrue(secondPromise.isDone());
assertFalse(secondPromise.isCompletedExceptionally());
}
|
@Override
public <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
final Aggregator<? super K, ? super V, VR> aggregator,
final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
return aggregate(initializer, aggregator, NamedInternal.empty(), materialized);
}
|
@Test
public void shouldNotHaveNullInitializerOnAggregate() {
assertThrows(NullPointerException.class, () -> groupedStream.aggregate(null, MockAggregator.TOSTRING_ADDER, Materialized.as("store")));
}
|
@Override
public boolean decide(final SelectStatementContext selectStatementContext, final List<Object> parameters,
final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final ShardingRule rule, final Collection<DataNode> includedDataNodes) {
Collection<String> tableNames = rule.getShardingLogicTableNames(selectStatementContext.getTablesContext().getTableNames());
if (tableNames.isEmpty()) {
return false;
}
includedDataNodes.addAll(getTableDataNodes(rule, tableNames, database));
if (selectStatementContext.isContainsSubquery() || selectStatementContext.isContainsHaving()
|| selectStatementContext.isContainsCombine() || selectStatementContext.isContainsPartialDistinctAggregation()) {
return true;
}
if (!selectStatementContext.isContainsJoinQuery() || rule.isAllTablesInSameDataSource(tableNames)) {
return false;
}
if (1 == tableNames.size() && selectStatementContext.isContainsJoinQuery() && !rule.isAllBindingTables(database, selectStatementContext, tableNames)) {
return true;
}
return tableNames.size() > 1 && !rule.isAllBindingTables(database, selectStatementContext, tableNames);
}
|
@Test
void assertDecideWhenAllTablesIsNotBindingTables() {
SelectStatementContext select = createStatementContext();
when(select.isContainsJoinQuery()).thenReturn(true);
ShardingRule shardingRule = createShardingRule();
ShardingSphereDatabase database = createDatabase(shardingRule);
when(shardingRule.isAllBindingTables(database, select, Arrays.asList("t_order", "t_order_item"))).thenReturn(false);
Collection<DataNode> includedDataNodes = new HashSet<>();
assertTrue(new ShardingSQLFederationDecider().decide(select, Collections.emptyList(), mock(RuleMetaData.class), database, shardingRule, includedDataNodes));
assertThat(includedDataNodes.size(), is(4));
}
|
@SuppressWarnings("unchecked")
@Override
public int run(InputStream stdin, PrintStream out, PrintStream err, List<String> args) throws Exception {
OptionParser p = new OptionParser();
OptionSpec<Integer> count = p.accepts("count", "Record Count").withRequiredArg().ofType(Integer.class);
OptionSpec<String> codec = Util.compressionCodecOption(p);
OptionSpec<Integer> level = Util.compressionLevelOption(p);
OptionSpec<String> file = p.accepts("schema-file", "Schema File").withOptionalArg().ofType(String.class);
OptionSpec<String> inschema = p.accepts("schema", "Schema").withOptionalArg().ofType(String.class);
OptionSpec<Long> seedOpt = p.accepts("seed", "Seed for random").withOptionalArg().ofType(Long.class);
OptionSet opts = p.parse(args.toArray(new String[0]));
if (opts.nonOptionArguments().size() != 1) {
err.println("Usage: outFile (filename or '-' for stdout)");
p.printHelpOn(err);
return 1;
}
args = (List<String>) opts.nonOptionArguments();
String schemastr = inschema.value(opts);
String schemafile = file.value(opts);
Long seed = seedOpt.value(opts);
if (schemastr == null && schemafile == null) {
err.println("Need input schema (--schema-file) or (--schema)");
p.printHelpOn(err);
return 1;
}
Schema schema = (schemafile != null) ? Util.parseSchemaFromFS(schemafile) : new Schema.Parser().parse(schemastr);
DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>());
writer.setCodec(Util.codecFactory(opts, codec, level));
writer.create(schema, Util.fileOrStdout(args.get(0), out));
Integer countValue = count.value(opts);
if (countValue == null) {
err.println("Need count (--count)");
p.printHelpOn(err);
writer.close();
return 1;
}
RandomData rd = seed == null ? new RandomData(schema, countValue) : new RandomData(schema, countValue, seed);
for (Object datum : rd)
writer.append(datum);
writer.close();
return 0;
}
|
@Test
void defaultCodec() throws Exception {
// The default codec for random is deflate
run(Collections.emptyList());
assertTrue(err.toString().contains("Compression codec (default: deflate)"));
}
|
public PrefixedConfiguration(Configuration origin, String prefix) {
this.origin = origin;
this.prefix = prefix;
}
|
@Test
void testPrefixedConfiguration() {
Map<String, String> props = new LinkedHashMap<>();
props.put("dubbo.protocol.name", "dubbo");
props.put("dubbo.protocol.port", "1234");
props.put("dubbo.protocols.rest.port", "2345");
InmemoryConfiguration inmemoryConfiguration = new InmemoryConfiguration();
inmemoryConfiguration.addProperties(props);
// prefixed over InmemoryConfiguration
PrefixedConfiguration prefixedConfiguration =
new PrefixedConfiguration(inmemoryConfiguration, "dubbo.protocol");
Assertions.assertEquals("dubbo", prefixedConfiguration.getProperty("name"));
Assertions.assertEquals("1234", prefixedConfiguration.getProperty("port"));
prefixedConfiguration = new PrefixedConfiguration(inmemoryConfiguration, "dubbo.protocols.rest");
Assertions.assertEquals("2345", prefixedConfiguration.getProperty("port"));
// prefixed over composite configuration
CompositeConfiguration compositeConfiguration = new CompositeConfiguration();
compositeConfiguration.addConfiguration(inmemoryConfiguration);
prefixedConfiguration = new PrefixedConfiguration(compositeConfiguration, "dubbo.protocols.rest");
Assertions.assertEquals("2345", prefixedConfiguration.getProperty("port"));
}
|
public static String removeCRLF(String s) {
return s
.replace("\r", "")
.replace("\n", "");
}
|
@Test
public void testSimpleCRLF() {
    String out = StringHelper.removeCRLF("hello");
    assertEquals("hello", out);
    assertTrue(!out.contains("\r"), "Should not contain \\r");
    assertTrue(!out.contains("\n"), "Should not contain \\n");
    out = StringHelper.removeCRLF("hello\r\n");
    assertEquals("hello", out);
    assertTrue(!out.contains("\r"), "Should not contain \\r");
    assertTrue(!out.contains("\n"), "Should not contain \\n");
    out = StringHelper.removeCRLF("\r\nhe\r\nllo\n");
    assertEquals("hello", out);
    assertTrue(!out.contains("\r"), "Should not contain \\r");
    assertTrue(!out.contains("\n"), "Should not contain \\n");
    out = StringHelper.removeCRLF("hello" + System.lineSeparator());
    assertEquals("hello", out);
    assertTrue(!out.contains(System.lineSeparator()), "Should not contain the line separator");
}
|
@Override
public boolean processArgument(final ShenyuRequest shenyuRequest, final Annotation annotation, final Object arg) {
RequestTemplate requestTemplate = shenyuRequest.getRequestTemplate();
RequestParam requestParam = ANNOTATION.cast(annotation);
String name = requestParam.value();
checkState(emptyToNull(name) != null || arg instanceof Map, "RequestParam.value() was empty on parameter %s#%s",
requestTemplate.getMethod().getDeclaringClass().getSimpleName(), requestTemplate.getMethod().getName());
StringBuilder pathResult = new StringBuilder(requestTemplate.getPath());
Map<Object, Object> params = Maps.newHashMap();
if (!(arg instanceof Map) && !(arg instanceof MultipartFile)) {
params.put(name, arg);
} else if (arg instanceof Map) {
params = (Map<Object, Object>) arg;
}
params.forEach((key, value) -> {
if (pathResult.indexOf("?") > 0) {
pathResult.append("&");
} else {
pathResult.append("?");
}
pathResult.append(key).append("=").append(value);
});
shenyuRequest.setUrl(requestTemplate.getUrl() + pathResult);
return true;
}
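The query-string assembly above appends '?' for the first parameter and '&' afterwards by checking whether the path already contains a '?'. A self-contained sketch of that accumulation:
import java.util.LinkedHashMap;
import java.util.Map;

public class QueryStringSketch {
    public static void main(String[] args) {
        Map<Object, Object> params = new LinkedHashMap<>();
        params.put("id", 42);
        params.put("name", "shenyu");
        StringBuilder path = new StringBuilder("/path");
        params.forEach((key, value) -> {
            // '?' once, '&' for every parameter after the first
            path.append(path.indexOf("?") > 0 ? "&" : "?").append(key).append("=").append(value);
        });
        System.out.println(path); // /path?id=42&name=shenyu
    }
}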
|
@Test
public void processArgumentNullTest() {
RequestTemplate template = new RequestTemplate(Void.class, method1, "method1", "/dev/url/param", "", "/path", ShenyuRequest.HttpMethod.GET, null, null, null);
this.request = ShenyuRequest.create(ShenyuRequest.HttpMethod.POST, "", Maps.newHashMap(), "", "test", template);
final RequestParam param = spy(RequestParam.class);
when(param.value()).thenReturn("");
assertThrows(IllegalStateException.class, () -> processor.processArgument(request, param, ""));
}
|
public String convertUnicodeCharacterRepresentation(String input) {
final char[] chars = input.toCharArray();
int nonAsciiCharCount = countNonAsciiCharacters(chars);
if(! (input.contains("\\u") || input.contains("\\U")) && nonAsciiCharCount == 0)
return input;
int replacedNonAsciiCharacterCount = 0;
final char[] result = nonAsciiCharCount == 0 ? chars : new char[chars.length + 5 * nonAsciiCharCount];
for (int offset = 0; offset < chars.length; offset++) {
int resultOffset = offset + 5 * replacedNonAsciiCharacterCount;
final char c = chars[offset];
        // the bounds check guards against a trailing backslash
        if (c == '\\' && offset + 1 < chars.length && (chars[offset + 1] == 'u' || chars[offset + 1] == 'U')) {
putFormattedUnicodeRepresentation(chars, offset, result, resultOffset);
offset+=5;
} else if(c <= 127) {
if(resultOffset >= result.length ) {
throw new AssertionError(input + "//" + new String(result));
}
result[resultOffset] = c;
} else {
putEncodedNonAsciiCharacter(c, result, resultOffset);
replacedNonAsciiCharacterCount++;
}
}
return new String(result);
}
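The sizing arithmetic above reserves chars.length + 5 * nonAsciiCharCount slots because each non-ASCII character becomes a six-character escape: one original slot plus five extra. A quick check of that expansion factor:
public class EscapeSizingSketch {
    public static void main(String[] args) {
        char c = 0x00E9; // a non-ASCII character
        String escape = String.format("\\u%04X", (int) c);
        // prints "\u00E9 has length 6", i.e. 1 original slot + 5 extra
        System.out.println(escape + " has length " + escape.length());
    }
}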
|
@Test
public void convertsUnicodeToUpperCase(){
final FormatTranslation formatTranslation = new FormatTranslation();
assertThat(formatTranslation.convertUnicodeCharacterRepresentation("u"), CoreMatchers.equalTo("u"));
assertThat(formatTranslation.convertUnicodeCharacterRepresentation("\\Uabcde"), CoreMatchers.equalTo("\\uABCDe"));
assertThat(formatTranslation.convertUnicodeCharacterRepresentation("\\uabcde"), CoreMatchers.equalTo("\\uABCDe"));
assertThat(formatTranslation.convertUnicodeCharacterRepresentation("1\\Uabcde"), CoreMatchers.equalTo("1\\uABCDe"));
}
|
public synchronized void createInstance(List<BigtableResourceManagerCluster> clusters)
throws BigtableResourceManagerException {
// Check to see if instance already exists, and throw error if it does
if (hasInstance) {
LOG.warn(
"Skipping instance creation. Instance was already created or static instance was passed. Reusing : {}.",
instanceId);
return;
}
LOG.info("Creating instance {} in project {}.", instanceId, projectId);
// Create instance request object and add all the given clusters to the request
CreateInstanceRequest request = CreateInstanceRequest.of(instanceId);
for (BigtableResourceManagerCluster cluster : clusters) {
request.addCluster(
cluster.clusterId(), cluster.zone(), cluster.numNodes(), cluster.storageType());
}
// Send the instance request to Google Cloud
try (BigtableInstanceAdminClient instanceAdminClient =
bigtableResourceManagerClientFactory.bigtableInstanceAdminClient()) {
instanceAdminClient.createInstance(request);
} catch (Exception e) {
throw new BigtableResourceManagerException(
"Failed to create instance " + instanceId + ".", e);
}
hasInstance = true;
this.clusters = clusters;
LOG.info("Successfully created instance {}.", instanceId);
}
|
@Test
public void testCreateInstanceShouldThrowErrorWhenInstanceAdminClientFailsToClose() {
BigtableInstanceAdminClient mockClient =
bigtableResourceManagerClientFactory.bigtableInstanceAdminClient();
doThrow(RuntimeException.class).when(mockClient).close();
assertThrows(BigtableResourceManagerException.class, () -> testManager.createInstance(cluster));
}
|
public IssueQuery create(SearchRequest request) {
try (DbSession dbSession = dbClient.openSession(false)) {
final ZoneId timeZone = parseTimeZone(request.getTimeZone()).orElse(clock.getZone());
Collection<RuleDto> ruleDtos = ruleKeysToRuleId(dbSession, request.getRules());
Collection<String> ruleUuids = ruleDtos.stream().map(RuleDto::getUuid).collect(Collectors.toSet());
Collection<String> issueKeys = collectIssueKeys(dbSession, request);
      // Pad with a placeholder UUID when some requested rule keys did not resolve,
      // so that the query cannot accidentally match issues of unrelated rules.
      if (request.getRules() != null && request.getRules().stream().collect(Collectors.toSet()).size() != ruleDtos.size()) {
        ruleUuids.add("non-existing-uuid");
      }
IssueQuery.Builder builder = IssueQuery.builder()
.issueKeys(issueKeys)
.severities(request.getSeverities())
.cleanCodeAttributesCategories(request.getCleanCodeAttributesCategories())
.impactSoftwareQualities(request.getImpactSoftwareQualities())
.impactSeverities(request.getImpactSeverities())
.statuses(request.getStatuses())
.resolutions(request.getResolutions())
.issueStatuses(request.getIssueStatuses())
.resolved(request.getResolved())
.prioritizedRule(request.getPrioritizedRule())
.rules(ruleDtos)
.ruleUuids(ruleUuids)
.assigneeUuids(request.getAssigneeUuids())
.authors(request.getAuthors())
.scopes(request.getScopes())
.languages(request.getLanguages())
.tags(request.getTags())
.types(request.getTypes())
.pciDss32(request.getPciDss32())
.pciDss40(request.getPciDss40())
.owaspAsvs40(request.getOwaspAsvs40())
.owaspAsvsLevel(request.getOwaspAsvsLevel())
.owaspTop10(request.getOwaspTop10())
.owaspTop10For2021(request.getOwaspTop10For2021())
.stigAsdR5V3(request.getStigAsdV5R3())
.casa(request.getCasa())
.sansTop25(request.getSansTop25())
.cwe(request.getCwe())
.sonarsourceSecurity(request.getSonarsourceSecurity())
.assigned(request.getAssigned())
.createdAt(parseStartingDateOrDateTime(request.getCreatedAt(), timeZone))
.createdBefore(parseEndingDateOrDateTime(request.getCreatedBefore(), timeZone))
.facetMode(request.getFacetMode())
.timeZone(timeZone)
.codeVariants(request.getCodeVariants());
List<ComponentDto> allComponents = new ArrayList<>();
boolean effectiveOnComponentOnly = mergeDeprecatedComponentParameters(dbSession, request, allComponents);
addComponentParameters(builder, dbSession, effectiveOnComponentOnly, allComponents, request);
setCreatedAfterFromRequest(dbSession, builder, request, allComponents, timeZone);
String sort = request.getSort();
if (!isNullOrEmpty(sort)) {
builder.sort(sort);
builder.asc(request.getAsc());
}
return builder.build();
}
}
|
@Test
public void fail_if_components_and_components_uuid_params_are_set_at_the_same_time() {
SearchRequest request = new SearchRequest()
.setComponentKeys(singletonList("foo"))
.setComponentUuids(singletonList("bar"));
assertThatThrownBy(() -> underTest.create(request))
.isInstanceOf(IllegalArgumentException.class)
.hasMessageContaining("At most one of the following parameters can be provided: components and componentUuids");
}
|
public static <V> TimestampedValue<V> of(V value, Instant timestamp) {
return new TimestampedValue<>(value, timestamp);
}
|
@Test
public void testCoderEncodeDecodeEquals() throws Exception {
CoderProperties.coderDecodeEncodeEqual(
CODER, TimestampedValue.of(GlobalWindow.INSTANCE, Instant.now()));
}
|
@Override
public String serialize(SarifSchema210 sarif210) {
try {
return mapper
.writerWithDefaultPrettyPrinter()
.writeValueAsString(sarif210);
} catch (JsonProcessingException e) {
throw new IllegalStateException("Unable to serialize SARIF", e);
}
}
|
@Test
public void serialize() {
SarifSchema210 sarif210 = new SarifSchema210()
.with$schema(URI.create("http://json.schemastore.org/sarif-2.1.0-rtm.4"))
.withVersion(SarifSchema210.Version._2_1_0)
.withRuns(List.of(new Run().withResults(List.of())));
String result = serializer.serialize(sarif210);
JsonAssert.assertJson(result).isSimilarTo(SARIF_JSON);
}
|
public Cookie decode(String header) {
final int headerLen = checkNotNull(header, "header").length();
if (headerLen == 0) {
return null;
}
CookieBuilder cookieBuilder = null;
loop: for (int i = 0;;) {
// Skip spaces and separators.
for (;;) {
if (i == headerLen) {
break loop;
}
char c = header.charAt(i);
if (c == ',') {
// Having multiple cookies in a single Set-Cookie header is
// deprecated, modern browsers only parse the first one
break loop;
} else if (c == '\t' || c == '\n' || c == 0x0b || c == '\f'
|| c == '\r' || c == ' ' || c == ';') {
i++;
continue;
}
break;
}
int nameBegin = i;
int nameEnd;
int valueBegin;
int valueEnd;
for (;;) {
char curChar = header.charAt(i);
if (curChar == ';') {
// NAME; (no value till ';')
nameEnd = i;
valueBegin = valueEnd = -1;
break;
} else if (curChar == '=') {
// NAME=VALUE
nameEnd = i;
i++;
if (i == headerLen) {
// NAME= (empty value, i.e. nothing after '=')
valueBegin = valueEnd = 0;
break;
}
valueBegin = i;
// NAME=VALUE;
int semiPos = header.indexOf(';', i);
valueEnd = i = semiPos > 0 ? semiPos : headerLen;
break;
} else {
i++;
}
if (i == headerLen) {
// NAME (no value till the end of string)
nameEnd = headerLen;
valueBegin = valueEnd = -1;
break;
}
}
if (valueEnd > 0 && header.charAt(valueEnd - 1) == ',') {
// old multiple cookies separator, skipping it
valueEnd--;
}
if (cookieBuilder == null) {
// cookie name-value pair
DefaultCookie cookie = initCookie(header, nameBegin, nameEnd, valueBegin, valueEnd);
if (cookie == null) {
return null;
}
cookieBuilder = new CookieBuilder(cookie, header);
} else {
// cookie attribute
cookieBuilder.appendAttribute(nameBegin, nameEnd, valueBegin, valueEnd);
}
}
return cookieBuilder != null ? cookieBuilder.cookie() : null;
}
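A short usage sketch of the parser above through Netty's ClientCookieDecoder, the same entry point the test below uses; the first name=value pair becomes the cookie and subsequent pairs are folded in as attributes:
import io.netty.handler.codec.http.cookie.ClientCookieDecoder;
import io.netty.handler.codec.http.cookie.Cookie;

public class CookieDecodeSketch {
    public static void main(String[] args) {
        // First name=value pair becomes the cookie; later pairs are treated as attributes.
        Cookie cookie = ClientCookieDecoder.STRICT.decode("id=42; Path=/; Secure");
        System.out.println(cookie.name() + "=" + cookie.value()); // id=42
        System.out.println(cookie.path());                        // /
        System.out.println(cookie.isSecure());                    // true
    }
}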
|
@Test
public void testDecodingSingleCookieV1() {
String cookieString = "myCookie=myValue;max-age=50;path=/apathsomewhere;domain=.adomainsomewhere"
+ ";secure;comment=this is a comment;version=1;";
Cookie cookie = ClientCookieDecoder.STRICT.decode(cookieString);
assertEquals("myValue", cookie.value());
assertNotNull(cookie);
assertEquals(".adomainsomewhere", cookie.domain());
assertEquals(50, cookie.maxAge());
assertEquals("/apathsomewhere", cookie.path());
assertTrue(cookie.isSecure());
}
|
public static void validateKerberosPrincipal(
KerberosPrincipal kerberosPrincipal) throws IOException {
if (!StringUtils.isEmpty(kerberosPrincipal.getPrincipalName())) {
if (!kerberosPrincipal.getPrincipalName().contains("/")) {
throw new IllegalArgumentException(String.format(
RestApiErrorMessages.ERROR_KERBEROS_PRINCIPAL_NAME_FORMAT,
kerberosPrincipal.getPrincipalName()));
}
}
}
|
@Test
public void testKerberosPrincipalNameFormat() throws IOException {
Service app = createValidApplication("comp-a");
KerberosPrincipal kp = new KerberosPrincipal();
kp.setPrincipalName("[email protected]");
app.setKerberosPrincipal(kp);
try {
ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal());
Assert.fail(EXCEPTION_PREFIX + "service with invalid principal name " +
"format.");
} catch (IllegalArgumentException e) {
assertEquals(
String.format(
RestApiErrorMessages.ERROR_KERBEROS_PRINCIPAL_NAME_FORMAT,
kp.getPrincipalName()),
e.getMessage());
}
kp.setPrincipalName("user/[email protected]");
try {
ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal());
} catch (IllegalArgumentException e) {
Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
}
kp.setPrincipalName(null);
kp.setKeytab(null);
try {
ServiceApiUtil.validateKerberosPrincipal(app.getKerberosPrincipal());
} catch (NullPointerException e) {
Assert.fail(NO_EXCEPTION_PREFIX + e.getMessage());
}
}
|
@Override
public Set<String> listTopicNames() {
try {
return ExecutorUtil.executeWithRetries(
() -> adminClient.get().listTopics().names().get(),
ExecutorUtil.RetryBehaviour.ON_RETRYABLE);
} catch (final Exception e) {
throw new KafkaResponseGetFailedException("Failed to retrieve Kafka Topic names", e);
}
}
|
@Test
public void shouldRetryListTopics() {
    // Given:
givenTopicExists("topic1", 1, 1);
givenTopicExists("topic2", 1, 2);
when(adminClient.listTopics())
.thenAnswer(listTopicResult(new NotControllerException("Not Controller")))
.thenAnswer(listTopicResult());
// When:
kafkaTopicClient.listTopicNames();
// Then:
verify(adminClient, times(2)).listTopics();
}
|
void serialize(OutputStream out, TransportSecurityOptions options) {
try {
mapper.writerWithDefaultPrettyPrinter().writeValue(out, toTransportSecurityOptionsEntity(options));
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
|
@Test
void can_serialize_options_without_authorized_peers() throws IOException {
TransportSecurityOptions options = new TransportSecurityOptions.Builder()
.withCertificates(Paths.get("certs.pem"), Paths.get("myhost.key"))
.withCaCertificates(Paths.get("my_cas.pem"))
.withAcceptedCiphers(List.of("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_AES_256_GCM_SHA384"))
.withAcceptedProtocols(List.of("TLSv1.2"))
.withHostnameValidationDisabled(true)
.build();
File outputFile = File.createTempFile("junit", null, tempDirectory);
try (OutputStream out = Files.newOutputStream(outputFile.toPath())) {
new TransportSecurityOptionsJsonSerializer().serialize(out, options);
}
String expectedOutput = new String(Files.readAllBytes(TEST_CONFIG_FILE));
String actualOutput = new String(Files.readAllBytes(outputFile.toPath()));
assertJsonEquals(expectedOutput, actualOutput);
}
|
public static SuccessResponse<?> noContent() {
return SuccessResponse.builder().data(Map.of()).build();
}
|
@Test
@DisplayName("SuccessResponse.from() - data가 존재하는 성공 응답")
public void successResponseWithNoContent() {
// When
SuccessResponse<?> response = SuccessResponse.noContent();
// Then
assertEquals("2000", response.getCode());
assertEquals(Map.of(), response.getData());
}
|
public PDPageLabels getPageLabels() throws IOException
{
COSDictionary dict = root.getCOSDictionary(COSName.PAGE_LABELS);
return dict == null ? null : new PDPageLabels(document, dict);
}
|
@Test
void retrievePageLabelsOnMalformedPdf() throws IOException
{
try (PDDocument doc = Loader
.loadPDF(RandomAccessReadBuffer.createBufferFromStream(
TestPDDocumentCatalog.class.getResourceAsStream("badpagelabels.pdf"))))
{
PDDocumentCatalog cat = doc.getDocumentCatalog();
// getLabelsByPageIndices() should not throw an exception
cat.getPageLabels().getLabelsByPageIndices();
}
}
|
public static Impl join(By clause) {
return new Impl(new JoinArguments(clause));
}
|
@Test
@Category(NeedsRunner.class)
public void testMismatchingKeys() {
PCollection<Row> pc1 =
pipeline
.apply(
"Create1",
Create.of(Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build()))
.setRowSchema(CG_SCHEMA_1);
PCollection<Row> pc2 =
pipeline
.apply(
"Create2",
Create.of(Row.withSchema(CG_SCHEMA_1).addValues("user1", 9, "us").build()))
.setRowSchema(CG_SCHEMA_1);
thrown.expect(IllegalArgumentException.class);
PCollectionTuple.of("pc1", pc1, "pc2", pc2)
.apply(
"CoGroup",
CoGroup.join("pc1", By.fieldNames("user")).join("pc2", By.fieldNames("count")));
pipeline.run();
}
|
public ArrayList<AnalysisResult<T>> getOutliers(Track<T> track) {
// the stream is wonky due to the raw type, probably could be improved
return track.points().stream()
.map(point -> analyzePoint(point, track))
.filter(analysisResult -> analysisResult.isOutlier())
.collect(toCollection(ArrayList::new));
}
|
@Test
public void testMissingAltitude_aka_modeCSwap_2() {
/*
* This is another mode C swap test.
*
* This test contains a much smaller change (from 1k to 0 instead of from 22k to 0).
* This test data also has a missing altitude value at the very front of the track.
*/
Track<NopHit> testTrack2 = createTrackFromResource(VerticalOutlierDetector.class, "AltitudeOutlier_2.txt");
Collection<AnalysisResult<NopHit>> outliers = (new VerticalOutlierDetector<NopHit>()).getOutliers(testTrack2);
confirmExactlyTheseOutliers(
outliers,
"[RH],AGW,ERI_B,09/20/2017,17:36:47.160,,,,1200,000,84,247,042.22935,-079.88024,106,,14.12,10.6,,,,ERI,,,,,???,,,,,4277,???,,00,,,1,,0,,95.17,90.78,{RH}",
"[RH],AGW,ERI_B,09/20/2017,17:39:00.896,,,,1200,000,87,246,042.20291,-079.94142,106,,11.19,9.45,,,,ERI,,,,,???,,,,,5502,???,,00,,,1,,0,,92.47,89.19,{RH}"
);
}
|
@Override
public Node upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
try {
final InputStream in;
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
in = new SDSTripleCryptEncryptorFeature(session, nodeid).encrypt(file, local.getInputStream(), status);
}
else {
in = local.getInputStream();
}
final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
.directS3Upload(true)
.timestampModification(status.getModified() != null ? new DateTime(status.getModified()) : null)
.timestampCreation(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
.size(TransferStatus.UNKNOWN_LENGTH == status.getLength() ? null : status.getLength())
.parentId(Long.parseLong(nodeid.getVersionId(file.getParent())))
.name(file.getName());
final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
.createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
if(log.isDebugEnabled()) {
log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
}
final Map<Integer, TransferStatus> etags = new HashMap<>();
final List<PresignedUrl> presignedUrls = this.retrievePresignedUrls(createFileUploadResponse, status);
final List<Future<TransferStatus>> parts = new ArrayList<>();
try {
final String random = new UUIDRandomStringService().random();
// Full size of file
final long size = status.getLength() + status.getOffset();
long offset = 0;
long remaining = status.getLength();
for(int partNumber = 1; remaining >= 0; partNumber++) {
final long length = Math.min(Math.max((size / (MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
final PresignedUrl presignedUrl = presignedUrls.get(partNumber - 1);
if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
final Local temporary = temp.create(String.format("%s-%d", random, partNumber));
if(log.isDebugEnabled()) {
log.debug(String.format("Encrypted contents for part %d to %s", partNumber, temporary));
}
final FileBuffer buffer = new FileBuffer(temporary);
new StreamCopier(status, StreamProgress.noop).withAutoclose(false).withLimit(length)
.transfer(in, new BufferOutputStream(buffer));
parts.add(this.submit(pool, file, temporary, buffer, throttle, listener, status,
presignedUrl.getUrl(), presignedUrl.getPartNumber(), 0L, length, callback));
}
else {
parts.add(this.submit(pool, file, local, Buffer.noop, throttle, listener, status,
presignedUrl.getUrl(), presignedUrl.getPartNumber(), offset, length, callback));
}
remaining -= length;
offset += length;
if(0L == remaining) {
break;
}
}
}
finally {
in.close();
}
Interruptibles.awaitAll(parts)
.forEach(part -> etags.put(part.getPart(), part));
final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
.keepShareLinks(new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep"))
.resolutionStrategy(CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE);
if(status.getFilekey() != null) {
final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
final FileKey fileKey = reader.readValue(status.getFilekey().array());
final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
TripleCryptConverter.toCryptoPlainFileKey(fileKey),
TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer())
);
completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
}
etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
new S3FileUploadPart().partEtag(value.getChecksum().hash).partNumber(key)));
if(log.isDebugEnabled()) {
log.debug(String.format("Complete file upload with %s for %s", completeS3FileUploadRequest, file));
}
new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
// Polling
return new SDSUploadService(session, nodeid).await(file, status, createFileUploadResponse.getUploadId()).getNode();
}
catch(CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
}
catch(ApiException e) {
throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
}
catch(IOException e) {
throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
}
finally {
temp.shutdown();
// Cancel future tasks
pool.shutdown(false);
}
}
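The part-length expression above, Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining), grows parts beyond the configured minimum exactly when the file would otherwise blow past the part-count cap. A standalone sketch of that sizing, assuming a cap of 10,000 parts (the real constant may differ):
public class PartSizingSketch {
    static final int MAXIMUM_UPLOAD_PARTS = 10_000; // assumption mirroring S3-style limits

    public static void main(String[] args) {
        long size = 100L * 1024 * 1024 * 1024; // 100 GiB upload
        long partsize = 10L * 1024 * 1024;     // configured 10 MiB minimum part size
        long remaining = size;
        int parts = 0;
        while (remaining > 0) {
            // parts grow past the minimum so the count never exceeds the cap
            long length = Math.min(Math.max(size / (MAXIMUM_UPLOAD_PARTS - 1), partsize), remaining);
            remaining -= length;
            parts++;
        }
        System.out.println(parts); // stays <= MAXIMUM_UPLOAD_PARTS
    }
}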
|
@Test
public void testUploadBelowMultipartSize() throws Exception {
final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
final SDSDirectS3UploadFeature feature = new SDSDirectS3UploadFeature(session, nodeid, new SDSDelegatingWriteFeature(session, nodeid, new SDSDirectS3WriteFeature(session, nodeid)));
final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
final byte[] random = RandomUtils.nextBytes(578);
final OutputStream out = local.getOutputStream(false);
IOUtils.write(random, out);
out.close();
final TransferStatus status = new TransferStatus();
status.setLength(random.length);
final Node node = feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
new DisabledStreamListener(), status, new DisabledLoginCallback());
assertTrue(new SDSFindFeature(session, nodeid).find(test));
final PathAttributes attributes = new SDSAttributesFinderFeature(session, nodeid).find(test);
assertEquals(random.length, attributes.getSize());
assertEquals(new SDSAttributesAdapter(session).toAttributes(node), attributes);
new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
local.delete();
}
|
@Override
public Result apply(PathData item, int depth) throws IOException {
String name = getPath(item).getName();
if (!caseSensitive) {
name = StringUtils.toLowerCase(name);
}
if (globPattern.matches(name)) {
return Result.PASS;
} else {
return Result.FAIL;
}
}
|
@Test
public void applyMixedCase() throws IOException {
setup("name");
PathData item = new PathData("/directory/path/NaMe", mockFs.getConf());
assertEquals(Result.FAIL, name.apply(item, -1));
}
|
@Override
public void doAlarm(List<AlarmMessage> alarmMessages) throws Exception {
Map<String, WeLinkSettings> settingsMap = alarmRulesWatcher.getWeLinkSettings();
if (settingsMap == null || settingsMap.isEmpty()) {
return;
}
Map<String, List<AlarmMessage>> groupedMessages = groupMessagesByHook(alarmMessages);
for (Map.Entry<String, List<AlarmMessage>> entry : groupedMessages.entrySet()) {
var hookName = entry.getKey();
var messages = entry.getValue();
var setting = settingsMap.get(hookName);
if (setting == null || CollectionUtils.isEmpty(setting.getWebhooks()) || CollectionUtils.isEmpty(
messages)) {
continue;
}
for (final var webHookUrl : setting.getWebhooks()) {
final var accessToken = getAccessToken(webHookUrl);
for (final var alarmMessage : messages) {
final var content = String.format(
setting.getTextTemplate(),
alarmMessage.getAlarmMessage()
);
sendAlarmMessage(webHookUrl, accessToken, content);
}
}
}
}
|
@Test
public void testWeLinkDoAlarm() throws Exception {
List<WeLinkSettings.WebHookUrl> webHooks = new ArrayList<>();
webHooks.add(new WeLinkSettings.WebHookUrl("clientId", "clientSecret",
"http://127.0.0.1:" + SERVER.httpPort() + "/welinkhook/api/auth/v2/tickets",
"http://127.0.0.1:" + SERVER.httpPort() + "/welinkhook/api/welinkim/v1/im-service/chat/group-chat",
"robotName", "1,2,3"
));
Rules rules = new Rules();
String template = "Apache SkyWalking Alarm: \n %s.";
WeLinkSettings setting1 = new WeLinkSettings("setting1", AlarmHooksType.welink, true);
setting1.setWebhooks(webHooks);
setting1.setTextTemplate(template);
WeLinkSettings setting2 = new WeLinkSettings("setting2", AlarmHooksType.welink, false);
setting2.setWebhooks(webHooks);
setting2.setTextTemplate(template);
rules.getWeLinkSettingsMap().put(setting1.getFormattedName(), setting1);
rules.getWeLinkSettingsMap().put(setting2.getFormattedName(), setting2);
AlarmRulesWatcher alarmRulesWatcher = new AlarmRulesWatcher(rules, null);
WeLinkHookCallback welinkHookCallback = new WeLinkHookCallback(alarmRulesWatcher);
List<AlarmMessage> alarmMessages = new ArrayList<>(2);
AlarmMessage alarmMessage = new AlarmMessage();
alarmMessage.setScopeId(DefaultScopeDefine.SERVICE);
alarmMessage.setRuleName("service_resp_time_rule");
alarmMessage.setAlarmMessage("alarmMessage with [DefaultScopeDefine.All]");
alarmMessage.getHooks().add(setting1.getFormattedName());
alarmMessages.add(alarmMessage);
AlarmMessage anotherAlarmMessage = new AlarmMessage();
anotherAlarmMessage.setRuleName("service_resp_time_rule_2");
anotherAlarmMessage.setScopeId(DefaultScopeDefine.ENDPOINT);
anotherAlarmMessage.setAlarmMessage("anotherAlarmMessage with [DefaultScopeDefine.Endpoint]");
anotherAlarmMessage.getHooks().add(setting2.getFormattedName());
alarmMessages.add(anotherAlarmMessage);
welinkHookCallback.doAlarm(alarmMessages);
Assertions.assertTrue(IS_SUCCESS.get());
}
|
@Nullable
public Integer getIntValue(@IntFormat final int formatType,
@IntRange(from = 0) final int offset) {
if ((offset + getTypeLen(formatType)) > size()) return null;
return switch (formatType) {
case FORMAT_UINT8 -> unsignedByteToInt(mValue[offset]);
case FORMAT_UINT16_LE -> unsignedBytesToInt(mValue[offset], mValue[offset + 1]);
case FORMAT_UINT16_BE -> unsignedBytesToInt(mValue[offset + 1], mValue[offset]);
case FORMAT_UINT24_LE -> unsignedBytesToInt(
mValue[offset],
mValue[offset + 1],
mValue[offset + 2],
(byte) 0
);
case FORMAT_UINT24_BE -> unsignedBytesToInt(
mValue[offset + 2],
mValue[offset + 1],
mValue[offset],
(byte) 0
);
case FORMAT_UINT32_LE -> unsignedBytesToInt(
mValue[offset],
mValue[offset + 1],
mValue[offset + 2],
mValue[offset + 3]
);
case FORMAT_UINT32_BE -> unsignedBytesToInt(
mValue[offset + 3],
mValue[offset + 2],
mValue[offset + 1],
mValue[offset]
);
case FORMAT_SINT8 -> unsignedToSigned(unsignedByteToInt(mValue[offset]), 8);
case FORMAT_SINT16_LE -> unsignedToSigned(unsignedBytesToInt(mValue[offset],
mValue[offset + 1]), 16);
case FORMAT_SINT16_BE -> unsignedToSigned(unsignedBytesToInt(mValue[offset + 1],
mValue[offset]), 16);
case FORMAT_SINT24_LE -> unsignedToSigned(unsignedBytesToInt(
mValue[offset],
mValue[offset + 1],
mValue[offset + 2],
(byte) 0
), 24);
            case FORMAT_SINT24_BE -> unsignedToSigned(unsignedBytesToInt(
                    mValue[offset + 2],
                    mValue[offset + 1],
                    mValue[offset],
                    (byte) 0
            ), 24);
case FORMAT_SINT32_LE -> unsignedToSigned(unsignedBytesToInt(
mValue[offset],
mValue[offset + 1],
mValue[offset + 2],
mValue[offset + 3]
), 32);
case FORMAT_SINT32_BE -> unsignedToSigned(unsignedBytesToInt(
mValue[offset + 3],
mValue[offset + 2],
mValue[offset + 1],
mValue[offset]
), 32);
default -> null;
};
}
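The SINT16_LE branch above composes two bytes little-endian, then sign-extends at bit 15. The sketch below reproduces the exact case from the test (0xD0, 0xE7 -> -6192); the unsignedToSigned helper mirrors the usual two's-complement conversion and is an assumption, not the library's code:
public class SignedLeSketch {
    // Assumed two's-complement conversion: flip values with the sign bit set.
    static int unsignedToSigned(int unsigned, int size) {
        if ((unsigned & (1 << (size - 1))) != 0) {
            unsigned = -((1 << (size - 1)) - (unsigned & ((1 << (size - 1)) - 1)));
        }
        return unsigned;
    }

    public static void main(String[] args) {
        // Little-endian: 0xD0 is the low byte, 0xE7 the high byte -> 59344 unsigned.
        int unsigned = (0xD0 & 0xFF) | ((0xE7 & 0xFF) << 8);
        System.out.println(unsignedToSigned(unsigned, 16)); // -6192
    }
}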
|
@Test
public void getValue_SINT16() {
final Data data = new Data(new byte[] { (byte) 0xD0, (byte) 0xE7 });
final int value = data.getIntValue(Data.FORMAT_SINT16_LE, 0);
assertEquals(-6192, value);
}
|
void writeInformationsDetails() throws DocumentException, IOException {
for (final JavaInformations javaInformations : javaInformationsList) {
currentTable = createJavaInformationsTable();
writeSummary(javaInformations);
writeDetails(javaInformations);
addToDocument(currentTable);
}
}
|
@Test
public void testTomcatInformations() throws Exception {
final ByteArrayOutputStream output = new ByteArrayOutputStream();
final PdfDocumentFactory pdfDocumentFactory = new PdfDocumentFactory(TEST_APP, null,
output);
final MBeanServer mBeanServer = MBeans.getPlatformMBeanServer();
final List<ObjectName> mBeans = new ArrayList<>();
try {
mBeans.add(mBeanServer
.registerMBean(new ThreadPool(),
new ObjectName("Catalina:type=ThreadPool,name=jk-8009"))
.getObjectName());
mBeans.add(
mBeanServer
.registerMBean(new GlobalRequestProcessor(),
new ObjectName(
"Catalina:type=GlobalRequestProcessor,name=jk-8009"))
.getObjectName());
TomcatInformations.initMBeans();
final List<JavaInformations> myJavaInformationsList = List
.of(new JavaInformations(null, true));
final Document document = pdfDocumentFactory.createDocument();
document.open();
final PdfJavaInformationsReport pdfReport = new PdfJavaInformationsReport(
myJavaInformationsList, document);
pdfReport.writeInformationsDetails();
document.close();
assertNotEmptyAndClear(output);
mBeans.add(mBeanServer
.registerMBean(new ThreadPool(),
new ObjectName("Catalina:type=ThreadPool,name=jk-8010"))
.getObjectName());
final GlobalRequestProcessor jk8010 = new GlobalRequestProcessor();
jk8010.setrequestCount(0);
mBeans.add(
mBeanServer
.registerMBean(jk8010,
new ObjectName(
"Catalina:type=GlobalRequestProcessor,name=jk-8010"))
.getObjectName());
TomcatInformations.initMBeans();
final List<JavaInformations> myJavaInformationsList2 = List
.of(new JavaInformations(null, true));
final Document document2 = pdfDocumentFactory.createDocument();
document2.open();
final PdfJavaInformationsReport pdfReport2 = new PdfJavaInformationsReport(
myJavaInformationsList2, document2);
pdfReport2.writeInformationsDetails();
document2.close();
assertNotEmptyAndClear(output);
jk8010.setrequestCount(1000);
final List<JavaInformations> myJavaInformationsList3 = List
.of(new JavaInformations(null, true));
final Document document3 = pdfDocumentFactory.createDocument();
document3.open();
final PdfJavaInformationsReport pdfReport3 = new PdfJavaInformationsReport(
myJavaInformationsList3, document3);
pdfReport3.writeInformationsDetails();
document3.close();
assertNotEmptyAndClear(output);
} finally {
for (final ObjectName registeredMBean : mBeans) {
mBeanServer.unregisterMBean(registeredMBean);
}
TomcatInformations.initMBeans();
}
}
|
public RowExpression extract(PlanNode node)
{
return node.accept(new Visitor(domainTranslator, functionAndTypeManager), null);
}
|
@Test
public void testLeftJoinWithFalseInner()
{
List<EquiJoinClause> criteria = ImmutableList.of(new EquiJoinClause(AV, DV));
Map<VariableReferenceExpression, ColumnHandle> leftAssignments = Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(AV, BV, CV)));
TableScanNode leftScan = tableScanNode(leftAssignments);
Map<VariableReferenceExpression, ColumnHandle> rightAssignments = Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(DV, EV, FV)));
TableScanNode rightScan = tableScanNode(rightAssignments);
FilterNode left = filter(leftScan,
and(
lessThan(BV, AV),
lessThan(CV, bigintLiteral(10)),
equals(GV, bigintLiteral(10))));
FilterNode right = filter(rightScan, FALSE_CONSTANT);
PlanNode node = new JoinNode(
Optional.empty(),
newId(),
JoinType.LEFT,
left,
right,
criteria,
ImmutableList.<VariableReferenceExpression>builder()
.addAll(left.getOutputVariables())
.addAll(right.getOutputVariables())
.build(),
Optional.empty(),
Optional.empty(),
Optional.empty(),
Optional.empty(),
ImmutableMap.of());
RowExpression effectivePredicate = effectivePredicateExtractor.extract(node);
// False literal on the right side should be ignored
assertEquals(normalizeConjuncts(effectivePredicate),
normalizeConjuncts(lessThan(BV, AV),
lessThan(CV, bigintLiteral(10)),
or(equals(AV, DV), isNull(DV))));
}
|
Map<ServiceId, HealthEndpoint> extractHealthEndpoints(ApplicationInfo application) {
Map<ServiceId, HealthEndpoint> endpoints = new HashMap<>();
for (HostInfo hostInfo : application.getModel().getHosts()) {
DomainName hostname = DomainName.of(hostInfo.getHostname());
for (ServiceInfo serviceInfo : hostInfo.getServices()) {
ServiceId serviceId = ApplicationInstanceGenerator.getServiceId(application, serviceInfo);
for (PortInfo portInfo : serviceInfo.getPorts()) {
if (portTaggedWith(portInfo, HTTP_HEALTH_PORT_TAGS)) {
StateV1HealthEndpoint endpoint = new StateV1HealthEndpoint(
serviceId,
hostname,
portInfo.getPort(),
targetHealthStaleness,
requestTimeout,
connectionKeepAlive,
executor);
endpoints.put(serviceId, endpoint);
break; // Avoid >1 endpoints per serviceId
}
}
}
}
return endpoints;
}
|
@Test
public void test() {
Map<ServiceId, HealthEndpoint> endpoints = model.extractHealthEndpoints(proxyHostApplicationInfo);
assertEquals(2, endpoints.size());
ApplicationId applicationId = ApplicationId.from("hosted-vespa", "proxy-host", "default");
ClusterId clusterId = new ClusterId("proxy-host");
ServiceId hostAdmin1 = new ServiceId(applicationId, clusterId, ServiceType.HOST_ADMIN, new ConfigId("proxy-host/host1"));
ServiceId hostAdmin2 = new ServiceId(applicationId, clusterId, ServiceType.HOST_ADMIN, new ConfigId("proxy-host/host2"));
HealthEndpoint endpoint1 = endpoints.get(hostAdmin1);
assertNotNull(endpoint1);
assertEquals("http://host1:8080/state/v1/health", endpoint1.description());
HealthEndpoint endpoint2 = endpoints.get(hostAdmin2);
assertNotNull(endpoint2);
assertEquals("http://host2:8080/state/v1/health", endpoint2.description());
Cancellable cancellable = mock(Cancellable.class);
when(executor.scheduleWithFixedDelay(any(), any())).thenReturn(cancellable);
try (HealthMonitor healthMonitor = endpoint1.startMonitoring()) {
assertEquals(ServiceStatus.UNKNOWN, healthMonitor.getStatus().serviceStatus());
}
}
|
@Override
public <T> ModelEnforcement<T> forBundle(
CommittedBundle<T> input, AppliedPTransform<?, ?, ?> consumer) {
if (isReadTransform(consumer)) {
return NoopReadEnforcement.INSTANCE;
}
return new ImmutabilityCheckingEnforcement<>(input, consumer);
}
|
@Test
public void unchangedSucceeds() {
WindowedValue<byte[]> element = WindowedValue.valueInGlobalWindow("bar".getBytes(UTF_8));
CommittedBundle<byte[]> elements =
bundleFactory.createBundle(pcollection).add(element).commit(Instant.now());
ModelEnforcement<byte[]> enforcement = factory.forBundle(elements, consumer);
enforcement.beforeElement(element);
enforcement.afterElement(element);
enforcement.afterFinish(
elements,
StepTransformResult.<byte[]>withoutHold(consumer).build(),
Collections.emptyList());
}
|
static SVNClientManager newSvnClientManager(SvnConfiguration configuration) {
ISVNOptions options = SVNWCUtil.createDefaultOptions(true);
final char[] passwordValue = getCharsOrNull(configuration.password());
final char[] passPhraseValue = getCharsOrNull(configuration.passPhrase());
ISVNAuthenticationManager authManager = SVNWCUtil.createDefaultAuthenticationManager(
null,
configuration.username(),
passwordValue,
configuration.privateKey(),
passPhraseValue,
false);
return SVNClientManager.newInstance(options, authManager);
}
|
@Test
public void newSvnClientManager_whenPasswordNotConfigured_shouldNotReturnNull() {
assertThat(config.password()).isNull();
assertThat(config.passPhrase()).isNull();
assertThat(newSvnClientManager(config)).isNotNull();
}
|
@Override
public String description() {
return "ChronoZonedDateTime.timeLineOrder()";
}
|
@Test
void should_have_description() {
assertThat(comparator.description()).isEqualTo("ChronoZonedDateTime.timeLineOrder()");
}
|
@Override
public Dataset<Row> apply(
final JavaSparkContext jsc,
final SparkSession sparkSession,
final Dataset<Row> rowDataset,
final TypedProperties props) {
final String sqlFile = getStringWithAltKeys(props, SqlTransformerConfig.TRANSFORMER_SQL_FILE);
final FileSystem fs = HadoopFSUtils.getFs(sqlFile, jsc.hadoopConfiguration(), true);
    // temp table names cannot contain dashes, so strip them from the UUID
final String tmpTable = TMP_TABLE.concat(UUID.randomUUID().toString().replace("-", "_"));
LOG.info("Registering tmp table: {}", tmpTable);
rowDataset.createOrReplaceTempView(tmpTable);
try (final Scanner scanner = new Scanner(fs.open(new Path(sqlFile)), "UTF-8")) {
Dataset<Row> rows = null;
      // Each SQL statement is separated by a semicolon, so use that as the scanner delimiter.
scanner.useDelimiter(";");
LOG.info("SQL Query for transformation:");
while (scanner.hasNext()) {
String sqlStr = scanner.next();
sqlStr = sqlStr.replaceAll(SRC_PATTERN, tmpTable).trim();
if (!sqlStr.isEmpty()) {
LOG.info(sqlStr);
// overwrite the same dataset object until the last statement then return.
rows = sparkSession.sql(sqlStr);
}
}
return rows;
} catch (final IOException ioe) {
throw new HoodieTransformExecutionException("Error reading transformer SQL file.", ioe);
} finally {
sparkSession.catalog().dropTempView(tmpTable);
}
}
|
@Test
public void testSqlFileBasedTransformerIncorrectConfig() {
    // Test that the transformer throws the expected Hoodie exception when given an incorrect config.
props.setProperty(
"hoodie.streamer.transformer.sql.file",
UtilitiesTestBase.basePath + "/non-exist-sql-file.sql");
assertThrows(
HoodieTransformException.class,
() -> sqlFileTransformer.apply(jsc, sparkSession, inputDatasetRows, props));
}
|
public static void checkState(boolean isValid, String message) throws IllegalStateException {
if (!isValid) {
throw new IllegalStateException(message);
}
}
|
@Test
public void testCheckStateWithOneArgument() {
try {
Preconditions.checkState(true, "Test message %s", 12);
} catch (IllegalStateException e) {
Assert.fail("Should not throw exception when isValid is true");
}
try {
Preconditions.checkState(false, "Test message %s", 12);
Assert.fail("Should throw exception when isValid is false");
} catch (IllegalStateException e) {
Assert.assertEquals("Should format message", "Test message 12", e.getMessage());
}
}
|
@Override
public MaterializedTable nonWindowed() {
return new KsqlMaterializedTable(inner.nonWindowed());
}
|
@Test
public void shouldCallInnerNonWindowedWithCorrectParamsOnGet() {
// Given:
final MaterializedTable table = materialization.nonWindowed();
givenNoopFilter();
// When:
table.get(aKey, partition);
// Then:
verify(innerNonWindowed).get(aKey, partition, Optional.empty());
}
|
@Override
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
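    // Time the underlying call with a per-method deadline so slow metastore operations
    // can be detected, and rethrow the original cause instead of reflection wrappers.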
try {
Deadline.registerIfNot(socketTimeout);
boolean isTimerStarted = Deadline.startTimer(method.getName());
try {
return method.invoke(base, args);
} finally {
if (isTimerStarted) {
Deadline.stopTimer();
}
}
} catch (UndeclaredThrowableException e) {
throw e.getCause();
} catch (InvocationTargetException e) {
throw e.getCause();
}
}
|
@Test
public void testExceptionDispatch() throws Throwable {
Configuration conf = MetastoreConf.newMetastoreConf();
MetastoreConf.setTimeVar(conf, MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT, 10,
TimeUnit.MILLISECONDS);
RawStoreProxy rsp = new RawStoreProxy(conf, conf, TestStore.class);
try {
rsp.invoke(null, TestStore.class.getMethod("exceptions"), new Object[] {});
fail("an exception is expected");
} catch (IllegalStateException ise) {
// expected
}
Thread.sleep(20);
// this shouldn't throw an exception
rsp.invoke(null, TestStore.class.getMethod("noopMethod"), new Object[] {});
}
|
@Override
public GetTelemetrySubscriptionsResponse getErrorResponse(int throttleTimeMs, Throwable e) {
GetTelemetrySubscriptionsResponseData responseData = new GetTelemetrySubscriptionsResponseData()
.setErrorCode(Errors.forException(e).code())
.setThrottleTimeMs(throttleTimeMs);
return new GetTelemetrySubscriptionsResponse(responseData);
}
|
@Test
public void testGetErrorResponse() {
GetTelemetrySubscriptionsRequest req = new GetTelemetrySubscriptionsRequest(new GetTelemetrySubscriptionsRequestData(), (short) 0);
GetTelemetrySubscriptionsResponse response = req.getErrorResponse(0, Errors.CLUSTER_AUTHORIZATION_FAILED.exception());
assertEquals(Collections.singletonMap(Errors.CLUSTER_AUTHORIZATION_FAILED, 1), response.errorCounts());
}
|
@Udf
public String extractProtocol(
    @UdfParameter(description = "a valid URL to extract a protocol from") final String input) {
return UrlParser.extract(input, URI::getScheme);
}
|
@Test
public void shouldReturnNullIfNoProtocol() {
assertThat(
extractUdf.extractProtocol("///current/ksql/docs/syntax-reference.html#scalar-functions"),
nullValue());
}
|
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint,
TimeLimiter timeLimiter, String methodName) throws Throwable {
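    // Apply the time limiter to the reactive result; only Mono and Flux return types
    // are supported, and anything else is rejected with IllegalReturnTypeException.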
Object returnValue = proceedingJoinPoint.proceed();
if (Flux.class.isAssignableFrom(returnValue.getClass())) {
Flux<?> fluxReturnValue = (Flux<?>) returnValue;
return fluxReturnValue.transformDeferred(TimeLimiterOperator.of(timeLimiter));
} else if (Mono.class.isAssignableFrom(returnValue.getClass())) {
Mono<?> monoReturnValue = (Mono<?>) returnValue;
return monoReturnValue.transformDeferred(TimeLimiterOperator.of(timeLimiter));
} else {
throw new IllegalReturnTypeException(returnValue.getClass(), methodName,
"Reactor expects Mono/Flux.");
}
}
|
@Test
  public void shouldThrowIllegalReturnTypeExceptionWithNonReactorType() throws Throwable {
TimeLimiter timeLimiter = TimeLimiter.ofDefaults("test");
when(proceedingJoinPoint.proceed()).thenReturn("NOT REACTOR TYPE");
try {
reactorTimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod");
fail("exception missed");
} catch (Throwable e) {
assertThat(e).isInstanceOf(IllegalReturnTypeException.class)
.hasMessage(
"java.lang.String testMethod has unsupported by @TimeLimiter return type. Reactor expects Mono/Flux.");
}
}
|
public static BDBJournalCursor getJournalCursor(BDBEnvironment env, long fromKey, long toKey)
throws JournalException, JournalInconsistentException, InterruptedException {
return getJournalCursor(env, "", fromKey, toKey);
}
|
@Test(expected = JournalException.class)
public void testDatabaseNamesFails(@Mocked BDBEnvironment environment) throws Exception {
new Expectations(environment) {
{
environment.getDatabaseNamesWithPrefix("");
minTimes = 0;
result = null;
}
};
BDBJournalCursor.getJournalCursor(environment, 10, 10);
Assert.fail();
}
|
@Override
public void run() {
// We need to get this thread so it can be interrupted if the cached proposal has been invalidated.
_proposalPrecomputingSchedulerThread = Thread.currentThread();
LOG.info("Starting proposal candidate computation.");
while (!_shutdown && _numPrecomputingThreads > 0) {
LoadMonitorTaskRunner.LoadMonitorTaskRunnerState loadMonitorTaskRunnerState = _loadMonitor.taskRunnerState();
long sleepTimeMs = _proposalExpirationMs;
if (loadMonitorTaskRunnerState == LOADING || loadMonitorTaskRunnerState == BOOTSTRAPPING) {
LOG.info("Skipping proposal precomputing because load monitor is in " + loadMonitorTaskRunnerState + " state.");
// Check in {@value HALF_MINUTE_IN_MS} to see if the load monitor state has changed.
sleepTimeMs = HALF_MINUTE_IN_MS;
} else if (!_loadMonitor.meetCompletenessRequirements(_requirementsWithAvailableValidWindows)) {
LOG.info("Skipping proposal precomputing because load monitor does not have enough snapshots.");
// Check in {@value HALF_MINUTE_IN_MS} to see if the load monitor has sufficient number of snapshots.
sleepTimeMs = HALF_MINUTE_IN_MS;
} else {
try {
if (!validCachedProposal()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Invalidated cache. Model generation (cached: {}, current: {}).{}",
_cachedProposals == null ? null : _cachedProposals.modelGeneration(),
_loadMonitor.clusterModelGeneration(),
_cachedProposals == null ? "" : String.format(" Cached was excluding default topics: %s.",
_cachedProposals.excludedTopics()));
}
clearCachedProposal();
long start = System.nanoTime();
// Proposal precomputation runs with the default topics to exclude, and allows capacity estimation.
computeCachedProposal(_allowCapacityEstimationOnProposalPrecompute);
_proposalComputationTimer.update(System.nanoTime() - start, TimeUnit.NANOSECONDS);
} else {
LOG.debug("Skipping proposal precomputing because the cached proposal result is still valid. "
+ "Cached generation: {}", _cachedProposals.modelGeneration());
}
} catch (KafkaCruiseControlException e) {
// Check in {@value HALF_MINUTE_IN_MS} to see if the ongoing execution has finished.
sleepTimeMs = HALF_MINUTE_IN_MS;
LOG.debug("Skipping proposal precomputing because there is an ongoing execution.", e);
}
}
long deadline = _time.milliseconds() + sleepTimeMs;
if (!_shutdown && _time.milliseconds() < deadline) {
try {
Thread.sleep(deadline - _time.milliseconds());
} catch (InterruptedException e) {
// let it go.
}
}
}
}
|
@Test
public void testNoPreComputingThread() {
GoalOptimizer goalOptimizer = createGoalOptimizer();
// Should exit immediately.
goalOptimizer.run();
}
|
public synchronized Topology addSink(final String name,
final String topic,
final String... parentNames) {
internalTopologyBuilder.addSink(name, topic, null, null, null, parentNames);
return this;
}
|
@Test
public void shouldFailWithUnknownParent() {
assertThrows(TopologyException.class, () -> topology.addSink("sink", "topic-2", "source"));
}
|
@Override
public void onCreating(AbstractJob job) {
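    // Copy metadata from the @Job annotation (name, amount of retries, labels)
    // onto the job being created, when the annotation is present.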
JobDetails jobDetails = job.getJobDetails();
Optional<Job> jobAnnotation = getJobAnnotation(jobDetails);
setJobName(job, jobAnnotation);
setAmountOfRetries(job, jobAnnotation);
setLabels(job, jobAnnotation);
}
|
@Test
void testLabelsIsUsedIfProvidedByAnnotation() {
Job job = anEnqueuedJob()
.withJobDetails(() -> testService.doWorkWithJobAnnotationAndLabels(3, "customer name"))
.build();
defaultJobFilter.onCreating(job);
assertThat(job).hasLabels(Set.of("label-3 - customer name"));
}
|
public static <OutputT> Coder<OutputT> inferCoder(
SingleStoreIO.RowMapper<OutputT> rowMapper,
CoderRegistry registry,
SchemaRegistry schemaRegistry,
Logger log) {
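    // Coder resolution order: a coder supplied by the RowMapper itself, then a
    // schema-derived coder, then the coder registry; fail only if all three are unavailable.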
if (rowMapper instanceof SingleStoreIO.RowMapperWithCoder) {
try {
return ((SingleStoreIO.RowMapperWithCoder<OutputT>) rowMapper).getCoder();
} catch (Exception e) {
log.warn("Unable to infer a coder from RowMapper. Attempting to infer a coder from type.");
}
}
TypeDescriptor<OutputT> outputType =
TypeDescriptors.extractFromTypeParameters(
rowMapper,
SingleStoreIO.RowMapper.class,
new TypeDescriptors.TypeVariableExtractor<
SingleStoreIO.RowMapper<OutputT>, OutputT>() {});
try {
return schemaRegistry.getSchemaCoder(outputType);
} catch (NoSuchSchemaException e) {
log.warn(
"Unable to infer a schema for type {}. Attempting to infer a coder without a schema.",
outputType);
}
try {
return registry.getCoder(outputType);
} catch (CannotProvideCoderException e) {
throw new IllegalArgumentException(
String.format("Unable to infer a coder for type %s", outputType));
}
}
|
@Test
public void testInferCoderFromRowMapper() {
SchemaRegistry sr = SchemaRegistry.createDefault();
CoderRegistry cr = CoderRegistry.createDefault();
Coder<TestRow> c = SerializableCoder.of(TestRow.class);
assertEquals(c, SingleStoreUtil.inferCoder(new TestRowMapperWithCoder(), cr, sr, LOG));
}
|
public static Node build(final List<JoinInfo> joins) {
Node root = null;
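    // Fold the joins into a left-deep tree: the first join's left source seeds the tree,
    // and each subsequent join must reference a source already present in the tree.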
for (final JoinInfo join : joins) {
if (root == null) {
root = new Leaf(join.getLeftSource());
}
if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) {
throw new KsqlException("Cannot perform circular join - both " + join.getRightSource()
+ " and " + join.getLeftJoinExpression()
+ " are already included in the current join tree: " + root.debugString(0));
} else if (root.containsSource(join.getLeftSource())) {
root = new Join(root, new Leaf(join.getRightSource()), join);
} else if (root.containsSource(join.getRightSource())) {
root = new Join(root, new Leaf(join.getLeftSource()), join.flip());
} else {
throw new KsqlException(
"Cannot build JOIN tree; neither source in the join is the FROM source or included "
+ "in a previous JOIN: " + join + ". The current join tree is "
+ root.debugString(0)
);
}
}
return root;
}
|
@Test
public void handlesBasicTwoWayJoin() {
// Given:
when(j1.getLeftSource()).thenReturn(a);
when(j1.getRightSource()).thenReturn(b);
final List<JoinInfo> joins = ImmutableList.of(j1);
// When:
final Node root = JoinTree.build(joins);
// Then:
assertThat(root, instanceOf(Join.class));
assertThat(((Join) root).getLeft(), is(new JoinTree.Leaf(a)));
assertThat(((Join) root).getRight(), is(new JoinTree.Leaf(b)));
assertThat(((Join) root).getInfo(), is(j1));
}
|
public static Builder custom() {
return new Builder();
}
|
@Test
public void shouldCreateAmountCutoff() {
HedgeConfig config = HedgeConfig.custom()
.averagePlusAmountDuration(200, false, 100).build();
HedgeDurationSupplier supplier = HedgeDurationSupplier.fromConfig(config);
    then(supplier).isInstanceOf(AverageDurationSupplier.class);
then(((AverageDurationSupplier) supplier).getFactor()).isEqualTo(200);
then(((AverageDurationSupplier) supplier).shouldMeasureErrors()).isFalse();
then(((AverageDurationSupplier) supplier).shouldUseFactorAsPercentage()).isFalse();
}
|
@Override
public SeekableByteChannel truncate(long size) throws IOException {
delegate.truncate(size);
return this;
}
|
@Test
public void testTruncate() throws IOException {
int newSize = 5;
channelUnderTest.truncate(newSize);
assertEquals(newSize, delegate.size());
assertEquals(newSize, channelUnderTest.size());
}
|
@Override
public void debug(String msg) {
logger.debug(msg);
}
|
@Test
void testDebugWithFormat2() {
jobRunrDashboardLogger.debug("Debug with {} {}", "format1", "format2");
verify(slfLogger).debug("Debug with {} {}", "format1", "format2");
}
|
@Override
public SarifSchema210 deserialize(Path reportPath) {
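    // Detect unsupported SARIF versions during instantiation and wrap all parsing
    // failures in IllegalStateException with the report path for context.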
try {
return mapper
.enable(JsonParser.Feature.INCLUDE_SOURCE_IN_LOCATION)
.addHandler(new DeserializationProblemHandler() {
@Override
public Object handleInstantiationProblem(DeserializationContext ctxt, Class<?> instClass, Object argument, Throwable t) throws IOException {
if (!instClass.equals(SarifSchema210.Version.class)) {
return NOT_HANDLED;
}
throw new UnsupportedSarifVersionException(format(UNSUPPORTED_VERSION_MESSAGE_TEMPLATE, argument), t);
}
})
.readValue(reportPath.toFile(), SarifSchema210.class);
} catch (UnsupportedSarifVersionException e) {
throw new IllegalStateException(e.getMessage(), e);
} catch (JsonMappingException | JsonParseException e) {
throw new IllegalStateException(format(SARIF_JSON_SYNTAX_ERROR, reportPath), e);
} catch (IOException e) {
throw new IllegalStateException(format(SARIF_REPORT_ERROR, reportPath), e);
}
}
|
@Test
public void deserialize_whenFileIsNotUtf8encoded_shouldFail() throws URISyntaxException {
URL sarifResource = requireNonNull(getClass().getResource("sarif210-nonUtf8.json"));
Path sarif = Paths.get(sarifResource.toURI());
assertThatThrownBy(() -> serializer.deserialize(sarif))
.isInstanceOf(IllegalStateException.class)
.hasMessage(format("Failed to read SARIF report at '%s': invalid JSON syntax or file is not UTF-8 encoded", sarif));
}
|
static double toDouble(final JsonNode object) {
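    // JSON numbers convert directly; numeric strings are parsed, and any other
    // node type fails the coercion to DOUBLE.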
if (object instanceof NumericNode) {
return object.doubleValue();
}
if (object instanceof TextNode) {
try {
return Double.parseDouble(object.textValue());
} catch (final NumberFormatException e) {
throw failedStringCoercionException(SqlBaseType.DOUBLE);
}
}
throw invalidConversionException(object, SqlBaseType.DOUBLE);
}
|
@Test
public void shouldConvertStringToDoubleCorrectly() {
final Double d = JsonSerdeUtils.toDouble(JsonNodeFactory.instance.textNode("1.0"));
assertThat(d, equalTo(1.0));
}
|
public Future<Void> migrateFromDeploymentToStrimziPodSets(Deployment deployment, StrimziPodSet podSet) {
if (deployment == null) {
// Deployment does not exist anymore => no migration needed
return Future.succeededFuture();
} else {
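        // Move one pod per call: scale the Deployment down by one and the PodSet up
        // by one, capped at the desired replica count.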
int depReplicas = deployment.getSpec().getReplicas();
int podSetReplicas = podSet != null ? podSet.getSpec().getPods().size() : 0;
return moveOnePodFromDeploymentToStrimziPodSet(depReplicas - 1, Math.min(podSetReplicas + 1, connect.getReplicas()));
}
}
|
@Test
public void testMigrationToPodSetsWithRecreateStrategy(VertxTestContext context) {
DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
StrimziPodSetOperator mockPodSetOps = mock(StrimziPodSetOperator.class);
PodOperator mockPodOps = mock(PodOperator.class);
LinkedList<String> events = mockKubernetes(mockDepOps, mockPodSetOps, mockPodOps);
KafkaConnect connect = new KafkaConnectBuilder(CONNECT)
.editSpec()
.withNewTemplate()
.withNewDeployment()
.withDeploymentStrategy(DeploymentStrategy.RECREATE)
.endDeployment()
.endTemplate()
.endSpec()
.build();
KafkaConnectCluster cluster = KafkaConnectCluster.fromCrd(RECONCILIATION, connect, VERSIONS, SHARED_ENV_PROVIDER);
KafkaConnectMigration migration = new KafkaConnectMigration(
RECONCILIATION,
cluster,
null,
null,
1_000L,
false,
null,
null,
null,
mockDepOps,
mockPodSetOps,
mockPodOps
);
Checkpoint async = context.checkpoint();
migration.migrateFromDeploymentToStrimziPodSets(
DEPLOYMENT,
null
).onComplete(context.succeeding(v -> context.verify(() -> {
assertThat(events.size(), is(11));
assertThat(events.poll(), is("DEP-SCALE-DOWN-TO-2"));
assertThat(events.poll(), is("DEP-READINESS-" + COMPONENT_NAME));
assertThat(events.poll(), is("POD-SET-RECONCILE-TO-1"));
assertThat(events.poll(), is("POD-READINESS-my-connect-connect-0"));
assertThat(events.poll(), is("DEP-SCALE-DOWN-TO-1"));
assertThat(events.poll(), is("DEP-READINESS-" + COMPONENT_NAME));
assertThat(events.poll(), is("POD-SET-RECONCILE-TO-2"));
assertThat(events.poll(), is("POD-READINESS-my-connect-connect-1"));
assertThat(events.poll(), is("DEP-DELETE-" + COMPONENT_NAME));
assertThat(events.poll(), is("POD-SET-RECONCILE-TO-3"));
assertThat(events.poll(), is("POD-READINESS-my-connect-connect-2"));
async.flag();
})));
}
|
@Bean
public WebSocketPlugin webSocketPlugin(final WebSocketClient webSocketClient, final WebSocketService webSocketService) {
return new WebSocketPlugin(webSocketClient, webSocketService);
}
|
@Test
public void testWebSocketPlugin() {
applicationContextRunner.run(context -> {
WebSocketPlugin plugin = context.getBean("webSocketPlugin", WebSocketPlugin.class);
assertNotNull(plugin);
assertThat(plugin.named()).isEqualTo(PluginEnum.WEB_SOCKET.getName());
}
);
}
|
@Override
public <V1, R> KTable<K, R> leftJoin(final KTable<K, V1> other,
final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
return leftJoin(other, joiner, NamedInternal.empty());
}
|
@Test
public void shouldThrowNullPointerOnLeftJoinWhenMaterializedIsNull() {
assertThrows(NullPointerException.class, () -> table.leftJoin(table, MockValueJoiner.TOSTRING_JOINER, (Materialized) null));
}
|
public int compare(ZonedDateTime otherDate) {
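    // Comparable-style result (1, -1, 0); this stamp must carry a date and the
    // argument must be non-null, otherwise an IllegalStateException is thrown.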
if (hasDateStamp() && otherDate != null) {
if (getDateTime().isAfter(otherDate)) return 1;
if (getDateTime().isBefore(otherDate)) return -1;
return 0;
} else {
throw new IllegalStateException("One or more DateStamp is missing");
}
}
|
@Test
void compare() {
DateTimeStamp a = new DateTimeStamp("2018-04-04T09:10:00.586-0100");
DateTimeStamp b = new DateTimeStamp("2018-04-04T09:10:00.587-0100");
assertTrue(a.compare(b.getDateTime()) < 0);
assertTrue(b.compare(a.getDateTime()) > 0);
assertEquals(0, a.compare(new DateTimeStamp("2018-04-04T09:10:00.586-0100").getDateTime()));
a = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18);
b = new DateTimeStamp("2018-04-04T09:10:00.587-0100", 0.18);
assertTrue(a.compare(b.getDateTime()) < 0);
assertTrue(b.compare(a.getDateTime()) > 0);
assertEquals(0, a.compare(new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18).getDateTime()));
a = new DateTimeStamp("2018-04-04T09:10:00.586-0000", 0.18);
b = new DateTimeStamp("2018-04-04T09:10:00.586-0100", 0.18);
assertTrue(a.compare(b.getDateTime()) < 0);
assertTrue(b.compare(a.getDateTime()) > 0);
assertEquals(0, a.compare(new DateTimeStamp("2018-04-04T09:10:00.586-0000", 0.18).getDateTime()));
}
|