focal_method | test_case
---|---
@Override
public boolean retryRequest(
HttpRequest request, IOException exception, int execCount, HttpContext context) {
if (execCount > maxRetries) {
// Do not retry if over max retries
return false;
}
if (nonRetriableExceptions.contains(exception.getClass())) {
return false;
} else {
for (Class<? extends IOException> rejectException : nonRetriableExceptions) {
if (rejectException.isInstance(exception)) {
return false;
}
}
}
if (request instanceof CancellableDependency
&& ((CancellableDependency) request).isCancelled()) {
return false;
}
// Retry if the request is considered idempotent
return Method.isIdempotent(request.getMethod());
}
|
@Test
public void noRetryOnConnect() {
HttpGet request = new HttpGet("/");
assertThat(retryStrategy.retryRequest(request, new ConnectException(), 1, null)).isFalse();
}
|
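Note: the retryRequest strategy above checks the exception class twice: an exact contains() lookup first, then an isInstance() pass so that subclasses of a registered class are also rejected. A minimal standalone sketch of that classification step (the set contents here are illustrative assumptions, not taken from the method above):
import java.io.IOException;
import java.io.InterruptedIOException;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.util.Set;
class RetryClassificationSketch {
// Illustrative set; real strategies typically register classes like these.
static final Set<Class<? extends IOException>> NON_RETRIABLE =
Set.of(InterruptedIOException.class, UnknownHostException.class);
static boolean isNonRetriable(IOException exception) {
if (NON_RETRIABLE.contains(exception.getClass())) {
return true; // fast path: exact class match
}
for (Class<? extends IOException> rejected : NON_RETRIABLE) {
if (rejected.isInstance(exception)) {
return true; // slower path: subclass match
}
}
return false;
}
public static void main(String[] args) {
// SocketTimeoutException extends InterruptedIOException, so the isInstance
// pass rejects it even though only the superclass is registered.
System.out.println(isNonRetriable(new SocketTimeoutException())); // true
System.out.println(isNonRetriable(new IOException()));            // false
}
}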
@Override
public Iterable<DiscoveryNode> discoverNodes() {
try {
Collection<AzureAddress> azureAddresses = azureClient.getAddresses();
logAzureAddresses(azureAddresses);
List<DiscoveryNode> result = new ArrayList<>();
for (AzureAddress azureAddress : azureAddresses) {
for (int port = portRange.getFromPort(); port <= portRange.getToPort(); port++) {
result.add(createDiscoveryNode(azureAddress, port));
}
}
return result;
} catch (NoCredentialsException e) {
if (!isKnownExceptionAlreadyLogged) {
LOGGER.warning("No Azure credentials found! Starting standalone. To use Hazelcast Azure discovery, configure"
+ " properties (client-id, tenant-id, client-secret) or assign a managed identity to the Azure Compute"
+ " instance");
LOGGER.finest(e);
isKnownExceptionAlreadyLogged = true;
}
} catch (RestClientException e) {
if (e.getHttpErrorCode() == HTTP_FORBIDDEN) {
if (!isKnownExceptionAlreadyLogged) {
LOGGER.warning("Required role is not assigned to service principal! To use Hazelcast Azure discovery assign"
+ " a role to service principal with correct 'Read' permissions. Starting standalone.");
isKnownExceptionAlreadyLogged = true;
}
LOGGER.finest(e);
} else {
LOGGER.warning("Cannot discover nodes. Starting standalone.", e);
}
} catch (Exception e) {
LOGGER.warning("Cannot discover nodes. Starting standalone.", e);
}
return Collections.emptyList();
}
|
@Test
public void discoverNodes() {
// given
AzureAddress azureAddress1 = new AzureAddress("192.168.1.15", "38.146.24.2");
AzureAddress azureAddress2 = new AzureAddress("192.168.1.16", "38.146.28.15");
given(azureClient.getAddresses()).willReturn(asList(azureAddress1, azureAddress2));
// when
Iterable<DiscoveryNode> nodes = azureDiscoveryStrategy.discoverNodes();
// then
Iterator<DiscoveryNode> iterator = nodes.iterator();
DiscoveryNode node1 = iterator.next();
assertEquals(azureAddress1.getPrivateAddress(), node1.getPrivateAddress().getHost());
assertEquals(azureAddress1.getPublicAddress(), node1.getPublicAddress().getHost());
assertEquals(PORT1, node1.getPrivateAddress().getPort());
DiscoveryNode node2 = iterator.next();
assertEquals(azureAddress1.getPrivateAddress(), node2.getPrivateAddress().getHost());
assertEquals(azureAddress1.getPublicAddress(), node2.getPublicAddress().getHost());
assertEquals(PORT2, node2.getPrivateAddress().getPort());
DiscoveryNode node3 = iterator.next();
assertEquals(azureAddress2.getPrivateAddress(), node3.getPrivateAddress().getHost());
assertEquals(azureAddress2.getPublicAddress(), node3.getPublicAddress().getHost());
assertEquals(PORT1, node3.getPrivateAddress().getPort());
DiscoveryNode node4 = iterator.next();
assertEquals(azureAddress2.getPrivateAddress(), node4.getPrivateAddress().getHost());
assertEquals(azureAddress2.getPublicAddress(), node4.getPublicAddress().getHost());
assertEquals(PORT2, node4.getPrivateAddress().getPort());
}
|
private void watchConfigKeyValues(final String watchPathRoot,
final BiConsumer<String, String> updateHandler,
final Consumer<String> deleteHandler) {
try {
Long currentIndex = this.consulIndexes.get(watchPathRoot);
if (Objects.isNull(currentIndex)) {
currentIndex = ConsulConstants.INIT_CONFIG_VERSION_INDEX;
}
Response<List<GetValue>> response = this.consulClient.getKVValues(watchPathRoot, null,
new QueryParams(TimeUnit.MILLISECONDS.toSeconds(consulConfig.getWaitTime()), currentIndex));
if (Objects.isNull(response.getValue()) || response.getValue().isEmpty()) {
if (LOG.isTraceEnabled()) {
LOG.trace("No value for watchPathRoot {}", watchPathRoot);
}
this.executor.schedule(() -> watchConfigKeyValues(watchPathRoot, updateHandler, deleteHandler),
consulConfig.getWatchDelay(), TimeUnit.MILLISECONDS);
return;
}
Long newIndex = response.getConsulIndex();
if (Objects.isNull(newIndex)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Same index for watchPathRoot {}", watchPathRoot);
}
this.executor.schedule(() -> watchConfigKeyValues(watchPathRoot, updateHandler, deleteHandler),
consulConfig.getWatchDelay(), TimeUnit.MILLISECONDS);
return;
}
if (Objects.equals(newIndex, currentIndex)) {
this.executor.schedule(() -> watchConfigKeyValues(watchPathRoot, updateHandler, deleteHandler),
-1, TimeUnit.MILLISECONDS);
return;
}
if (!this.consulIndexes.containsValue(newIndex)
&& !currentIndex.equals(ConsulConstants.INIT_CONFIG_VERSION_INDEX)) {
if (LOG.isTraceEnabled()) {
LOG.trace("watchPathRoot {} has new index {}", watchPathRoot, newIndex);
}
final Long lastIndex = currentIndex;
final List<ConsulData> lastDatas = cacheConsulDataKeyMap.get(watchPathRoot);
response.getValue().forEach(data -> {
if (data.getModifyIndex() == lastIndex) {
//data has not changed
return;
}
if (Objects.nonNull(lastDatas)) {
final ConsulData consulData = lastDatas.stream()
.filter(lastData -> data.getKey().equals(lastData.getConsulKey())).findFirst().orElse(null);
if (Objects.nonNull(consulData) && !StringUtils.isBlank(consulData.getConsulDataMd5())
&& consulData.getConsulDataMd5().equals(DigestUtils.md5Hex(data.getValue()))) {
return;
}
}
updateHandler.accept(data.getKey(), data.getDecodedValue());
});
final List<String> currentKeys = response.getValue().stream().map(GetValue::getKey).collect(Collectors.toList());
if (!ObjectUtils.isEmpty(lastDatas)) {
// handler delete event
lastDatas.stream()
.map(ConsulData::getConsulKey)
.filter(lastKey -> !currentKeys.contains(lastKey))
.forEach(deleteHandler);
}
// save last Keys
cacheConsulDataKeyMap.put(watchPathRoot, response.getValue().stream().map(data -> {
final ConsulData consulData = new ConsulData();
consulData.setConsulKey(data.getKey());
consulData.setConsulDataMd5(DigestUtils.md5Hex(data.getValue()));
return consulData;
}).collect(Collectors.toList()));
} else if (LOG.isTraceEnabled()) {
LOG.info("Event for index already published for watchPathRoot {}", watchPathRoot);
}
this.consulIndexes.put(watchPathRoot, newIndex);
this.executor.schedule(() -> watchConfigKeyValues(watchPathRoot, updateHandler, deleteHandler),
-1, TimeUnit.MILLISECONDS);
} catch (Exception e) {
LOG.warn("Error querying consul Key/Values for watchPathRoot '{}'. Message: ", watchPathRoot, e);
this.executor.schedule(() -> watchConfigKeyValues(watchPathRoot, updateHandler, deleteHandler),
consulConfig.getWatchDelay(), TimeUnit.MILLISECONDS);
}
}
|
@Test
public void testWatchConfigKeyValues() throws NoSuchMethodException, IllegalAccessException, NoSuchFieldException {
final Method watchConfigKeyValues = ConsulSyncDataService.class.getDeclaredMethod("watchConfigKeyValues",
String.class, BiConsumer.class, Consumer.class);
watchConfigKeyValues.setAccessible(true);
final Field consul = ConsulSyncDataService.class.getDeclaredField("consulClient");
consul.setAccessible(true);
final ConsulClient consulClient = mock(ConsulClient.class);
consul.set(consulSyncDataService, consulClient);
final Field declaredField = ConsulSyncDataService.class.getDeclaredField("consulConfig");
declaredField.setAccessible(true);
final ConsulConfig consulConfig = mock(ConsulConfig.class);
declaredField.set(consulSyncDataService, consulConfig);
final Field executorField = ConsulSyncDataService.class.getDeclaredField("executor");
executorField.setAccessible(true);
executorField.set(consulSyncDataService, mock(ScheduledThreadPoolExecutor.class));
final Response<List<GetValue>> response = mock(Response.class);
when(consulClient.getKVValues(any(), any(), any())).thenReturn(response);
BiConsumer<String, String> updateHandler = (changeData, decodedValue) -> {
};
Consumer<String> deleteHandler = removeKey -> {
};
String watchPathRoot = "/shenyu";
Assertions.assertDoesNotThrow(() -> watchConfigKeyValues.invoke(consulSyncDataService, watchPathRoot, updateHandler, deleteHandler));
List<GetValue> getValues = new ArrayList<>(1);
getValues.add(mock(GetValue.class));
when(response.getValue()).thenReturn(getValues);
Assertions.assertDoesNotThrow(() -> watchConfigKeyValues.invoke(consulSyncDataService, watchPathRoot, updateHandler, deleteHandler));
when(response.getConsulIndex()).thenReturn(2L);
Assertions.assertDoesNotThrow(() -> watchConfigKeyValues.invoke(consulSyncDataService, watchPathRoot, updateHandler, deleteHandler));
when(response.getConsulIndex()).thenReturn(null);
Assertions.assertDoesNotThrow(() -> watchConfigKeyValues.invoke(consulSyncDataService, watchPathRoot, updateHandler, deleteHandler));
when(response.getConsulIndex()).thenReturn(0L);
Assertions.assertDoesNotThrow(() -> watchConfigKeyValues.invoke(consulSyncDataService, watchPathRoot, updateHandler, deleteHandler));
final Field consulIndexes = ConsulSyncDataService.class.getDeclaredField("consulIndexes");
consulIndexes.setAccessible(true);
final Map<String, Long> consulIndexesSource = (Map<String, Long>) consulIndexes.get(consulSyncDataService);
consulIndexesSource.put("/null", null);
when(response.getConsulIndex()).thenReturn(2L);
Assertions.assertDoesNotThrow(() -> watchConfigKeyValues.invoke(consulSyncDataService, watchPathRoot, updateHandler, deleteHandler));
}
|
public double calculateDensity(Graph graph, boolean isGraphDirected) {
double result;
double edgesCount = graph.getEdgeCount();
double nodesCount = graph.getNodeCount();
double multiplier = 1;
if (!isGraphDirected) {
multiplier = 2;
}
result = (multiplier * edgesCount) / (nodesCount * nodesCount - nodesCount);
return result;
}
|
@Test
public void testCyclicGraphDensity() {
GraphModel graphModel = GraphGenerator.generateCyclicUndirectedGraph(6);
Graph graph = graphModel.getGraph();
GraphDensity d = new GraphDensity();
double density = d.calculateDensity(graph, false);
assertEquals(density, 0.4, 1e-6);
}
|
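Note: the density divisor nodesCount * nodesCount - nodesCount is N(N-1), the number of ordered node pairs, so the method computes E/(N(N-1)) for directed graphs and 2E/(N(N-1)) for undirected ones. For the 6-node undirected cycle in the test, 2*6/(6*5) = 0.4. A plain-arithmetic check:
public class DensitySketch {
public static void main(String[] args) {
double edgesCount = 6;  // a 6-node cycle has one edge per node
double nodesCount = 6;
double density = (2 * edgesCount) / (nodesCount * nodesCount - nodesCount);
System.out.println(density); // 0.4
}
}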
@Nonnull
public static ToConverter getToConverter(QueryDataType type) {
if (type.getTypeFamily() == QueryDataTypeFamily.OBJECT) {
// User-defined types are subject to the same conversion rules as ordinary OBJECT.
type = QueryDataType.OBJECT;
}
return Objects.requireNonNull(CONVERTERS.get(type), "missing converter for " + type);
}
|
@Test
public void test_bigIntegerConversion() {
Object converted = getToConverter(DECIMAL_BIG_INTEGER).convert(new BigDecimal("1"));
assertThat(converted).isEqualTo(new BigInteger("1"));
}
|
public static <T> Optional<T> getFieldValue(final Object target, final String fieldName) {
return findField(fieldName, target.getClass()).map(optional -> getFieldValue(target, optional));
}
|
@Test
void assertGetFieldValue() {
assertThat(ReflectionUtils.getFieldValue(new ReflectionFixture(), "instanceValue").orElse(""), is("instance_value"));
assertFalse(ReflectionUtils.getFieldValue(new ReflectionFixture(), "not_existed_field").isPresent());
}
|
public int poll(final FragmentHandler fragmentHandler, final int fragmentLimit)
{
if (isClosed)
{
return 0;
}
final long position = subscriberPosition.get();
return TermReader.read(
activeTermBuffer(position),
(int)position & termLengthMask,
fragmentHandler,
fragmentLimit,
header,
errorHandler,
position,
subscriberPosition);
}
|
@Test
void shouldReportCorrectPositionOnReceptionWithNonZeroPositionInInitialTermId()
{
final int initialMessageIndex = 5;
final int initialTermOffset = offsetForFrame(initialMessageIndex);
final long initialPosition = computePosition(
INITIAL_TERM_ID, initialTermOffset, POSITION_BITS_TO_SHIFT, INITIAL_TERM_ID);
position.setOrdered(initialPosition);
final Image image = createImage();
insertDataFrame(INITIAL_TERM_ID, offsetForFrame(initialMessageIndex));
final int messages = image.poll(mockFragmentHandler, Integer.MAX_VALUE);
assertThat(messages, is(1));
verify(mockFragmentHandler).onFragment(
any(UnsafeBuffer.class),
eq(initialTermOffset + HEADER_LENGTH),
eq(DATA.length),
any(Header.class));
final InOrder inOrder = Mockito.inOrder(position);
inOrder.verify(position).setOrdered(initialPosition);
inOrder.verify(position).setOrdered(initialPosition + ALIGNED_FRAME_LENGTH);
}
|
public Map<Consumer, List<EntryAndMetadata>> assign(final List<EntryAndMetadata> entryAndMetadataList,
final int numConsumers) {
assert numConsumers >= 0;
consumerToPermits.clear();
final Map<Consumer, List<EntryAndMetadata>> consumerToEntries = new IdentityHashMap<>();
Consumer consumer = getConsumer(numConsumers);
if (consumer == null) {
entryAndMetadataList.forEach(EntryAndMetadata::release);
return consumerToEntries;
}
// The actual available permits might change, here we use the permits at the moment to assign entries
int availablePermits = consumerToPermits.computeIfAbsent(consumer, Consumer::getAvailablePermits);
int index = 0;
for (; index < entryAndMetadataList.size(); index++) {
final EntryAndMetadata entryAndMetadata = entryAndMetadataList.get(index);
final MessageMetadata metadata = entryAndMetadata.getMetadata();
// Select another consumer to ensure `consumer != null` and `availablePermits > 0`
if (availablePermits <= 0) {
consumerToPermits.put(consumer, availablePermits);
consumer = getConsumer(numConsumers);
if (consumer == null) {
break;
}
availablePermits = consumer.getAvailablePermits();
}
if (metadata == null || !metadata.hasUuid() || !metadata.hasChunkId() || !metadata.hasNumChunksFromMsg()) {
consumerToEntries.computeIfAbsent(consumer, __ -> new ArrayList<>()).add(entryAndMetadata);
} else {
final Consumer consumerForUuid = getConsumerForUuid(metadata, consumer, availablePermits);
if (consumerForUuid == null) {
unassignedMessageProcessor.accept(entryAndMetadata);
continue;
}
consumerToEntries.computeIfAbsent(consumerForUuid, __ -> new ArrayList<>()).add(entryAndMetadata);
}
availablePermits--;
}
for (; index < entryAndMetadataList.size(); index++) {
unassignedMessageProcessor.accept(entryAndMetadataList.get(index));
}
return consumerToEntries;
}
|
@Test
public void testSingleConsumerMultiAssign() {
// Only first 5 entries can be received because the number of permits is 5
final Consumer consumer = new Consumer("A", 5);
roundRobinConsumerSelector.addConsumers(consumer);
Map<Consumer, List<EntryAndMetadata>> result = assignor.assign(entryAndMetadataList, 1);
assertEquals(result.getOrDefault(consumer, Collections.emptyList()), entryAndMetadataList.subList(0, 5));
// Since two chunked messages (A-1 and B-1) are both not received, these uuids have been cached
assertEquals(assignor.getUuidToConsumer().keySet(), Sets.newHashSet("A-1", "B-1"));
assertEquals(toString(replayQueue), Arrays.asList("0:5@A-1-2-3", "0:6@B-1-1-2"));
result = assignor.assign(entryAndMetadataList.subList(5, 6), 1);
assertEquals(result.getOrDefault(consumer, Collections.emptyList()), entryAndMetadataList.subList(5, 6));
// A-1 is received so that uuid "A-1" has been removed from the cache
assertEquals(assignor.getUuidToConsumer().keySet(), Sets.newHashSet("B-1"));
result = assignor.assign(entryAndMetadataList.subList(6, 7), 1);
assertEquals(result.getOrDefault(consumer, Collections.emptyList()), entryAndMetadataList.subList(6, 7));
assertTrue(assignor.getUuidToConsumer().isEmpty());
}
|
@Override
public void onBeginFailure(GlobalTransaction tx, Throwable cause) {
LOGGER.warn("Failed to begin transaction. ", cause);
}
|
@Test
void onBeginFailure() {
RootContext.bind(DEFAULT_XID);
DefaultGlobalTransaction tx = (DefaultGlobalTransaction)GlobalTransactionContext.getCurrentOrCreate();
FailureHandler failureHandler = new DefaultFailureHandlerImpl();
failureHandler.onBeginFailure(tx, new MyRuntimeException("").getCause());
}
|
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);
return ArtifactResponseBuilder
.newInstance(artifactResponse)
.addID()
.addIssueInstant()
.addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
.addStatus(StatusBuilder
.newInstance(status)
.addStatusCode(statusCode, StatusCode.SUCCESS)
.build())
.addIssuer(issuer, entityId)
.addMessage(buildResponse(artifactResolveRequest, entityId, signType))
.addSignature(signatureService, signType)
.build();
}
|
@Test
void validateAssertionIsPresent() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException, JsonProcessingException {
when(bvdClientMock.retrieveRepresentationAffirmations(anyString())).thenReturn(getBvdResponse());
ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(getArtifactResolveRequest("success", true,true, SAML_COMBICONNECT, EncryptionType.BSN, ENTRANCE_ENTITY_ID), ENTRANCE_ENTITY_ID, TD);
Response response = (Response) artifactResponse.getMessage();
assertEquals(1, response.getAssertions().size());
}
|
static DataType getTargetDataType(final MiningFunction miningFunction, final MathContext mathContext) {
switch (miningFunction) {
case REGRESSION:
return DataType.fromValue(mathContext.value());
case CLASSIFICATION:
case CLUSTERING:
return DataType.STRING;
default:
return null;
}
}
|
@Test
void getTargetDataType() {
MiningFunction miningFunction = MiningFunction.REGRESSION;
MathContext mathContext = MathContext.DOUBLE;
DataType retrieved = KiePMMLUtil.getTargetDataType(miningFunction, mathContext);
assertThat(retrieved).isEqualTo(DataType.DOUBLE);
mathContext = MathContext.FLOAT;
retrieved = KiePMMLUtil.getTargetDataType(miningFunction, mathContext);
assertThat(retrieved).isEqualTo(DataType.FLOAT);
miningFunction = MiningFunction.CLASSIFICATION;
retrieved = KiePMMLUtil.getTargetDataType(miningFunction, mathContext);
assertThat(retrieved).isEqualTo(DataType.STRING);
miningFunction = MiningFunction.CLUSTERING;
retrieved = KiePMMLUtil.getTargetDataType(miningFunction, mathContext);
assertThat(retrieved).isEqualTo(DataType.STRING);
List<MiningFunction> notMappedMiningFunctions = Arrays.asList(MiningFunction.ASSOCIATION_RULES,
MiningFunction.MIXED,
MiningFunction.SEQUENCES,
MiningFunction.TIME_SERIES);
notMappedMiningFunctions.forEach(minFun -> assertThat(KiePMMLUtil.getTargetDataType(minFun, MathContext.DOUBLE)).isNull());
}
|
@VisibleForTesting
public Journal getJournal(String jid) {
return journalsById.get(jid);
}
|
@Test(timeout=100000)
public void testJournalDefaultDirForOneNameSpace() {
Collection<String> nameServiceIds = DFSUtilClient.getNameServiceIds(conf);
setupStaticHostResolution(2, "journalnode");
String jid = "test-journalid-ns1";
Journal nsJournal = jn.getJournal(jid);
JNStorage journalStorage = nsJournal.getStorage();
File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
File.separator + "TestJournalNode" + File.separator + "ns1" + File
.separator + jid);
assertEquals(editsDir.toString(), journalStorage.getRoot().toString());
jid = "test-journalid-ns2";
nsJournal = jn.getJournal(jid);
journalStorage = nsJournal.getStorage();
editsDir = new File(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT +
File.separator + jid);
assertEquals(editsDir.toString(), journalStorage.getRoot().toString());
}
|
@Override
public QualityGate.Condition apply(Condition input) {
String metricKey = input.getMetric().getKey();
ConditionStatus conditionStatus = statusPerConditions.get(input);
checkState(conditionStatus != null, "Missing ConditionStatus for condition on metric key %s", metricKey);
return builder
.setStatus(convert(conditionStatus.getStatus()))
.setMetricKey(metricKey)
.setOperator(convert(input.getOperator()))
.setErrorThreshold(input.getErrorThreshold())
.setValue(conditionStatus.getValue())
.build();
}
|
@Test
@UseDataProvider("allOperatorValues")
public void apply_converts_all_values_of_operator(Condition.Operator operator) {
Condition condition = new Condition(newMetric(METRIC_KEY), operator.getDbValue(), ERROR_THRESHOLD);
ConditionToCondition underTest = new ConditionToCondition(of(condition, SOME_CONDITION_STATUS));
assertThat(underTest.apply(condition).getOperator().name()).isEqualTo(operator.name());
}
|
@Override
public Optional<DatabaseAdminExecutor> create(final SQLStatementContext sqlStatementContext) {
SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
if (sqlStatement instanceof ShowFunctionStatusStatement) {
return Optional.of(new ShowFunctionStatusExecutor((ShowFunctionStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowProcedureStatusStatement) {
return Optional.of(new ShowProcedureStatusExecutor((ShowProcedureStatusStatement) sqlStatement));
}
if (sqlStatement instanceof ShowTablesStatement) {
return Optional.of(new ShowTablesExecutor((ShowTablesStatement) sqlStatement, sqlStatementContext.getDatabaseType()));
}
return Optional.empty();
}
|
@Test
void assertCreateWithSelectStatementFromPerformanceSchema() {
initProxyContext(Collections.emptyMap());
SimpleTableSegment tableSegment = new SimpleTableSegment(new TableNameSegment(10, 13, new IdentifierValue("accounts")));
tableSegment.setOwner(new OwnerSegment(7, 8, new IdentifierValue("performance_schema")));
MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class);
when(selectStatement.getFrom()).thenReturn(Optional.of(tableSegment));
when(sqlStatementContext.getSqlStatement()).thenReturn(selectStatement);
Optional<DatabaseAdminExecutor> actual = new MySQLAdminExecutorCreator().create(sqlStatementContext, "select * from accounts", "", Collections.emptyList());
assertFalse(actual.isPresent());
}
|
@Override
public void close() {
RuntimeException firstException = null;
for (Map.Entry<String, StateStore> entry : stores.entrySet()) {
final StateStore store = entry.getValue();
if (log.isDebugEnabled()) {
log.debug("Closing store {}", store.fqsn());
}
try {
store.close();
} catch (RuntimeException e) {
if (firstException == null) {
firstException = e;
}
log.error("Failed to close state store {}: ", store.fqsn(), e);
}
}
stores.clear();
if (null != firstException) {
throw firstException;
}
}
|
@Test
public void testClose() {
final String fqsn1 = "t/ns/store-1";
StateStore store1 = mock(StateStore.class);
when(store1.fqsn()).thenReturn(fqsn1);
final String fqsn2 = "t/ns/store-2";
StateStore store2 = mock(StateStore.class);
when(store2.fqsn()).thenReturn(fqsn2);
this.stateManager.registerStore(store1);
this.stateManager.registerStore(store2);
this.stateManager.close();
verify(store1, times(1)).close();
verify(store2, times(1)).close();
}
|
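Note: close() above follows the close-everything-remember-first-failure pattern, so one failing store cannot prevent the remaining stores from closing. A generic sketch of the same pattern, assuming nothing beyond java.lang.AutoCloseable:
import java.util.List;
public class CloseAllSketch {
static void closeAll(List<AutoCloseable> resources) throws Exception {
Exception first = null;
for (AutoCloseable resource : resources) {
try {
resource.close();
} catch (Exception e) {
if (first == null) {
first = e; // remember the first failure, keep closing the rest
}
}
}
if (first != null) {
throw first;
}
}
public static void main(String[] args) throws Exception {
closeAll(List.<AutoCloseable>of(
() -> System.out.println("closed a"),
() -> System.out.println("closed b")));
}
}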
public static <T> TypeInformation<T> of(Class<T> typeClass) {
try {
return TypeExtractor.createTypeInfo(typeClass);
} catch (InvalidTypesException e) {
throw new FlinkRuntimeException(
"Cannot extract TypeInformation from Class alone, because generic parameters are missing. "
+ "Please use TypeInformation.of(TypeHint) instead, or another equivalent method in the API that "
+ "accepts a TypeHint instead of a Class. "
+ "For example for a Tuple2<Long, String> pass a 'new TypeHint<Tuple2<Long, String>>(){}'.");
}
}
|
@Test
void testOfGenericClassForFlink() {
assertThatThrownBy(() -> TypeInformation.of(Tuple3.class))
.isInstanceOf(FlinkRuntimeException.class)
.hasMessageContaining("TypeHint");
}
|
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
if (value instanceof BigDecimal) {
payload.writeInt4(((BigDecimal) value).intValue());
} else if (value instanceof Integer) {
payload.writeInt4((Integer) value);
} else {
payload.writeInt4(((Long) value).intValue());
}
}
|
@Test
void assertWrite() {
new MySQLInt4BinaryProtocolValue().write(payload, 1);
verify(payload).writeInt4(1);
}
|
public static String validIdentifier(String value, int maxLen, String name) {
Check.notEmpty(value, name);
if (value.length() > maxLen) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen));
}
if (!IDENTIFIER_PATTERN.matcher(value).find()) {
throw new IllegalArgumentException(
MessageFormat.format("[{0}] = [{1}] must be \"{2}\"", name, value, IDENTIFIER_PATTERN_STR));
}
return value;
}
|
@Test(expected = IllegalArgumentException.class)
public void validIdentifierInvalid5() throws Exception {
Check.validIdentifier("[a", 2, "");
}
|
public T add(String str) {
requireNonNull(str, JVM_OPTION_NOT_NULL_ERROR_MESSAGE);
String value = str.trim();
if (isInvalidOption(value)) {
throw new IllegalArgumentException("a JVM option can't be empty and must start with '-'");
}
checkMandatoryOptionOverwrite(value);
options.add(value);
return castThis();
}
|
@Test
public void toString_prints_all_jvm_options() {
underTest.add("-foo").add("-bar");
assertThat(underTest).hasToString("[-foo, -bar]");
}
|
@Override public Message receive() {
Message message = delegate.receive();
handleReceive(message);
return message;
}
|
@Test void receive_creates_consumer_span() throws Exception {
ActiveMQTextMessage message = new ActiveMQTextMessage(clientSession);
receive(message);
MutableSpan consumer = testSpanHandler.takeRemoteSpan(CONSUMER);
assertThat(consumer.name()).isEqualTo("receive");
assertThat(consumer.name()).isEqualTo("receive");
}
|
static ParseResult parse(String expression, NameValidator validator, ClassHelper helper) {
ParseResult result = new ParseResult();
try {
Parser parser = new Parser(new Scanner("ignore", new StringReader(expression)));
Java.Atom atom = parser.parseConditionalExpression();
// after parsing the expression the input should end (otherwise it is not "simple")
if (parser.peek().type == TokenType.END_OF_INPUT) {
result.guessedVariables = new LinkedHashSet<>();
ConditionalExpressionVisitor visitor = new ConditionalExpressionVisitor(result, validator, helper);
result.ok = atom.accept(visitor);
result.invalidMessage = visitor.invalidMessage;
if (result.ok) {
result.converted = new StringBuilder(expression.length());
int start = 0;
for (Replacement replace : visitor.replacements.values()) {
result.converted.append(expression, start, replace.start).append(replace.newString);
start = replace.start + replace.oldLength;
}
result.converted.append(expression.substring(start));
}
}
} catch (Exception ex) {
// ignore parse failures: result.ok stays false and the unparsed result is returned
}
return result;
}
|
@Test
public void isValidAndSimpleCondition() {
NameValidator validVariable = s -> Helper.toUpperCase(s).equals(s)
|| s.equals("road_class") || s.equals("toll") || s.equals("my_speed") || s.equals("backward_my_speed");
ParseResult result = parse("in_something", validVariable, k -> "");
assertTrue(result.ok);
assertEquals("[in_something]", result.guessedVariables.toString());
result = parse("edge == edge", validVariable, k -> "");
assertFalse(result.ok);
result = parse("Math.sqrt(my_speed)", validVariable, k -> "");
assertTrue(result.ok);
assertEquals("[my_speed]", result.guessedVariables.toString());
result = parse("Math.sqrt(2)", validVariable, k -> "");
assertTrue(result.ok);
assertTrue(result.guessedVariables.isEmpty());
result = parse("edge.blup()", validVariable, k -> "");
assertFalse(result.ok);
assertTrue(result.guessedVariables.isEmpty());
result = parse("edge.getDistance()", validVariable, k -> "");
assertTrue(result.ok);
assertEquals("[edge]", result.guessedVariables.toString());
assertFalse(parse("road_class == PRIMARY", s -> false, k -> "").ok);
result = parse("road_class == PRIMARY", validVariable, k -> "");
assertTrue(result.ok);
assertEquals("[road_class]", result.guessedVariables.toString());
result = parse("toll == Toll.NO", validVariable, k -> "");
assertFalse(result.ok);
assertEquals("[toll]", result.guessedVariables.toString());
assertTrue(parse("road_class.ordinal()*2 == PRIMARY.ordinal()*2", validVariable, k -> "").ok);
assertTrue(parse("Math.sqrt(road_class.ordinal()) > 1", validVariable, k -> "").ok);
result = parse("(toll == NO || road_class == PRIMARY) && toll == NO", validVariable, k -> "");
assertTrue(result.ok);
assertEquals("[toll, road_class]", result.guessedVariables.toString());
result = parse("backward_my_speed", validVariable, k -> "");
assertTrue(result.ok);
assertEquals("[backward_my_speed]", result.guessedVariables.toString());
}
|
static Iterator<Map<Integer, List<Data>>> toBatches(final Iterator<Entry<Integer, Data>> entries,
final int maxBatch, Semaphore nodeWideLoadedKeyLimiter) {
return new UnmodifiableIterator<>() {
@Override
public boolean hasNext() {
return entries.hasNext();
}
@Override
public Map<Integer, List<Data>> next() {
if (!entries.hasNext()) {
throw new NoSuchElementException();
}
return nextBatch(entries, maxBatch, nodeWideLoadedKeyLimiter);
}
};
}
|
@Test
public void test_toBatches_with_nodeWideLimit() {
int nodeWideLimit = 7;
int entryCount = 100;
Semaphore nodeWideLoadedKeyLimiter = new Semaphore(nodeWideLimit);
Iterator<Map<Integer, List<Data>>> batches
= MapKeyLoaderUtil.toBatches(newIterator(entryCount),
1000, nodeWideLoadedKeyLimiter);
List<Integer> batchSizes = new ArrayList<>();
while (batches.hasNext()) {
Map<Integer, List<Data>> batch = batches.next();
batchSizes.add(batch.size());
nodeWideLoadedKeyLimiter.release(batch.values().size());
}
// assert no batch is bigger than nodeWideLimit.
assertEquals(0,
batchSizes.stream().filter(size -> size > nodeWideLimit).count());
// assert sum of all batches equals to entryCount.
assertEquals(entryCount,
batchSizes.stream().mapToInt(size -> size).sum());
}
|
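Note: nextBatch() is not shown above, so the following is only a sketch of the acquire/release protocol the test exercises, assuming one semaphore permit per loaded key: the producer stops growing a batch when the node-wide limiter runs dry, and the consumer releases the permits once the batch is processed.
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Semaphore;
import java.util.stream.IntStream;
public class BatchLimiterSketch {
// Hypothetical producer: drain up to maxBatch keys, but never take more
// permits than the node-wide limiter currently has available.
static List<Integer> nextBatch(Iterator<Integer> keys, int maxBatch, Semaphore limiter) {
List<Integer> batch = new ArrayList<>();
while (batch.size() < maxBatch && keys.hasNext() && limiter.tryAcquire()) {
batch.add(keys.next());
}
return batch;
}
public static void main(String[] args) {
Semaphore limiter = new Semaphore(7);
Iterator<Integer> keys = IntStream.range(0, 100).iterator();
int total = 0;
while (keys.hasNext()) {
List<Integer> batch = nextBatch(keys, 1000, limiter);
total += batch.size();         // each batch holds at most 7 keys
limiter.release(batch.size()); // the consumer returns the permits
}
System.out.println(total);         // 100
}
}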
public static UpdateRequirement fromJson(String json) {
return JsonUtil.parse(json, UpdateRequirementParser::fromJson);
}
|
@Test
public void testAssertRefSnapshotIdToJsonWithNullSnapshotId() {
String requirementType = UpdateRequirementParser.ASSERT_REF_SNAPSHOT_ID;
String refName = "snapshot-name";
Long snapshotId = null;
String json =
String.format(
"{\"type\":\"%s\",\"ref\":\"%s\",\"snapshot-id\":%d}",
requirementType, refName, snapshotId);
UpdateRequirement expected = new UpdateRequirement.AssertRefSnapshotID(refName, snapshotId);
assertEquals(requirementType, expected, UpdateRequirementParser.fromJson(json));
}
|
T getFunction(final List<SqlArgument> arguments) {
// first try to get the candidates without any implicit casting
Optional<T> candidate = findMatchingCandidate(arguments, false);
if (candidate.isPresent()) {
return candidate.get();
} else if (!supportsImplicitCasts) {
throw createNoMatchingFunctionException(arguments);
}
// if none were found (candidate isn't present) try again with implicit casting
candidate = findMatchingCandidate(arguments, true);
if (candidate.isPresent()) {
return candidate.get();
}
throw createNoMatchingFunctionException(arguments);
}
|
@Test
public void shouldChooseVarArgsIfSpecificDoesntMatchMultipleArgs() {
// Given:
givenFunctions(
function(OTHER, -1, STRING, STRING, STRING, STRING),
function(EXPECTED, 0, STRING_VARARGS)
);
// When:
final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(
SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.STRING), SqlArgument.of(SqlTypes.STRING),
SqlArgument.of(SqlTypes.STRING)
));
// Then:
assertThat(fun.name(), equalTo(EXPECTED));
}
|
public double[][] test(DataFrame data) {
DataFrame x = formula.x(data);
int n = x.nrow();
int ntrees = models.length;
double[][] prediction = new double[ntrees][n];
for (int j = 0; j < n; j++) {
Tuple xj = x.get(j);
double base = 0;
for (int i = 0; i < ntrees; i++) {
base = base + models[i].tree.predict(xj);
prediction[i][j] = base / (i+1);
}
}
return prediction;
}
|
@Test
public void testAilerons() {
test("ailerons", Ailerons.formula, Ailerons.data, 0.0002);
}
|
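Note: prediction[i][j] = base / (i+1) stores the running mean of the first i+1 tree outputs for sample j, so row i reports the ensemble's prediction when truncated to i+1 trees. The recurrence in isolation:
public class RunningMeanSketch {
public static void main(String[] args) {
double[] treeOutputs = {1.0, 3.0, 5.0}; // hypothetical per-tree predictions
double base = 0;
for (int i = 0; i < treeOutputs.length; i++) {
base += treeOutputs[i];
System.out.println(base / (i + 1)); // prints 1.0, 2.0, 3.0
}
}
}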
@Override
public PageResult<OperateLogDO> getOperateLogPage(OperateLogPageReqVO pageReqVO) {
return operateLogMapper.selectPage(pageReqVO);
}
|
@Test
public void testGetOperateLogPage_dto() {
// build an operate log record
OperateLogDO operateLogDO = RandomUtils.randomPojo(OperateLogDO.class, o -> {
o.setUserId(2048L);
o.setBizId(999L);
o.setType("订单");
});
operateLogMapper.insert(operateLogDO);
// a record whose userId does not match
operateLogMapper.insert(cloneIgnoreId(operateLogDO, o -> o.setUserId(1024L)));
// a record whose bizId does not match
operateLogMapper.insert(cloneIgnoreId(operateLogDO, o -> o.setBizId(888L)));
// a record whose type does not match
operateLogMapper.insert(cloneIgnoreId(operateLogDO, o -> o.setType("退款")));
// build the query parameters
OperateLogPageReqDTO reqDTO = new OperateLogPageReqDTO();
reqDTO.setUserId(2048L);
reqDTO.setBizId(999L);
reqDTO.setType("订单");
// invoke
PageResult<OperateLogDO> pageResult = operateLogServiceImpl.getOperateLogPage(reqDTO);
// assert that only the single matching record is returned
assertEquals(1, pageResult.getTotal());
assertEquals(1, pageResult.getList().size());
assertPojoEquals(operateLogDO, pageResult.getList().get(0));
}
|
@Override
public String toString() {
return "config builder of " + getConfigDefinition();
}
|
@Test
public void require_that_builder_can_be_created_from_payload() throws IOException {
Slime slime = new Slime();
Cursor root = slime.setObject();
root.setString("foo", "bar");
Cursor obj = root.setObject("foorio");
obj.setString("bar", "bam");
Cursor obj2 = obj.setObject("bario");
obj2.setString("bim", "bul");
Cursor a2 = obj.setArray("blim");
Cursor arrayobj = a2.addObject();
arrayobj.setString("fim", "fam");
Cursor arrayobj2 = a2.addObject();
arrayobj2.setString("blim", "blam");
Cursor a1 = root.setArray("arrio");
a1.addString("himbio");
ConfigPayloadBuilder builder = new ConfigPayloadBuilder(new ConfigPayload(slime));
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ConfigPayload.fromBuilder(builder).serialize(baos, new JsonFormat(true));
assertEquals("{\"foo\":\"bar\",\"foorio\":{\"bar\":\"bam\",\"bario\":{\"bim\":\"bul\"},\"blim\":[{\"fim\":\"fam\"},{\"blim\":\"blam\"}]},\"arrio\":[\"himbio\"]}",
baos.toString());
}
|
@Override
public String render(String text) {
if (StringUtils.isBlank(text)) {
return "";
}
if (regex.isEmpty() || link.isEmpty()) {
Comment comment = new Comment();
comment.escapeAndAdd(text);
return comment.render();
}
try {
Matcher matcher = Pattern.compile(regex).matcher(text);
int start = 0;
Comment comment = new Comment();
while (hasMatch(matcher)) {
comment.escapeAndAdd(text.substring(start, matcher.start()));
comment.add(dynamicLink(matcher));
start = matcher.end();
}
comment.escapeAndAdd(text.substring(start));
return comment.render();
} catch (PatternSyntaxException e) {
LOGGER.warn("Illegal regular expression: {} - {}", regex, e.getMessage());
}
return text;
}
|
@Test
public void shouldRenderStringWithSpecifiedRegexAndLink1() throws Exception {
String link = "http://mingle05/projects/cce/cards/${ID}";
String regex = "(?:Task |#|Bug )(\\d+)";
trackingTool = new DefaultCommentRenderer(link, regex);
assertThat(trackingTool.render("Task 111: checkin message"),
is("<a href=\"" + "http://mingle05/projects/cce/cards/111\" "
+ "target=\"story_tracker\">Task 111</a>: checkin message"));
assertThat(trackingTool.render("Bug 111: checkin message"),
is("<a href=\"" + "http://mingle05/projects/cce/cards/111\" "
+ "target=\"story_tracker\">Bug 111</a>: checkin message"));
assertThat(trackingTool.render("#111: checkin message"),
is("<a href=\"" + "http://mingle05/projects/cce/cards/111\" "
+ "target=\"story_tracker\">#111</a>: checkin message"));
}
|
public static void validateImageInDaemonConf(Map<String, Object> conf) {
List<String> allowedImages = getAllowedImages(conf, true);
if (allowedImages.isEmpty()) {
LOG.debug("{} is not configured; skip image validation", DaemonConfig.STORM_OCI_ALLOWED_IMAGES);
} else {
String defaultImage = (String) conf.get(DaemonConfig.STORM_OCI_IMAGE);
validateImage(allowedImages, defaultImage, DaemonConfig.STORM_OCI_IMAGE);
}
}
|
@Test
public void validateImageInDaemonConfSkipped() {
Map<String, Object> conf = new HashMap<>();
conf.put(DaemonConfig.STORM_OCI_IMAGE, "storm/rhel7:dev_test");
//this is essentially a no-op
OciUtils.validateImageInDaemonConf(conf);
}
|
@Override
public void isEqualTo(@Nullable Object expected) {
super.isEqualTo(expected);
}
|
@Test
@GwtIncompatible("gwt Arrays.equals(double[], double[])")
public void isEqualTo_WithoutToleranceParameter_NaN_Success() {
assertThat(array(2.2d, 5.4d, POSITIVE_INFINITY, NEGATIVE_INFINITY, NaN, 0.0, -0.0))
.isEqualTo(array(2.2d, 5.4d, POSITIVE_INFINITY, NEGATIVE_INFINITY, NaN, 0.0, -0.0));
}
|
public String decode(byte[] val) {
return codecs[0].decode(val, 0, val.length);
}
|
@Test
public void testDecodeGreekPersonName() {
assertEquals(GREEK_PERSON_NAME,
iso8859_7().decode(GREEK_PERSON_NAME_BYTE));
}
|
public void updateAutoCommitTimer(final long currentTimeMs) {
this.autoCommitState.ifPresent(t -> t.updateTimer(currentTimeMs));
}
|
@Test
public void testPollEnsureAutocommitSent() {
TopicPartition tp = new TopicPartition("t1", 1);
subscriptionState.assignFromUser(Collections.singleton(tp));
subscriptionState.seek(tp, 100);
CommitRequestManager commitRequestManager = create(true, 100);
assertPoll(0, commitRequestManager);
commitRequestManager.updateAutoCommitTimer(time.milliseconds());
time.sleep(100);
commitRequestManager.updateAutoCommitTimer(time.milliseconds());
List<NetworkClientDelegate.FutureCompletionHandler> pollResults = assertPoll(1, commitRequestManager);
pollResults.forEach(v -> v.onComplete(mockOffsetCommitResponse(
"t1",
1,
(short) 1,
Errors.NONE)));
assertEquals(0.03, (double) getMetric("commit-rate").metricValue(), 0.01);
assertEquals(1.0, getMetric("commit-total").metricValue());
}
|
@Override
public String buildQuery(
String metricsAccountName,
CanaryConfig canaryConfig,
CanaryMetricConfig canaryMetricConfig,
CanaryScope canaryScope) {
WavefrontCanaryMetricSetQueryConfig queryConfig =
(WavefrontCanaryMetricSetQueryConfig) canaryMetricConfig.getQuery();
String query = queryConfig.getMetricName();
if (canaryScope.getScope() != null && !canaryScope.getScope().equals("")) {
query = query + ", " + canaryScope.getScope();
}
query = "ts(" + query + ")";
if (queryConfig.getAggregate() != null && !queryConfig.getAggregate().equals("")) {
query = queryConfig.getAggregate() + "(" + query + ")";
}
return query;
}
|
@Test
public void testBuildQuery_NoScopeProvided() {
CanaryScope canaryScope = createScope("");
CanaryMetricConfig canaryMetricSetQueryConfig = queryConfig(AGGREGATE);
String query =
wavefrontMetricsService.buildQuery("", null, canaryMetricSetQueryConfig, canaryScope);
assertThat(query).isEqualTo(AGGREGATE + "(ts(" + METRIC_NAME + "))");
}
|
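Note: buildQuery() assembles the query inside-out: the optional scope is appended to the metric, the pair is wrapped in ts(...), and an optional aggregate wraps the whole expression. The same composition with plain strings (names are illustrative):
public class WavefrontQuerySketch {
static String buildQuery(String metricName, String scope, String aggregate) {
String query = metricName;
if (scope != null && !scope.isEmpty()) {
query = query + ", " + scope;
}
query = "ts(" + query + ")";
if (aggregate != null && !aggregate.isEmpty()) {
query = aggregate + "(" + query + ")";
}
return query;
}
public static void main(String[] args) {
System.out.println(buildQuery("cpu.usage", "env=prod", "avg")); // avg(ts(cpu.usage, env=prod))
System.out.println(buildQuery("cpu.usage", "", ""));            // ts(cpu.usage)
}
}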
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
getRawMapping().setConf(conf);
}
|
@Test
public void testFilenameMeansMultiSwitch() throws Throwable {
Configuration conf = new Configuration();
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename");
ScriptBasedMapping mapping = createMapping(conf);
assertFalse("Expected to be multi switch", mapping.isSingleSwitch());
mapping.setConf(new Configuration());
assertTrue("Expected to be single switch", mapping.isSingleSwitch());
}
|
@Override
@MethodNotAvailable
public CompletionStage<Void> setAsync(K key, V value) {
throw new MethodNotAvailableException();
}
|
@Test(expected = MethodNotAvailableException.class)
public void testSetAsyncWithTtl() {
adapter.setAsync(42, "value", 1, TimeUnit.MILLISECONDS);
}
|
@Override
public void onProjectBranchesChanged(Set<Project> projects, Set<String> impactedBranches) {
checkNotNull(projects, "projects can't be null");
if (projects.isEmpty()) {
return;
}
Arrays.stream(listeners)
.forEach(safelyCallListener(listener -> listener.onProjectBranchesChanged(projects, impactedBranches)));
}
|
@Test
@UseDataProvider("oneOrManyProjects")
public void onProjectBranchesChanged_calls_all_listeners_even_if_one_throws_an_Error(Set<Project> projects) {
InOrder inOrder = Mockito.inOrder(listener1, listener2, listener3);
doThrow(new Error("Faking listener2 throwing an Error"))
.when(listener2)
.onProjectBranchesChanged(any(), anySet());
underTestWithListeners.onProjectBranchesChanged(projects, emptySet());
inOrder.verify(listener1).onProjectBranchesChanged(same(projects), eq(emptySet()));
inOrder.verify(listener2).onProjectBranchesChanged(same(projects), eq(emptySet()));
inOrder.verify(listener3).onProjectBranchesChanged(same(projects), eq(emptySet()));
inOrder.verifyNoMoreInteractions();
}
|
@Override
public void handleData(String dataId, UserData userData) {
if (dataId == null) {
return;
}
this.lastUserData = userData;
printUserData(dataId, userData);
if (flag != null) {
flag[0].compareAndSet(false, true);
}
if (canNotify()) {
flag = null; // no longer needed
composeAndNotify(userData, lastConfigData);
}
}
|
@Test
public void handleData() throws Exception {
Subscriber listSub = new MockSubscribe(5);
Configurator attrSub = new MockConfigurator(2);
final AtomicInteger ps = new AtomicInteger(0);
ProviderInfoListener listener = new ProviderInfoListener() {
@Override
public void addProvider(ProviderGroup providerGroup) {
ps.addAndGet(providerGroup.size());
}
@Override
public void removeProvider(ProviderGroup providerGroup) {
ps.addAndGet(-providerGroup.size());
}
@Override
public void updateProviders(ProviderGroup providerGroup) {
ps.set(providerGroup.size());
}
@Override
public void updateAllProviders(List<ProviderGroup> providerGroups) {
ps.set(0);
for (ProviderGroup providerGroup : providerGroups) {
ps.addAndGet(providerGroup.size());
}
}
};
SofaRegistrySubscribeCallback callback = new SofaRegistrySubscribeCallback();
callback.addProviderInfoListener("xxxxx", new ConsumerConfig(), listener);
Assert.assertTrue((!callback.flag[0].get()) && (!callback.flag[1].get()));
callback.handleData("xxxxx", buildConfigPs(2));
Thread.sleep(200);
Assert.assertTrue(callback.flag[1].get());
Assert.assertTrue(ps.get() == 0);
callback.handleData("xxxxx", buildPs(5));
Thread.sleep(200);
Assert.assertTrue(callback.flag == null);
// default + localZone
Assert.assertEquals(ps.get(), 5 + 5);
callback = new SofaRegistrySubscribeCallback();
ps.set(0);
callback.addProviderInfoListener("yyyyy", new ConsumerConfig(), listener);
callback.handleData("yyyyy", buildPs(5));
Thread.sleep(200);
callback.handleData("yyyyy", buildConfigPs(2));
Thread.sleep(200);
Assert.assertTrue(callback.flag == null);
Assert.assertEquals(ps.get(), 5 + 5);
}
|
public Location setY(double y) {
return new Location(extent, position.withY(y), yaw, pitch);
}
|
@Test
public void testSetY() throws Exception {
World world = mock(World.class);
Location location1 = new Location(world, Vector3.ZERO);
Location location2 = location1.setY(TEST_VALUE);
assertEquals(0, location1.getY(), EPSILON);
assertEquals(0, location2.getX(), EPSILON);
assertEquals(TEST_VALUE, location2.getY(), EPSILON);
assertEquals(0, location2.getZ(), EPSILON);
}
|
@Override
public CompletableFuture<ResponseFuture> invokeImpl(final Channel channel, final RemotingCommand request,
final long timeoutMillis) {
Stopwatch stopwatch = Stopwatch.createStarted();
String channelRemoteAddr = RemotingHelper.parseChannelRemoteAddr(channel);
doBeforeRpcHooks(channelRemoteAddr, request);
return super.invokeImpl(channel, request, timeoutMillis).thenCompose(responseFuture -> {
RemotingCommand response = responseFuture.getResponseCommand();
if (response.getCode() == ResponseCode.GO_AWAY) {
if (nettyClientConfig.isEnableReconnectForGoAway()) {
ChannelWrapper channelWrapper = channelWrapperTables.computeIfPresent(channel, (channel0, channelWrapper0) -> {
try {
if (channelWrapper0.reconnect()) {
LOGGER.info("Receive go away from channel {}, recreate the channel", channel0);
channelWrapperTables.put(channelWrapper0.getChannel(), channelWrapper0);
}
} catch (Throwable t) {
LOGGER.error("Channel {} reconnect error", channelWrapper0, t);
}
return channelWrapper0;
});
if (channelWrapper != null) {
if (nettyClientConfig.isEnableTransparentRetry()) {
RemotingCommand retryRequest = RemotingCommand.createRequestCommand(request.getCode(), request.readCustomHeader());
retryRequest.setBody(request.getBody());
if (channelWrapper.isOK()) {
long duration = stopwatch.elapsed(TimeUnit.MILLISECONDS);
stopwatch.stop();
Channel retryChannel = channelWrapper.getChannel();
if (retryChannel != null && channel != retryChannel) {
return super.invokeImpl(retryChannel, retryRequest, timeoutMillis - duration);
}
} else {
CompletableFuture<ResponseFuture> future = new CompletableFuture<>();
ChannelFuture channelFuture = channelWrapper.getChannelFuture();
channelFuture.addListener(f -> {
long duration = stopwatch.elapsed(TimeUnit.MILLISECONDS);
stopwatch.stop();
if (f.isSuccess()) {
Channel retryChannel0 = channelFuture.channel();
if (retryChannel0 != null && channel != retryChannel0) {
super.invokeImpl(retryChannel0, retryRequest, timeoutMillis - duration).whenComplete((v, t) -> {
if (t != null) {
future.completeExceptionally(t);
} else {
future.complete(v);
}
});
}
} else {
future.completeExceptionally(new RemotingConnectException(channelWrapper.channelAddress));
}
});
return future;
}
}
}
}
}
return CompletableFuture.completedFuture(responseFuture);
}).whenComplete((v, t) -> {
if (t == null) {
doAfterRpcHooks(channelRemoteAddr, request, v.getResponseCommand());
}
});
}
|
@Test
public void testInvokeImpl() throws ExecutionException, InterruptedException {
remotingClient.registerRPCHook(rpcHookMock);
Channel channel = new LocalChannel();
RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.PULL_MESSAGE, null);
RemotingCommand response = RemotingCommand.createResponseCommand(null);
response.setCode(ResponseCode.SUCCESS);
ResponseFuture responseFuture = new ResponseFuture(channel, request.getOpaque(), request, 1000, new InvokeCallback() {
@Override
public void operationComplete(ResponseFuture responseFuture) {
}
}, new SemaphoreReleaseOnlyOnce(new Semaphore(1)));
responseFuture.setResponseCommand(response);
CompletableFuture<ResponseFuture> future = new CompletableFuture<>();
future.complete(responseFuture);
doReturn(future).when(remotingClient).invoke0(any(Channel.class), any(RemotingCommand.class), anyLong());
CompletableFuture<ResponseFuture> future0 = remotingClient.invokeImpl(channel, request, 1000);
assertThat(future0.get()).isEqualTo(responseFuture);
verify(rpcHookMock).doBeforeRequest(anyString(), eq(request));
verify(rpcHookMock).doAfterResponse(anyString(), eq(request), eq(response));
}
|
@Override
public List<AwsEndpoint> getClusterEndpoints() {
if (clientConfig.shouldUseDnsForFetchingServiceUrls()) {
if (logger.isInfoEnabled()) {
logger.info("Resolving eureka endpoints via DNS: {}", getDNSName());
}
return getClusterEndpointsFromDns();
} else {
logger.info("Resolving eureka endpoints via configuration");
return getClusterEndpointsFromConfig();
}
}
|
@Test
public void testReadFromConfig() {
List<AwsEndpoint> endpoints = resolver.getClusterEndpoints();
assertThat(endpoints.size(), equalTo(6));
for (AwsEndpoint endpoint : endpoints) {
if (endpoint.getZone().equals("us-east-1e")) {
assertThat("secure was wrong", endpoint.isSecure(), is(true));
assertThat("serviceUrl contains -1", endpoint.getServiceUrl().contains("-1"), is(false));
assertThat("BASIC auth credentials expected", endpoint.getServiceUrl().contains("myuser:mypassword"), is(true));
}
}
}
|
boolean matchesNonValueField(final Optional<SourceName> source, final ColumnName column) {
if (!source.isPresent()) {
return sourceSchemas.values().stream()
.anyMatch(schema ->
SystemColumns.isPseudoColumn(column) || schema.isKeyColumn(column));
}
final SourceName sourceName = source.get();
final LogicalSchema sourceSchema = sourceSchemas.get(sourceName);
if (sourceSchema == null) {
throw new IllegalArgumentException("Unknown source: " + sourceName);
}
return sourceSchema.isKeyColumn(column) || SystemColumns.isPseudoColumn(column);
}
|
@Test
public void shouldNotMatchNonKeyFieldOnWrongSource() {
assertThat(sourceSchemas.matchesNonValueField(Optional.of(ALIAS_2), K0), is(false));
}
|
@Internal
public static String getFqName(
String system, @Nullable String routine, Iterable<String> segments) {
StringBuilder builder = new StringBuilder(system);
if (!Strings.isNullOrEmpty(routine)) {
builder.append(":").append(routine);
}
int idx = 0;
for (String segment : segments) {
if (idx == 0) {
builder.append(":");
} else {
builder.append(".");
}
builder.append(wrapSegment(segment));
++idx;
}
return builder.toString();
}
|
@Test
public void testGetFqName() {
Map<String, String> testCases =
ImmutableMap.<String, String>builder()
.put("apache-beam", "apache-beam")
.put("`apache-beam`", "`apache-beam`")
.put("apache.beam", "`apache.beam`")
.put("apache:beam", "`apache:beam`")
.put("apache beam", "`apache beam`")
.put("`apache beam`", "`apache beam`")
.put("apache\tbeam", "`apache\tbeam`")
.put("apache\nbeam", "`apache\nbeam`")
.build();
testCases.forEach(
(key, value) ->
assertEquals("apache:" + value, Lineage.getFqName("apache", ImmutableList.of(key))));
testCases.forEach(
(key, value) ->
assertEquals(
"apache:beam:" + value,
Lineage.getFqName("apache", "beam", ImmutableList.of(key))));
testCases.forEach(
(key, value) ->
assertEquals(
"apache:beam:" + value + "." + value,
Lineage.getFqName("apache", "beam", ImmutableList.of(key, key))));
}
|
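Note: wrapSegment() is not shown above; judging from the test table, it backtick-quotes a segment that contains separator or whitespace characters and leaves already-quoted segments alone. A sketch under that assumption:
import java.util.List;
public class FqNameSketch {
// Assumed behavior of wrapSegment: quote with backticks when the segment
// contains anything outside [A-Za-z0-9_-]; leave quoted segments as-is.
static String wrapSegment(String segment) {
if (segment.length() > 1 && segment.startsWith("`") && segment.endsWith("`")) {
return segment;
}
return segment.matches("[\\w-]+") ? segment : "`" + segment + "`";
}
static String fqName(String system, String routine, List<String> segments) {
StringBuilder builder = new StringBuilder(system);
if (routine != null && !routine.isEmpty()) {
builder.append(":").append(routine);
}
for (int i = 0; i < segments.size(); i++) {
builder.append(i == 0 ? ":" : ".").append(wrapSegment(segments.get(i)));
}
return builder.toString();
}
public static void main(String[] args) {
System.out.println(fqName("apache", "beam", List.of("apache.beam", "x")));
// apache:beam:`apache.beam`.x
}
}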
@Override
public ComponentCreationData createProjectAndBindToDevOpsPlatform(DbSession dbSession, CreationMethod creationMethod, Boolean monorepo, @Nullable String projectKey,
@Nullable String projectName) {
String pat = findPersonalAccessTokenOrThrow(dbSession, almSettingDto);
String workspace = ofNullable(almSettingDto.getAppId())
.orElseThrow(() -> new IllegalArgumentException(String.format("workspace for alm setting %s is missing", almSettingDto.getKey())));
Repository repo = bitbucketCloudRestClient.getRepo(pat, workspace, devOpsProjectDescriptor.repositoryIdentifier());
ComponentCreationData componentCreationData = projectCreator.createProject(
dbSession,
getProjectKey(workspace, projectKey, repo),
getProjectName(projectName, repo),
repo.getMainBranch().getName(),
creationMethod);
ProjectDto projectDto = Optional.ofNullable(componentCreationData.projectDto()).orElseThrow();
createProjectAlmSettingDto(dbSession, repo.getSlug(), projectDto, almSettingDto, monorepo);
return componentCreationData;
}
|
@Test
void createProjectAndBindToDevOpsPlatform_whenRepositoryNotFound_shouldThrow() {
mockPatForUser();
when(almSettingDto.getAppId()).thenReturn("workspace");
when(bitbucketCloudRestClient.getRepo(USER_PAT, "workspace", REPOSITORY_SLUG)).thenThrow(new IllegalStateException("Problem fetching repository from Bitbucket Cloud"));
assertThatExceptionOfType(IllegalStateException.class)
.isThrownBy(() -> underTest.createProjectAndBindToDevOpsPlatform(mock(DbSession.class), CreationMethod.ALM_IMPORT_API, false, null, null))
.withMessage("Problem fetching repository from Bitbucket Cloud");
}
|
public static StatementExecutorResponse execute(
final ConfiguredStatement<AssertSchema> statement,
final SessionProperties sessionProperties,
final KsqlExecutionContext executionContext,
final ServiceContext serviceContext
) {
return AssertExecutor.execute(
statement.getMaskedStatementText(),
statement.getStatement(),
executionContext.getKsqlConfig().getInt(KSQL_ASSERT_SCHEMA_DEFAULT_TIMEOUT_MS),
serviceContext,
(stmt, sc) -> assertSchema(
sc.getSchemaRegistryClient(),
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists()),
(str, stmt) -> new AssertSchemaEntity(
str,
((AssertSchema) stmt).getSubject(),
((AssertSchema) stmt).getId(),
stmt.checkExists())
);
}
|
@Test
public void shouldAssertNotExistSchemaBySubjectAndId() {
// Given
final AssertSchema assertSchema = new AssertSchema(Optional.empty(), Optional.of("def"), Optional.of(100), Optional.empty(), false);
final ConfiguredStatement<AssertSchema> statement = ConfiguredStatement
.of(KsqlParser.PreparedStatement.of("", assertSchema),
SessionConfig.of(ksqlConfig, ImmutableMap.of()));
// When:
final Optional<KsqlEntity> entity = AssertSchemaExecutor
.execute(statement, mock(SessionProperties.class), engine, serviceContext).getEntity();
// Then:
assertThat("expected response!", entity.isPresent());
assertThat(((AssertSchemaEntity) entity.get()).getSubject(), is(Optional.of("def")));
assertThat(((AssertSchemaEntity) entity.get()).getId(), is(Optional.of(100)));
assertThat(((AssertSchemaEntity) entity.get()).getExists(), is(false));
}
|
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception {
return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
(t, et) -> new MethodGetter(parent, method, modifier, t, et));
}
|
@Test
public void newMethodGetter_whenExtractingFromNull_Array_AndReducerSuffixInNotEmpty_thenReturnNullGetter()
throws Exception {
OuterObject object = OuterObject.nullInner("name");
Getter getter = GetterFactory.newMethodGetter(object, null, innersArrayMethod, "[any]");
Class<?> returnType = getter.getReturnType();
assertEquals(InnerObject.class, returnType);
}
|
@Override
public boolean enableSendingOldValues(final boolean forceMaterialization) {
if (queryableName != null) {
sendOldValues = true;
return true;
}
if (parent.enableSendingOldValues(forceMaterialization)) {
sendOldValues = true;
}
return sendOldValues;
}
|
@Test
public void shouldNotSetSendOldValuesOnParentIfMaterialized() {
new KTableTransformValues<>(parent, new NoOpValueTransformerWithKeySupplier<>(), QUERYABLE_NAME).enableSendingOldValues(true);
verify(parent, never()).enableSendingOldValues(anyBoolean());
}
|
public NonClosedTracking<RAW, BASE> trackNonClosed(Input<RAW> rawInput, Input<BASE> baseInput) {
NonClosedTracking<RAW, BASE> tracking = NonClosedTracking.of(rawInput, baseInput);
// 1. match by rule, line, line hash and message
match(tracking, LineAndLineHashAndMessage::new);
// 2. match issues with same rule, same line and same line hash, but not necessarily with same message
match(tracking, LineAndLineHashKey::new);
// 3. detect code moves by comparing blocks of codes
detectCodeMoves(rawInput, baseInput, tracking);
// 4. match issues with same rule, same message and same line hash
match(tracking, LineHashAndMessageKey::new);
// 5. match issues with same rule, same line and same message
match(tracking, LineAndMessageKey::new);
// 6. match issues with same rule and same line hash but different line and different message.
// See SONAR-2812
match(tracking, LineHashKey::new);
return tracking;
}
|
@Test
public void match_issues_with_same_rule_key_on_project_level() {
FakeInput baseInput = new FakeInput();
Issue base1 = baseInput.createIssue(RULE_MISSING_PACKAGE_INFO, "[com.test:abc] Missing package-info.java in package.");
Issue base2 = baseInput.createIssue(RULE_MISSING_PACKAGE_INFO, "[com.test:abc/def] Missing package-info.java in package.");
FakeInput rawInput = new FakeInput();
Issue raw1 = rawInput.createIssue(RULE_MISSING_PACKAGE_INFO, "[com.test:abc/def] Missing package-info.java in package.");
Issue raw2 = rawInput.createIssue(RULE_MISSING_PACKAGE_INFO, "[com.test:abc] Missing package-info.java in package.");
Tracking<Issue, Issue> tracking = tracker.trackNonClosed(rawInput, baseInput);
assertThat(tracking.getUnmatchedBases()).isEmpty();
assertThat(tracking.baseFor(raw1)).isEqualTo(base2);
assertThat(tracking.baseFor(raw2)).isEqualTo(base1);
}
|
public boolean isInRange(String ipAddress) throws UnknownHostException {
InetAddress address = InetAddress.getByName(ipAddress);
BigInteger start = new BigInteger(1, this.startAddress.getAddress());
BigInteger end = new BigInteger(1, this.endAddress.getAddress());
BigInteger target = new BigInteger(1, address.getAddress());
int st = start.compareTo(target);
int te = target.compareTo(end);
return (st == -1 || st == 0) && (te == -1 || te == 0);
}
|
@Test
void testIpv6() throws UnknownHostException {
CIDRUtils cidrUtils = new CIDRUtils("234e:0:4567::3d/64");
Assertions.assertTrue(cidrUtils.isInRange("234e:0:4567::3e"));
Assertions.assertTrue(cidrUtils.isInRange("234e:0:4567::ffff:3e"));
Assertions.assertFalse(cidrUtils.isInRange("234e:1:4567::3d"));
Assertions.assertFalse(cidrUtils.isInRange("234e:0:4567:1::3d"));
cidrUtils = new CIDRUtils("3FFE:FFFF:0:CC00::/54");
Assertions.assertTrue(cidrUtils.isInRange("3FFE:FFFF:0:CC00::dd"));
Assertions.assertTrue(cidrUtils.isInRange("3FFE:FFFF:0:CC00:0000:eeee:0909:dd"));
Assertions.assertTrue(cidrUtils.isInRange("3FFE:FFFF:0:CC0F:0000:eeee:0909:dd"));
Assertions.assertFalse(cidrUtils.isInRange("3EFE:FFFE:0:C107::dd"));
Assertions.assertFalse(cidrUtils.isInRange("1FFE:FFFE:0:CC00::dd"));
}
|
@Override
public List<ConnectorMetadataUpdateHandle> getPendingMetadataUpdateRequests()
{
ImmutableList.Builder<ConnectorMetadataUpdateHandle> result = ImmutableList.builder();
for (HiveMetadataUpdateHandle request : hiveMetadataRequestQueue) {
result.add(request);
}
return result.build();
}
|
@Test
public void testEmptyMetadataUpdateRequestQueue()
{
HiveMetadataUpdater hiveMetadataUpdater = new HiveMetadataUpdater(EXECUTOR);
assertEquals(hiveMetadataUpdater.getPendingMetadataUpdateRequests().size(), 0);
}
|
@Override
public String evaluate(EvaluationContext evaluationContext, String... args) {
if (args == null || args.length == 0) {
return "";
}
return args[getRandom().nextInt(args.length)];
}
|
@Test
void testSimpleEvaluation() {
List<String> values = List.of("one", "two", "three");
// Compute evaluation.
RandomValueELFunction function = new RandomValueELFunction();
String result = function.evaluate(null);
assertEquals("", result);
result = function.evaluate(null, "one", "two", "three");
assertTrue(values.contains(result));
}
|
public static DataMap bytesToDataMap(Map<String, String> headers, ByteString bytes) throws MimeTypeParseException, IOException
{
return getContentType(headers).getCodec().readMap(bytes);
}
|
@Test
public void testPSONByteStringToDataMap() throws MimeTypeParseException, IOException
{
DataMap expectedDataMap = createTestDataMap();
ByteString byteString = ByteString.copy(PSON_DATA_CODEC.mapToBytes(expectedDataMap));
DataMap dataMap = bytesToDataMap("application/x-pson", byteString);
Assert.assertEquals(dataMap, expectedDataMap);
Map<String, String> headers = Collections.singletonMap(RestConstants.HEADER_CONTENT_TYPE, "application/x-pson");
dataMap = DataMapConverter.bytesToDataMap(headers, byteString);
Assert.assertEquals(dataMap, expectedDataMap);
}
|
public Optional<String> retrieve(final String shortCode) throws IOException {
final URI uri = shortenerHost.resolve(shortCode);
final HttpRequest request = HttpRequest.newBuilder().uri(uri).GET().build();
try {
final HttpResponse<String> response = this.client.send(request, HttpResponse.BodyHandlers.ofString());
Metrics.counter(EXPAND_COUNTER_NAME, "responseCode", Integer.toString(response.statusCode())).increment();
return switch (response.statusCode()) {
case HttpStatus.SC_OK -> Optional.of(response.body());
case HttpStatus.SC_NOT_FOUND -> Optional.empty();
default -> throw new IOException("Failed to look up shortcode");
};
} catch (InterruptedException e) {
throw new IOException(e);
}
}
|
@Test
public void testUriResolution() throws IOException, InterruptedException {
final HttpClient httpClient = mock(HttpClient.class);
final ShortCodeExpander expander = new ShortCodeExpander(httpClient, "https://www.example.org/shortener/");
when(httpClient
.send(argThat(req -> req.uri().toString().equals("https://www.example.org/shortener/shorturl")), any()))
.thenReturn(new FakeResponse(200, "longurl"));
assertThat(expander.retrieve("shorturl").get()).isEqualTo("longurl");
}
|
@VisibleForTesting
void validateClientIdExists(Long id, String clientId) {
OAuth2ClientDO client = oauth2ClientMapper.selectByClientId(clientId);
if (client == null) {
return;
}
// If id is null, there is no need to check whether it is the client with the same id, so the clientId is already taken
if (id == null) {
throw exception(OAUTH2_CLIENT_EXISTS);
}
if (!client.getId().equals(id)) {
throw exception(OAUTH2_CLIENT_EXISTS);
}
}
|
@Test
public void testValidateClientIdExists_withId() {
// mock data
OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId("tudou");
oauth2ClientMapper.insert(client);
// prepare parameters
Long id = randomLongId();
String clientId = "tudou";
// invoke and assert the duplicate-client exception
assertServiceException(() -> oauth2ClientService.validateClientIdExists(id, clientId), OAUTH2_CLIENT_EXISTS);
}
|
public DataTable subTable(int fromRow, int fromColumn) {
return subTable(fromRow, fromColumn, height(), width());
}
|
@Test
void subTable_throws_for_large_to_row() {
DataTable table = createSimpleTable();
assertThrows(IndexOutOfBoundsException.class, () -> table.subTable(0, 0, 4, 1));
}
|
public BundleProcessor getProcessor(
BeamFnApi.ProcessBundleDescriptor descriptor,
List<RemoteInputDestination> remoteInputDestinations) {
checkState(
!descriptor.hasStateApiServiceDescriptor(),
"The %s cannot support a %s containing a state %s.",
BundleProcessor.class.getSimpleName(),
BeamFnApi.ProcessBundleDescriptor.class.getSimpleName(),
Endpoints.ApiServiceDescriptor.class.getSimpleName());
return getProcessor(descriptor, remoteInputDestinations, NoOpStateDelegator.INSTANCE);
}
|
@Test
public void testRegister() throws Exception {
ProcessBundleDescriptor descriptor1 =
ProcessBundleDescriptor.newBuilder().setId("descriptor1").build();
List<RemoteInputDestination> remoteInputs =
Collections.singletonList(
RemoteInputDestination.of(
(FullWindowedValueCoder)
FullWindowedValueCoder.of(VarIntCoder.of(), GlobalWindow.Coder.INSTANCE),
SDK_GRPC_READ_TRANSFORM));
sdkHarnessClient.getProcessor(descriptor1, remoteInputs);
verify(fnApiControlClient).registerProcessBundleDescriptor(descriptor1);
}
|
@Override
public int hashCode() {
if (hashCodeCache == -1) {
hashCodeCache = Objects.hash(urlAddress, urlParam);
}
return hashCodeCache;
}
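
A minimal sketch of the sentinel-cached hashCode pattern used above (hypothetical fields; note the benign edge case when the real hash equals the sentinel):

import java.util.Objects;

class CachedHashSketch {
    private final String urlAddress = "host:20880";
    private final String urlParam = "side=consumer";
    private int hashCodeCache = -1; // sentinel meaning "not computed yet"

    @Override
    public int hashCode() {
        if (hashCodeCache == -1) {
            // If Objects.hash ever returns -1 itself, the value is simply
            // recomputed on each call; correctness is unaffected.
            hashCodeCache = Objects.hash(urlAddress, urlParam);
        }
        return hashCodeCache;
    }
}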
|
@Test
void testHashcode() {
URL url1 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=false&dubbo=2.0.2&generic=true&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=1599556506417");
URL url2 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=false&dubbo=2.0.2&generic=true&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=2299556506417");
assertEquals(url1.hashCode(), url2.hashCode());
URL url3 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=false&dubbo=2.0.2&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=2299556506417");
assertNotEquals(url2.hashCode(), url3.hashCode());
URL url4 = URL.valueOf("consumer://30.225.20.150/org.apache.dubbo.rpc.service.GenericService?application="
+ "dubbo-demo-api-consumer&category=consumers&check=true&dubbo=2.0.2&interface="
+ "org.apache.dubbo.demo.DemoService&pid=7375&side=consumer&sticky=false×tamp=2299556506417");
assertNotEquals(url3.hashCode(), url4.hashCode());
}
|
@Override
public O next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
O result = nextObject;
nextObject = null;
return result;
}
|
@Test
public void automatic_close_if_traversal_error() {
FailureCloseableIterator it = new FailureCloseableIterator();
try {
it.next();
fail();
} catch (IllegalStateException expected) {
assertThat(expected).hasMessage("expected failure");
assertThat(it.isClosed).isTrue();
}
}
|
@Override
public List<ConfigInfoWrapper> queryConfigInfoByNamespace(String tenant) {
if (Objects.isNull(tenant)) {
throw new IllegalArgumentException("tenantId can not be null");
}
String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
try {
ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(),
TableConstant.CONFIG_INFO);
return this.jt.query(
configInfoMapper.select(Arrays.asList("data_id", "group_id", "tenant_id", "app_name", "type"),
Collections.singletonList("tenant_id")), new Object[] {tenantTmp},
CONFIG_INFO_WRAPPER_ROW_MAPPER);
} catch (EmptyResultDataAccessException e) { // Indicates that the data does not exist; return an empty list.
return Collections.emptyList();
} catch (CannotGetJdbcConnectionException e) {
LogUtil.FATAL_LOG.error("[db-error] " + e, e);
throw e;
}
}
|
@Test
void testQueryConfigInfoByNamespace() {
//mock select config state
List<ConfigInfoWrapper> mockConfigs = new ArrayList<>();
mockConfigs.add(createMockConfigInfoWrapper(0));
mockConfigs.add(createMockConfigInfoWrapper(1));
mockConfigs.add(createMockConfigInfoWrapper(2));
String tenant = "tenant13245";
when(jdbcTemplate.query(anyString(), eq(new Object[] {tenant}), eq(CONFIG_INFO_WRAPPER_ROW_MAPPER))).thenReturn(mockConfigs);
//execute return mock obj
List<ConfigInfoWrapper> configInfoWrappers = externalConfigInfoPersistService.queryConfigInfoByNamespace(tenant);
//expect check
assertEquals(mockConfigs, configInfoWrappers);
//mock EmptyResultDataAccessException
when(jdbcTemplate.query(anyString(), eq(new Object[] {tenant}), eq(CONFIG_INFO_WRAPPER_ROW_MAPPER))).thenThrow(
new EmptyResultDataAccessException(2));
//execute return mock obj
List<ConfigInfoWrapper> configInfoWrapperNull = externalConfigInfoPersistService.queryConfigInfoByNamespace(tenant);
//expect check
assertEquals(Collections.EMPTY_LIST, configInfoWrapperNull);
//mock CannotGetJdbcConnectionException
when(jdbcTemplate.query(anyString(), eq(new Object[] {tenant}), eq(CONFIG_INFO_WRAPPER_ROW_MAPPER))).thenThrow(
new CannotGetJdbcConnectionException("mock exp1111"));
//expect throw exception.
try {
externalConfigInfoPersistService.queryConfigInfoByNamespace(tenant);
assertFalse(true);
} catch (Exception e) {
assertTrue(e instanceof CannotGetJdbcConnectionException);
assertTrue(e.getMessage().endsWith("mock exp1111"));
}
}
|
public List<List<ConfigInfoChanged>> splitList(List<ConfigInfoChanged> list, int count) {
List<List<ConfigInfoChanged>> result = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
result.add(new ArrayList<>());
}
for (int i = 0; i < list.size(); i++) {
ConfigInfoChanged config = list.get(i);
result.get(i % count).add(config);
}
return result;
}
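
A short worked example of the round-robin distribution (plain Java, illustrative values matching the test below):

import java.util.*;

class SplitListSketch {
    public static void main(String[] args) {
        int count = 5;
        List<List<Integer>> buckets = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            buckets.add(new ArrayList<>());
        }
        for (int i = 0; i < 9; i++) {
            buckets.get(i % count).add(i); // element i -> bucket i % count, slot i / count
        }
        System.out.println(buckets); // [[0, 5], [1, 6], [2, 7], [3, 8], [4]]
    }
}

So lists.get(i).get(j) recovers element j * count + i, which is exactly what the test below asserts.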
|
@Test
void testSplitList() {
String dataId = "dataID";
int count = 5;
List<ConfigInfoChanged> configList = new ArrayList<>();
configList.add(create(dataId, 0));
configList.add(create(dataId, 1));
configList.add(create(dataId, 2));
configList.add(create(dataId, 3));
configList.add(create(dataId, 4));
configList.add(create(dataId, 5));
configList.add(create(dataId, 6));
configList.add(create(dataId, 7));
configList.add(create(dataId, 8));
List<List<ConfigInfoChanged>> lists = mergeDatumService.splitList(configList, count);
int originalCount = configList.size();
int actualCount = 0;
for (int i = 0; i < lists.size(); i++) {
List<ConfigInfoChanged> indexList = lists.get(i);
for (int j = 0; j < indexList.size(); j++) {
ConfigInfoChanged configInfoChanged = indexList.get(j);
actualCount++;
assertEquals(configInfoChanged, configList.get((j * count) + i));
}
}
assertEquals(originalCount, actualCount);
}
|
public static boolean fullyDeleteContents(final File dir) {
return fullyDeleteContents(dir, false);
}
|
@Test (timeout = 30000)
public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true);
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
|
@Override
public boolean equals(@Nullable Object object) {
if (object instanceof MetricsContainerStepMap) {
MetricsContainerStepMap metricsContainerStepMap = (MetricsContainerStepMap) object;
return Objects.equals(metricsContainers, metricsContainerStepMap.metricsContainers)
&& Objects.equals(unboundContainer, metricsContainerStepMap.unboundContainer);
}
return false;
}
|
@Test
public void testEquals() {
MetricsContainerStepMap metricsContainerStepMap = new MetricsContainerStepMap();
MetricsContainerStepMap equal = new MetricsContainerStepMap();
Assert.assertEquals(metricsContainerStepMap, equal);
Assert.assertEquals(metricsContainerStepMap.hashCode(), equal.hashCode());
}
|
public static String fromNamedReference(CharSequence s) {
if (s == null) {
return null;
}
final Integer code = SPECIALS.get(s.toString());
if (code != null) {
return "&#" + code + ";";
}
return null;
}
|
@Test
public void testFromNamedReference() {
CharSequence s = null;
String expResult = null;
String result = XmlEntity.fromNamedReference(s);
assertEquals(expResult, result);
s = "somethingWrong";
expResult = null;
result = XmlEntity.fromNamedReference(s);
assertEquals(expResult, result);
s = "amp";
expResult = "&";
result = XmlEntity.fromNamedReference(s);
assertEquals(expResult, result);
s = "acute";
expResult = "´";
result = XmlEntity.fromNamedReference(s);
assertEquals(expResult, result);
}
|
public void updateFromOther(FeedItem other) {
if (other.imageUrl != null) {
this.imageUrl = other.imageUrl;
}
if (other.title != null) {
title = other.title;
}
if (other.getDescription() != null) {
description = other.getDescription();
}
if (other.link != null) {
link = other.link;
}
if (other.pubDate != null && !other.pubDate.equals(pubDate)) {
pubDate = other.pubDate;
}
if (other.media != null) {
if (media == null) {
setMedia(other.media);
// reset to new if feed item did link to a file before
setNew();
} else if (media.compareWithOther(other.media)) {
media.updateFromOther(other.media);
}
}
if (other.paymentLink != null) {
paymentLink = other.paymentLink;
}
if (other.chapters != null) {
if (!hasChapters) {
chapters = other.chapters;
}
}
if (other.podcastIndexChapterUrl != null) {
podcastIndexChapterUrl = other.podcastIndexChapterUrl;
}
if (other.getTranscriptUrl() != null) {
podcastIndexTranscriptUrl = other.podcastIndexTranscriptUrl;
}
}
|
@Test
public void testUpdateFromOther_feedItemImageRemoved() {
feedItemImageRemoved();
original.updateFromOther(changedFeedItem);
assertFeedItemImageWasNotUpdated();
}
|
@Override
public Optional<RegistryAuthenticator> handleHttpResponseException(
ResponseException responseException) throws ResponseException, RegistryErrorException {
// Only valid for status code of '401 Unauthorized'.
if (responseException.getStatusCode() != HttpStatusCodes.STATUS_CODE_UNAUTHORIZED) {
throw responseException;
}
// Checks if the 'WWW-Authenticate' header is present.
String authenticationMethod = responseException.getHeaders().getAuthenticate();
if (authenticationMethod == null) {
throw new RegistryErrorExceptionBuilder(getActionDescription(), responseException)
.addReason("'WWW-Authenticate' header not found")
.build();
}
// Parses the header to retrieve the components.
try {
return RegistryAuthenticator.fromAuthenticationMethod(
authenticationMethod, registryEndpointRequestProperties, userAgent, httpClient);
} catch (RegistryAuthenticationFailedException ex) {
throw new RegistryErrorExceptionBuilder(getActionDescription(), ex)
.addReason("Failed get authentication method from 'WWW-Authenticate' header")
.build();
}
}
|
@Test
public void testHandleHttpResponseException_pass()
throws RegistryErrorException, ResponseException, MalformedURLException {
String authenticationMethod =
"Bearer realm=\"https://somerealm\",service=\"someservice\",scope=\"somescope\"";
Mockito.when(mockResponseException.getStatusCode())
.thenReturn(HttpStatusCodes.STATUS_CODE_UNAUTHORIZED);
Mockito.when(mockResponseException.getHeaders()).thenReturn(mockHeaders);
Mockito.when(mockHeaders.getAuthenticate()).thenReturn(authenticationMethod);
RegistryAuthenticator registryAuthenticator =
testAuthenticationMethodRetriever.handleHttpResponseException(mockResponseException).get();
Assert.assertEquals(
new URL("https://somerealm?service=someservice&scope=repository:someImageName:someScope"),
registryAuthenticator.getAuthenticationUrl(
null, Collections.singletonMap("someImageName", "someScope")));
}
|
public Map<TopicPartition, Long> endOffsets(Set<TopicPartition> partitions) {
if (partitions == null || partitions.isEmpty()) {
return Collections.emptyMap();
}
Map<TopicPartition, OffsetSpec> offsetSpecMap = partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest()));
ListOffsetsResult resultFuture = admin.listOffsets(offsetSpecMap, new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED));
// Get the individual result for each topic partition so we have better error messages
Map<TopicPartition, Long> result = new HashMap<>();
for (TopicPartition partition : partitions) {
try {
ListOffsetsResultInfo info = resultFuture.partitionResult(partition).get();
result.put(partition, info.offset());
} catch (ExecutionException e) {
Throwable cause = e.getCause();
String topic = partition.topic();
if (cause instanceof AuthorizationException) {
String msg = String.format("Not authorized to get the end offsets for topic '%s' on brokers at %s", topic, bootstrapServers);
throw new ConnectException(msg, cause);
} else if (cause instanceof UnsupportedVersionException) {
// Should theoretically never happen, because this method is the same as what the consumer uses and therefore
// should exist in the broker since before the admin client was added
String msg = String.format("API to get the get the end offsets for topic '%s' is unsupported on brokers at %s", topic, bootstrapServers);
throw new UnsupportedVersionException(msg, cause);
} else if (cause instanceof TimeoutException) {
String msg = String.format("Timed out while waiting to get end offsets for topic '%s' on brokers at %s", topic, bootstrapServers);
throw new TimeoutException(msg, cause);
} else if (cause instanceof LeaderNotAvailableException) {
String msg = String.format("Unable to get end offsets during leader election for topic '%s' on brokers at %s", topic, bootstrapServers);
throw new LeaderNotAvailableException(msg, cause);
} else if (cause instanceof org.apache.kafka.common.errors.RetriableException) {
throw (org.apache.kafka.common.errors.RetriableException) cause;
} else {
String msg = String.format("Error while getting end offsets for topic '%s' on brokers at %s", topic, bootstrapServers);
throw new ConnectException(msg, cause);
}
} catch (InterruptedException e) {
Thread.interrupted();
String msg = String.format("Interrupted while attempting to read end offsets for topic '%s' on brokers at %s", partition.topic(), bootstrapServers);
throw new RetriableException(msg, e);
}
}
return result;
}
|
@Test
public void endOffsetsShouldFailWithNonRetriableWhenAuthorizationFailureOccurs() {
String topicName = "myTopic";
TopicPartition tp1 = new TopicPartition(topicName, 0);
Set<TopicPartition> tps = Collections.singleton(tp1);
Long offset = null; // response should use error
Cluster cluster = createCluster(1, topicName, 1);
try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) {
env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
env.kafkaClient().prepareResponse(listOffsetsResultWithClusterAuthorizationException(tp1, offset));
TopicAdmin admin = new TopicAdmin(env.adminClient());
ConnectException e = assertThrows(ConnectException.class, () -> admin.endOffsets(tps));
assertTrue(e.getMessage().contains("Not authorized to get the end offsets"));
}
}
|
static int evaluateLevenshteinDistance(LevenshteinDistance levenshteinDistance, String term, String text) {
logger.debug("evaluateLevenshteinDistance {} {}", term, text);
return levenshteinDistance.apply(term, text);
}
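
A semantic worth making explicit, since the test below depends on it: Apache Commons Text's thresholded LevenshteinDistance returns -1 when the actual distance exceeds the threshold. A standalone check (assumes commons-text on the classpath):

import org.apache.commons.text.similarity.LevenshteinDistance;

class LevenshteinThresholdSketch {
    public static void main(String[] args) {
        // "brown fox" -> "browny foxy" needs two insertions.
        System.out.println(new LevenshteinDistance(1).apply("brown fox", "browny foxy")); // -1
        System.out.println(new LevenshteinDistance(2).apply("brown fox", "browny foxy")); // 2
    }
}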
|
@Test
void evaluateLevenshteinDistanceSplitText() {
String toSearch = "brown fox";
String toScan = "brown fox";
LevenshteinDistance levenshteinDistance = new LevenshteinDistance(0);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(0);
levenshteinDistance = new LevenshteinDistance(1);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(0);
levenshteinDistance = new LevenshteinDistance(2);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(0);
toScan = "brown foxy";
levenshteinDistance = new LevenshteinDistance(0);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(-1);
levenshteinDistance = new LevenshteinDistance(1);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(1);
levenshteinDistance = new LevenshteinDistance(2);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(1);
toScan = "browny foxy";
levenshteinDistance = new LevenshteinDistance(0);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(-1);
levenshteinDistance = new LevenshteinDistance(1);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(-1);
levenshteinDistance = new LevenshteinDistance(2);
assertThat(KiePMMLTextIndex.evaluateLevenshteinDistance(levenshteinDistance, toSearch, toScan)).isEqualTo(2);
}
|
public static List<UpdateRequirement> forCreateTable(List<MetadataUpdate> metadataUpdates) {
Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");
Builder builder = new Builder(null, false);
builder.require(new UpdateRequirement.AssertTableDoesNotExist());
metadataUpdates.forEach(builder::update);
return builder.build();
}
|
@Test
public void emptyUpdatesForCreateTable() {
assertThat(UpdateRequirements.forCreateTable(ImmutableList.of()))
.hasSize(1)
.hasOnlyElementsOfType(UpdateRequirement.AssertTableDoesNotExist.class);
}
|
public static String getShardingSphereDataNodePath() {
return String.join("/", "", ROOT_NODE, DATABASES_NODE);
}
|
@Test
void assertGetShardingSphereDataNodePath() {
assertThat(ShardingSphereDataNode.getShardingSphereDataNodePath(), is("/statistics/databases"));
}
|
public OffsetAndMetadata findNextCommitOffset(final String commitMetadata) {
boolean found = false;
long currOffset;
long nextCommitOffset = committedOffset;
for (KafkaSpoutMessageId currAckedMsg : ackedMsgs) { // complexity is that of a linear scan on a TreeMap
currOffset = currAckedMsg.offset();
if (currOffset == nextCommitOffset) {
// found the next offset to commit
found = true;
nextCommitOffset = currOffset + 1;
} else if (currOffset > nextCommitOffset) {
if (emittedOffsets.contains(nextCommitOffset)) {
LOG.debug("topic-partition [{}] has non-sequential offset [{}]."
+ " It will be processed in a subsequent batch.", tp, currOffset);
break;
} else {
/*
This case will arise in case of non-sequential offset being processed.
So, if the topic doesn't contain offset = nextCommitOffset (possible
if the topic is compacted or deleted), the consumer should jump to
the next logical point in the topic. Next logical offset should be the
first element after nextCommitOffset in the ascending ordered emitted set.
*/
LOG.debug("Processed non-sequential offset."
+ " The earliest uncommitted offset is no longer part of the topic."
+ " Missing offset: [{}], Processed: [{}]", nextCommitOffset, currOffset);
final Long nextEmittedOffset = emittedOffsets.ceiling(nextCommitOffset);
if (nextEmittedOffset != null && currOffset == nextEmittedOffset) {
LOG.debug("Found committable offset: [{}] after missing offset: [{}], skipping to the committable offset",
currOffset, nextCommitOffset);
found = true;
nextCommitOffset = currOffset + 1;
} else {
LOG.debug("Topic-partition [{}] has non-sequential offset [{}]."
+ " Next offset to commit should be [{}]", tp, currOffset, nextCommitOffset);
break;
}
}
} else {
throw new IllegalStateException("The offset [" + currOffset + "] is below the current nextCommitOffset "
+ "[" + nextCommitOffset + "] for [" + tp + "]."
+ " This should not be possible, and likely indicates a bug in the spout's acking or emit logic.");
}
}
OffsetAndMetadata nextCommitOffsetAndMetadata = null;
if (found) {
nextCommitOffsetAndMetadata = new OffsetAndMetadata(nextCommitOffset, commitMetadata);
LOG.debug("Topic-partition [{}] has offsets [{}-{}] ready to be committed."
+ " Processing will resume at offset [{}] upon spout restart",
tp, committedOffset, nextCommitOffsetAndMetadata.offset() - 1, nextCommitOffsetAndMetadata.offset());
} else {
LOG.debug("Topic-partition [{}] has no offsets ready to be committed", tp);
}
LOG.trace("{}", this);
return nextCommitOffsetAndMetadata;
}
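
A worked trace of the compaction case above (plain Java over a NavigableSet, hypothetical offsets):

import java.util.List;
import java.util.TreeSet;

class NextCommitSketch {
    public static void main(String[] args) {
        TreeSet<Long> emitted = new TreeSet<>(List.of(5L, 6L, 8L)); // 7 was compacted away
        long nextCommitOffset = 7L; // earliest uncommitted offset, no longer in the topic
        Long nextEmitted = emitted.ceiling(nextCommitOffset); // -> 8
        // If the next acked offset equals ceiling(nextCommitOffset), the spout can
        // skip the missing offset and commit through the acked one, as above.
        System.out.println(nextEmitted); // 8
    }
}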
|
@Test
public void testFindNextCommitOffsetWhenTooLowOffsetIsAcked() {
OffsetManager startAtHighOffsetManager = new OffsetManager(testTp, 10);
emitAndAckMessage(getMessageId(0));
OffsetAndMetadata nextCommitOffset = startAtHighOffsetManager.findNextCommitOffset(COMMIT_METADATA);
assertThat("Acking an offset earlier than the committed offset should have no effect", nextCommitOffset, is(nullValue()));
}
|
@Override
public String getName() {
return "Snappy";
}
|
@Test
public void testGetName() {
SnappyCompressionProvider provider =
(SnappyCompressionProvider) factory.getCompressionProviderByName( PROVIDER_NAME );
assertNotNull( provider );
assertEquals( PROVIDER_NAME, provider.getName() );
}
|
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
SourceConfig mergedConfig = clone(existingConfig);
if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
throw new IllegalArgumentException("Tenants differ");
}
if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
throw new IllegalArgumentException("Namespaces differ");
}
if (!existingConfig.getName().equals(newConfig.getName())) {
throw new IllegalArgumentException("Function Names differ");
}
if (!StringUtils.isEmpty(newConfig.getClassName())) {
mergedConfig.setClassName(newConfig.getClassName());
}
if (!StringUtils.isEmpty(newConfig.getTopicName())) {
mergedConfig.setTopicName(newConfig.getTopicName());
}
if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
}
if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
mergedConfig.setSchemaType(newConfig.getSchemaType());
}
if (newConfig.getConfigs() != null) {
mergedConfig.setConfigs(newConfig.getConfigs());
}
if (newConfig.getSecrets() != null) {
mergedConfig.setSecrets(newConfig.getSecrets());
}
if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
mergedConfig.setLogTopic(newConfig.getLogTopic());
}
if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
.equals(existingConfig.getProcessingGuarantees())) {
throw new IllegalArgumentException("Processing Guarantees cannot be altered");
}
if (newConfig.getParallelism() != null) {
mergedConfig.setParallelism(newConfig.getParallelism());
}
if (newConfig.getResources() != null) {
mergedConfig
.setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
}
if (!StringUtils.isEmpty(newConfig.getArchive())) {
mergedConfig.setArchive(newConfig.getArchive());
}
if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
}
if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
}
if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
}
if (newConfig.getBatchSourceConfig() != null) {
validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
}
if (newConfig.getProducerConfig() != null) {
mergedConfig.setProducerConfig(newConfig.getProducerConfig());
}
return mergedConfig;
}
|
@Test
public void testMergeRuntimeFlags() {
SourceConfig sourceConfig = createSourceConfig();
SourceConfig newFunctionConfig = createUpdatedSourceConfig("runtimeFlags", "-Dfoo=bar2");
SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newFunctionConfig);
assertEquals(
mergedConfig.getRuntimeFlags(), "-Dfoo=bar2"
);
mergedConfig.setRuntimeFlags(sourceConfig.getRuntimeFlags());
assertEquals(
new Gson().toJson(sourceConfig),
new Gson().toJson(mergedConfig)
);
}
|
@Override
public void run() {
try {
backgroundJobServer.getJobSteward().notifyThreadOccupied();
MDCMapper.loadMDCContextFromJob(job);
performJob();
} catch (Exception e) {
if (isJobDeletedWhileProcessing(e)) {
// nothing to do anymore as Job is deleted
return;
} else if (isJobServerStopped(e)) {
updateJobStateToFailedAndRunJobFilters("Job processing was stopped as background job server has stopped", e);
Thread.currentThread().interrupt();
} else if (isJobNotFoundException(e)) {
updateJobStateToFailedAndRunJobFilters("Job method not found", e);
} else {
updateJobStateToFailedAndRunJobFilters("An exception occurred during the performance of the job", e);
}
} finally {
backgroundJobServer.getJobSteward().notifyThreadIdle();
MDC.clear();
}
}
|
@Test
@DisplayName("any exception other than InvocationTargetException stays unwrapped")
void anyExceptionOtherThanInvocationTargetExceptionIsNotUnwrapped() throws Exception {
var job = anEnqueuedJob().build();
var runner = mock(BackgroundJobRunner.class);
doThrow(new RuntimeException("test error")).when(runner).run(job);
when(backgroundJobServer.getBackgroundJobRunner(job)).thenReturn(runner);
var backgroundJobPerformer = new BackgroundJobPerformer(backgroundJobServer, job);
backgroundJobPerformer.run();
var lastFailure = job.getLastJobStateOfType(FailedState.class);
assertThat(lastFailure.isPresent()).isTrue();
assertThat(lastFailure.get().getExceptionMessage()).isEqualTo("test error");
assertThat(lastFailure.get().getException()).isInstanceOf(RuntimeException.class);
assertThat(lastFailure.get().getException().getMessage()).isEqualTo("test error");
}
|
void precheckMaxResultLimitOnLocalPartitions(String mapName) {
// check if feature is enabled
if (!isPreCheckEnabled) {
return;
}
// limit number of local partitions to check to keep runtime constant
PartitionIdSet localPartitions = mapServiceContext.getCachedOwnedPartitions();
int partitionsToCheck = min(localPartitions.size(), maxLocalPartitionsLimitForPreCheck);
if (partitionsToCheck == 0) {
return;
}
// calculate size of local partitions
int localPartitionSize = getLocalPartitionSize(mapName, localPartitions, partitionsToCheck);
if (localPartitionSize == 0) {
return;
}
// check local result size
long localResultLimit = getNodeResultLimit(partitionsToCheck);
if (localPartitionSize > localResultLimit * MAX_RESULT_LIMIT_FACTOR_FOR_PRECHECK) {
var localMapStatsProvider = mapServiceContext.getLocalMapStatsProvider();
if (localMapStatsProvider != null && localMapStatsProvider.hasLocalMapStatsImpl(mapName)) {
localMapStatsProvider.getLocalMapStatsImpl(mapName).incrementQueryResultSizeExceededCount();
}
throw new QueryResultSizeExceededException(maxResultLimit, " Result size exceeded in local pre-check.");
}
}
|
@Test(expected = QueryResultSizeExceededException.class)
public void testLocalPreCheckEnabledWithMorePartitionsThanPreCheckThresholdOverLimit() {
int[] partitionSizes = {1200, 1000, Integer.MIN_VALUE};
populatePartitions(partitionSizes);
initMocksWithConfiguration(200000, 2);
limiter.precheckMaxResultLimitOnLocalPartitions(ANY_MAP_NAME);
}
|
public Pair<Map<HostInfo, KsqlEntity>, Set<HostInfo>> fetchAllRemoteResults() {
final Set<HostInfo> remoteHosts = DiscoverRemoteHostsUtil.getRemoteHosts(
executionContext.getPersistentQueries(),
sessionProperties.getKsqlHostInfo()
);
if (remoteHosts.isEmpty() || sessionProperties.getInternalRequest()) {
return new Pair<>(ImmutableMap.of(), ImmutableSet.of());
}
final Set<HostInfo> unresponsiveHosts = new HashSet<>();
final ExecutorService executorService = Executors.newFixedThreadPool(remoteHosts.size());
try {
final Map<HostInfo, CompletableFuture<RestResponse<KsqlEntityList>>> futureResponses =
new HashMap<>();
for (HostInfo host : remoteHosts) {
futureResponses.put(host, fetchRemoteData(statement.getUnMaskedStatementText(), host,
executorService));
}
final ImmutableMap.Builder<HostInfo, KsqlEntity> results = ImmutableMap.builder();
for (final Map.Entry<HostInfo, CompletableFuture<RestResponse<KsqlEntityList>>> e
: futureResponses.entrySet()) {
try {
final RestResponse<KsqlEntityList> response =
e.getValue().get(
executionContext.getKsqlConfig().getLong(KSQL_FETCH_REMOTE_HOSTS_TIMEOUT_SECONDS),
TimeUnit.SECONDS);
if (response.isErroneous()) {
LOG.warn("Error response from host. host: {}, cause: {}",
e.getKey(), response.getErrorMessage().getMessage());
unresponsiveHosts.add(e.getKey());
} else {
results.put(e.getKey(), response.getResponse().get(0));
}
} catch (final Exception cause) {
LOG.warn("Failed to retrieve info from host: {}, statement: {}, cause: {}",
e.getKey(), statement.getMaskedStatementText(), cause);
unresponsiveHosts.add(e.getKey());
}
}
return new Pair<>(results.build(), unresponsiveHosts);
} finally {
executorService.shutdown();
}
}
|
@Test
public void testReturnsEmptyIfRequestIsInternal() {
Pair<Map<HostInfo, KsqlEntity>, Set<HostInfo>> remoteResults = augmenter.fetchAllRemoteResults();
assertThat(remoteResults.getLeft().entrySet(), hasSize(0));
assertThat(remoteResults.getRight(), hasSize(0));
}
|
@Override
public void init(final Properties props) {
this.props = props;
cryptographicAlgorithm = TypedSPILoader.getService(CryptographicAlgorithm.class, getType(), props);
}
|
@Test
void assertCreateNewInstanceWithEmptyDigestAlgorithm() {
assertThrows(AlgorithmInitializationException.class, () -> encryptAlgorithm.init(PropertiesBuilder.build(new Property("aes-key-value", "123456abc"),
new Property("digest-algorithm-name", ""))));
}
|
@Deprecated
public static RowMutationInformation of(MutationType mutationType, long sequenceNumber) {
checkArgument(sequenceNumber >= 0, "sequenceNumber must be non-negative");
return new AutoValue_RowMutationInformation(
mutationType, null, Long.toHexString(sequenceNumber));
}
|
@Test
public void givenAddlSegmentTooLarge_throws() {
IllegalArgumentException error =
assertThrows(
IllegalArgumentException.class,
() ->
RowMutationInformation.of(
RowMutationInformation.MutationType.UPSERT, "0/12345678901234567"));
assertEquals(
"changeSequenceNumber: 0/12345678901234567 does not match expected pattern: ^([0-9A-Fa-f]{1,16})(/([0-9A-Fa-f]{1,16})){0,3}$",
error.getMessage());
}
|
@Override
public Image call() throws LayerPropertyNotFoundException {
try (ProgressEventDispatcher ignored =
progressEventDispatcherFactory.create("building image format", 1);
TimerEventDispatcher ignored2 =
new TimerEventDispatcher(buildContext.getEventHandlers(), DESCRIPTION)) {
// Constructs the image.
Image.Builder imageBuilder = Image.builder(buildContext.getTargetFormat());
// Base image layers
baseImageLayers.forEach(imageBuilder::addLayer);
// Passthrough config and count non-empty history entries
int nonEmptyLayerCount = 0;
for (HistoryEntry historyObject : baseImage.getHistory()) {
imageBuilder.addHistory(historyObject);
if (!historyObject.hasCorrespondingLayer()) {
nonEmptyLayerCount++;
}
}
imageBuilder
.setArchitecture(baseImage.getArchitecture())
.setOs(baseImage.getOs())
.addEnvironment(baseImage.getEnvironment())
.addLabels(baseImage.getLabels())
.setHealthCheck(baseImage.getHealthCheck())
.addExposedPorts(baseImage.getExposedPorts())
.addVolumes(baseImage.getVolumes())
.setUser(baseImage.getUser())
.setWorkingDirectory(baseImage.getWorkingDirectory());
ContainerConfiguration containerConfiguration = buildContext.getContainerConfiguration();
// Add history elements for non-empty layers that don't have one yet
Instant layerCreationTime = containerConfiguration.getCreationTime();
for (int count = 0; count < baseImageLayers.size() - nonEmptyLayerCount; count++) {
imageBuilder.addHistory(
HistoryEntry.builder()
.setCreationTimestamp(layerCreationTime)
.setComment("auto-generated by Jib")
.build());
}
// Add built layers/configuration
for (PreparedLayer applicationLayer : applicationLayers) {
imageBuilder
.addLayer(applicationLayer)
.addHistory(
HistoryEntry.builder()
.setCreationTimestamp(layerCreationTime)
.setAuthor("Jib")
.setCreatedBy(buildContext.getToolName() + ":" + buildContext.getToolVersion())
.setComment(applicationLayer.getName())
.build());
}
imageBuilder
.addEnvironment(containerConfiguration.getEnvironmentMap())
.setCreated(containerConfiguration.getCreationTime())
.setEntrypoint(computeEntrypoint(baseImage, containerConfiguration))
.setProgramArguments(computeProgramArguments(baseImage, containerConfiguration))
.addExposedPorts(containerConfiguration.getExposedPorts())
.addVolumes(containerConfiguration.getVolumes())
.addLabels(containerConfiguration.getLabels());
if (containerConfiguration.getUser() != null) {
imageBuilder.setUser(containerConfiguration.getUser());
}
if (containerConfiguration.getWorkingDirectory() != null) {
imageBuilder.setWorkingDirectory(containerConfiguration.getWorkingDirectory().toString());
}
// Gets the container configuration content descriptor.
return imageBuilder.build();
}
}
|
@Test
public void test_generateHistoryObjects() {
Image image =
new BuildImageStep(
mockBuildContext,
mockProgressEventDispatcherFactory,
baseImage,
baseImageLayers,
applicationLayers)
.call();
// Make sure history is as expected
HistoryEntry expectedAddedBaseLayerHistory =
HistoryEntry.builder()
.setCreationTimestamp(Instant.EPOCH)
.setComment("auto-generated by Jib")
.build();
HistoryEntry expectedApplicationLayerHistoryDependencies =
HistoryEntry.builder()
.setCreationTimestamp(Instant.EPOCH)
.setAuthor("Jib")
.setCreatedBy("jib:null")
.setComment("dependencies")
.build();
HistoryEntry expectedApplicationLayerHistoryResources =
HistoryEntry.builder()
.setCreationTimestamp(Instant.EPOCH)
.setAuthor("Jib")
.setCreatedBy("jib:null")
.setComment("resources")
.build();
HistoryEntry expectedApplicationLayerHistoryClasses =
HistoryEntry.builder()
.setCreationTimestamp(Instant.EPOCH)
.setAuthor("Jib")
.setCreatedBy("jib:null")
.setComment("classes")
.build();
HistoryEntry expectedApplicationLayerHistoryExtrafiles =
HistoryEntry.builder()
.setCreationTimestamp(Instant.EPOCH)
.setAuthor("Jib")
.setCreatedBy("jib:null")
.setComment("extra files")
.build();
// Base layers (1 non-empty propagated, 2 empty propagated, 2 non-empty generated)
Assert.assertEquals(nonEmptyLayerHistory, image.getHistory().get(0));
Assert.assertEquals(emptyLayerHistory, image.getHistory().get(1));
Assert.assertEquals(emptyLayerHistory, image.getHistory().get(2));
Assert.assertEquals(expectedAddedBaseLayerHistory, image.getHistory().get(3));
Assert.assertEquals(expectedAddedBaseLayerHistory, image.getHistory().get(4));
// Application layers (4 generated)
Assert.assertEquals(expectedApplicationLayerHistoryDependencies, image.getHistory().get(5));
Assert.assertEquals(expectedApplicationLayerHistoryResources, image.getHistory().get(6));
Assert.assertEquals(expectedApplicationLayerHistoryClasses, image.getHistory().get(7));
Assert.assertEquals(expectedApplicationLayerHistoryExtrafiles, image.getHistory().get(8));
// Should be exactly 9 total
Assert.assertEquals(9, image.getHistory().size());
}
|
protected Map<String, CanaryScopePair> buildRequestScopes(
CanaryAnalysisExecutionRequest config, long interval, Duration intervalDuration) {
Map<String, CanaryScopePair> scopes = new HashMap<>();
config
.getScopes()
.forEach(
scope -> {
ScopeTimeConfig scopeTimeConfig =
calculateStartAndEndForJudgement(config, interval, intervalDuration);
CanaryScope controlScope =
new CanaryScope(
scope.getControlScope(),
scope.getControlLocation(),
scopeTimeConfig.start.minus(
Duration.ofMinutes(scope.getControlOffsetInMinutes())),
scopeTimeConfig.end.minus(
Duration.ofMinutes(scope.getControlOffsetInMinutes())),
config.getStep().getSeconds(),
scope.getExtendedScopeParams());
CanaryScope experimentScope =
new CanaryScope(
scope.getExperimentScope(),
scope.getExperimentLocation(),
scopeTimeConfig.start,
scopeTimeConfig.end,
config.getStep().getSeconds(),
scope.getExtendedScopeParams());
CanaryScopePair canaryScopePair =
CanaryScopePair.builder()
.controlScope(controlScope)
.experimentScope(experimentScope)
.build();
scopes.put(scope.getScopeName(), canaryScopePair);
});
return scopes;
}
|
@Test
public void
test_that_buildRequestScopes_has_expected_start_and_end_when_control_offset_is_supplied() {
int interval = 1;
String startIso = "2018-12-17T20:56:39.689Z";
Duration lifetimeDuration = Duration.ofMinutes(3L);
CanaryAnalysisExecutionRequest request =
CanaryAnalysisExecutionRequest.builder()
.scopes(
ImmutableList.of(
CanaryAnalysisExecutionRequestScope.builder()
.controlOffsetInMinutes(5L)
.startTimeIso(startIso)
.build()))
.build();
var requestScopes = stage.buildRequestScopes(request, interval, lifetimeDuration);
var defaultScope = requestScopes.get("default");
var expectedControlStartIso = "2018-12-17T20:51:39.689Z";
assertEquals(Instant.parse(startIso), defaultScope.getExperimentScope().getStart());
assertEquals(Instant.parse(expectedControlStartIso), defaultScope.getControlScope().getStart());
}
|
@Override
public boolean createReservation(ReservationId reservationId, String user,
Plan plan, ReservationDefinition contract) throws PlanningException {
LOG.info("placing the following ReservationRequest: " + contract);
try {
boolean res =
planner.createReservation(reservationId, user, plan, contract);
if (res) {
LOG.info("OUTCOME: SUCCESS, Reservation ID: "
+ reservationId.toString() + ", Contract: " + contract.toString());
} else {
LOG.info("OUTCOME: FAILURE, Reservation ID: "
+ reservationId.toString() + ", Contract: " + contract.toString());
}
return res;
} catch (PlanningException e) {
LOG.info("OUTCOME: FAILURE, Reservation ID: " + reservationId.toString()
+ ", Contract: " + contract.toString());
throw e;
}
}
|
@Test
public void testAllImpossible() throws PlanningException {
prepareBasicPlan();
// create an ALL request with an impossible combination; it should be
// rejected, and the allocations should remain unchanged
ReservationDefinition rr = new ReservationDefinitionPBImpl();
rr.setArrival(100L);
rr.setDeadline(120L);
rr.setRecurrenceExpression(recurrenceExpression);
ReservationRequests reqs = new ReservationRequestsPBImpl();
reqs.setInterpreter(ReservationRequestInterpreter.R_ALL);
ReservationRequest r = ReservationRequest.newInstance(
Resource.newInstance(1024, 1), 55, 5, 10);
ReservationRequest r2 = ReservationRequest.newInstance(
Resource.newInstance(2048, 2), 55, 5, 20);
List<ReservationRequest> list = new ArrayList<ReservationRequest>();
list.add(r);
list.add(r2);
reqs.setReservationResources(list);
rr.setReservationRequests(reqs);
ReservationId reservationID = ReservationSystemTestUtil
.getNewReservationId();
boolean result = false;
try {
// submit to agent
result = agent.createReservation(reservationID, "u1", plan, rr);
fail();
} catch (PlanningException p) {
// expected
}
// validate results: the whole request should be rejected, leaving the plan's two existing reservations unchanged
assertFalse("Agent-based allocation failed", result);
assertTrue("Agent-based allocation failed", plan.getAllReservations()
.size() == 2);
System.out.println("--------AFTER ALL IMPOSSIBLE ALLOCATION (queue: "
+ reservationID + ")----------");
System.out.println(plan.toString());
System.out.println(plan.toCumulativeString());
}
|
public boolean hasLateTransaction(long currentTimeMs) {
long lastTimestamp = oldestTxnLastTimestamp;
return lastTimestamp > 0 && (currentTimeMs - lastTimestamp) > maxTransactionTimeoutMs + ProducerStateManager.LATE_TRANSACTION_BUFFER_MS;
}
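
A worked instance of the lateness arithmetic (illustrative numbers; the 5-minute buffer is an assumed value of ProducerStateManager.LATE_TRANSACTION_BUFFER_MS):

class LateTransactionSketch {
    public static void main(String[] args) {
        long maxTransactionTimeoutMs = 60_000L;
        long lateTransactionBufferMs = 5 * 60 * 1000L; // assumed LATE_TRANSACTION_BUFFER_MS
        long oldestTxnLastTimestamp = 1_000L;          // last update of the oldest open txn
        long currentTimeMs = oldestTxnLastTimestamp + maxTransactionTimeoutMs + lateTransactionBufferMs + 1;
        boolean late = oldestTxnLastTimestamp > 0
                && (currentTimeMs - oldestTxnLastTimestamp) > maxTransactionTimeoutMs + lateTransactionBufferMs;
        System.out.println(late); // true: one millisecond past the grace window
    }
}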
|
@Test
public void testHasLateTransaction() {
long producerId1 = 39L;
short epoch1 = 2;
long producerId2 = 57L;
short epoch2 = 9;
// Start two transactions with a delay between them
appendClientEntry(stateManager, producerId1, epoch1, defaultSequence, 100, true);
assertFalse(stateManager.hasLateTransaction(time.milliseconds()));
time.sleep(500);
appendClientEntry(stateManager, producerId2, epoch2, defaultSequence, 150, true);
assertFalse(stateManager.hasLateTransaction(time.milliseconds()));
// Only the first transaction is late
time.sleep(lateTransactionTimeoutMs - 500 + 1);
assertTrue(stateManager.hasLateTransaction(time.milliseconds()));
// Both transactions are now late
time.sleep(500);
assertTrue(stateManager.hasLateTransaction(time.milliseconds()));
// Finish the first transaction
appendEndTxnMarker(stateManager, producerId1, epoch1, ControlRecordType.COMMIT, 200);
assertTrue(stateManager.hasLateTransaction(time.milliseconds()));
// Now finish the second transaction
appendEndTxnMarker(stateManager, producerId2, epoch2, ControlRecordType.COMMIT, 250);
assertFalse(stateManager.hasLateTransaction(time.milliseconds()));
}
|
@Override
public boolean supportsOpenCursorsAcrossCommit() {
return false;
}
|
@Test
void assertSupportsOpenCursorsAcrossCommit() {
assertFalse(metaData.supportsOpenCursorsAcrossCommit());
}
|
public static HttpClient create() {
return new HttpClientConnect(new HttpConnectionProvider());
}
|
@Test
void testConnectionNoLifeTimeElasticPoolHttp1() throws Exception {
ConnectionProvider provider =
ConnectionProvider.create("testConnectionNoLifeTimeElasticPoolHttp1", Integer.MAX_VALUE);
try {
ChannelId[] ids = doTestConnectionLifeTime(createServer(),
createClient(provider, () -> disposableServer.address()));
assertThat(ids[0]).isEqualTo(ids[1]);
}
finally {
provider.disposeLater()
.block(Duration.ofSeconds(5));
}
}
|
public static <T> Set<T> emptyIsNotFound(Set<T> item, String message) {
if (item == null || item.isEmpty()) {
log.error(message);
throw new ItemNotFoundException(message);
}
return item;
}
|
@Test(expected = ItemNotFoundException.class)
public void testEmptyIsNotFoundNullThrow() {
Tools.emptyIsNotFound(null, "Not found!");
fail("Should've thrown some thing");
}
|
@Override
public List<AlarmId> unassignDeletedUserAlarms(TenantId tenantId, UserId userId, String userTitle, long unassignTs) {
List<AlarmId> totalAlarmIds = new ArrayList<>();
PageLink pageLink = new PageLink(100, 0, null, new SortOrder("id", SortOrder.Direction.ASC));
while (true) {
PageData<AlarmId> pageData = alarmService.findAlarmIdsByAssigneeId(tenantId, userId, pageLink);
List<AlarmId> alarmIds = pageData.getData();
if (alarmIds.isEmpty()) {
break;
}
processAlarmsUnassignment(tenantId, userId, userTitle, alarmIds, unassignTs);
totalAlarmIds.addAll(alarmIds);
pageLink = pageLink.nextPageLink();
}
return totalAlarmIds;
}
|
@Test
public void testUnassignDeletedUserAlarms() throws ThingsboardException {
AlarmInfo alarm = new AlarmInfo();
alarm.setId(new AlarmId(UUID.randomUUID()));
when(alarmService.findAlarmIdsByAssigneeId(any(), any(), any()))
.thenReturn(new PageData<>(List.of(alarm.getId()), 0, 1, false))
.thenReturn(new PageData<>(Collections.EMPTY_LIST, 0, 0, false));
when(alarmSubscriptionService.unassignAlarm(any(), any(), anyLong()))
.thenReturn(AlarmApiCallResult.builder().successful(true).modified(true).alarm(alarm).build());
User user = new User();
user.setEmail("[email protected]");
user.setId(new UserId(UUID.randomUUID()));
service.unassignDeletedUserAlarms(new TenantId(UUID.randomUUID()), user.getId(), user.getTitle(), System.currentTimeMillis());
ObjectNode commentNode = JacksonUtil.newObjectNode();
commentNode.put("subtype", "ASSIGN");
commentNode.put("text", String.format("Alarm was unassigned because user %s - was deleted", user.getTitle()));
AlarmComment expectedAlarmComment = AlarmComment.builder()
.alarmId(alarm.getId())
.type(AlarmCommentType.SYSTEM)
.comment(commentNode)
.build();
verify(alarmCommentService, times(1))
.saveAlarmComment(eq(alarm), eq(expectedAlarmComment), eq(null));
}
|
public static Builder builder() {
return new Builder();
}
|
@Test
public void testEquality() {
Application a1 = baseBuilder.build();
Application a2 = DefaultApplication.builder(a1)
.build();
Application a3 = DefaultApplication.builder(baseBuilder)
.withFeaturesRepo(Optional.empty())
.build();
Application a4 = DefaultApplication.builder(baseBuilder)
.withOrigin(ORIGIN + "asd")
.build();
new EqualsTester()
.addEqualityGroup(a1, a2)
.addEqualityGroup(a3)
.addEqualityGroup(a4)
.testEquals();
}
|
@Override
public String call() throws RemoteServiceException {
var currentTime = System.nanoTime();
//Since currentTime and serverStartTime are both in nanoseconds, we convert
//to seconds by dividing by 1e9, forcing floating point division by
//multiplying by 1.0 first. We then check whether the elapsed time is less
//than the specified delay and reply accordingly
if ((currentTime - serverStartTime) * 1.0 / (1000 * 1000 * 1000) < delay) {
//Can use Thread.sleep() here to block and simulate a hung server
throw new RemoteServiceException("Delayed service is down");
}
return "Delayed service is working";
}
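
The guard clause's nanosecond-to-second conversion in isolation (a minimal sketch; the 2_000_000_000 ns head start matches what the test below constructs):

class ElapsedSecondsSketch {
    public static void main(String[] args) {
        long serverStartTime = System.nanoTime() - 2_000_000_000L; // pretend we started 2 s ago
        double elapsedSeconds = (System.nanoTime() - serverStartTime) * 1.0 / (1000 * 1000 * 1000);
        System.out.println(elapsedSeconds >= 2.0); // true, so a 1 s delay has already passed
    }
}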
|
@Test
void testParameterizedConstructor() throws RemoteServiceException {
var obj = new DelayedRemoteService(System.nanoTime() - 2000 * 1000 * 1000, 1);
assertEquals("Delayed service is working", obj.call());
}
|
public static String jmxSanitize(String name) {
return MBEAN_PATTERN.matcher(name).matches() ? name : ObjectName.quote(name);
}
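
What the quoted branch looks like in practice (standalone; javax.management ships with the JDK, and the mbean-safe pattern is assumed to admit plain alphanumerics plus a few punctuation characters):

import javax.management.ObjectName;

class JmxQuoteSketch {
    public static void main(String[] args) {
        // A value with a character outside the mbean-safe pattern gets quoted.
        System.out.println(ObjectName.quote("client:id")); // "client:id" (with surrounding quotes)
        // An already URL-encoded value such as "client%3Aid" matches the pattern and passes through unchanged.
    }
}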
|
@Test
public void testJmxSanitize() throws MalformedObjectNameException {
int unquoted = 0;
for (int i = 0; i < 65536; i++) {
char c = (char) i;
String value = "value" + c;
String jmxSanitizedValue = Sanitizer.jmxSanitize(value);
if (jmxSanitizedValue.equals(value))
unquoted++;
verifyJmx(jmxSanitizedValue, i);
String encodedValue = Sanitizer.sanitize(value);
verifyJmx(encodedValue, i);
// jmxSanitize should not sanitize URL-encoded values
assertEquals(encodedValue, Sanitizer.jmxSanitize(encodedValue));
}
assertEquals(68, unquoted); // a-zA-Z0-9-_% space and tab
}
|
@Override
public void close() {
}
|
@Test
public void shouldSucceed_justForwarded() throws ExecutionException, InterruptedException {
// Given:
when(pushRoutingOptions.getHasBeenForwarded()).thenReturn(true);
final PushRouting routing = new PushRouting();
// When:
final PushConnectionsHandle handle = handlePushRouting(routing);
context.runOnContext(v -> {
localPublisher.accept(LOCAL_ROW1);
localPublisher.accept(LOCAL_ROW2);
});
// Then:
verify(simpleKsqlClient, never()).makeQueryRequestStreamed(any(), any(), any(), any());
Set<List<?>> rows = waitOnRows(2);
handle.close();
assertThat(rows.contains(LOCAL_ROW1.value().values()), is(true));
assertThat(rows.contains(LOCAL_ROW2.value().values()), is(true));
}
|
@Override
public Connection connect(String url, Properties info) throws SQLException {
// calciteConnection is initialized with an empty Beam schema,
// we need to populate it with pipeline options, load table providers, etc
return JdbcConnection.initialize((CalciteConnection) super.connect(url, info));
}
|
@Test
public void testInternalConnect_boundedTable() throws Exception {
CalciteConnection connection =
JdbcDriver.connect(BOUNDED_TABLE, PipelineOptionsFactory.create());
Statement statement = connection.createStatement();
ResultSet resultSet = statement.executeQuery("SELECT * FROM test");
assertTrue(resultSet.next());
assertEquals(1, resultSet.getInt("id"));
assertEquals("first", resultSet.getString("name"));
assertFalse(resultSet.next());
}
|
public String getFingerprint() {
return fingerprint;
}
|
@Test
public void testIdenticalStreams() throws Exception {
final StreamListFingerprint fingerprint1 = new StreamListFingerprint(Lists.newArrayList(stream1));
final StreamListFingerprint fingerprint2 = new StreamListFingerprint(Lists.newArrayList(stream1));
final StreamListFingerprint fingerprint3 = new StreamListFingerprint(Lists.newArrayList(stream2));
assertEquals(fingerprint1.getFingerprint(), fingerprint2.getFingerprint());
assertNotEquals(fingerprint1.getFingerprint(), fingerprint3.getFingerprint());
}
|
@Override
public boolean match(Message msg, StreamRule rule) {
Double msgVal = getDouble(msg.getField(rule.getField()));
if (msgVal == null) {
return false;
}
Double ruleVal = getDouble(rule.getValue());
if (ruleVal == null) {
return false;
}
return rule.getInverted() ^ (msgVal < ruleVal);
}
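
The inverted flag folds into the comparison via XOR; a quick truth check (values mirror the test below):

class InvertedXorSketch {
    public static void main(String[] args) {
        boolean lessThan = -90000d < -54354d;  // true: message value is below the rule value
        System.out.println(false ^ lessThan);  // true  -> non-inverted rule matches
        System.out.println(true ^ lessThan);   // false -> inverted rule does not
    }
}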
|
@Test
public void testSuccessfulMatchWithNegativeValue() {
StreamRule rule = getSampleRule();
rule.setValue("-54354");
Message msg = getSampleMessage();
msg.addField("something", "-90000");
StreamRuleMatcher matcher = getMatcher(rule);
assertTrue(matcher.match(msg, rule));
}
|
public Optional<Node> localCorpusDispatchTarget() {
if (localCorpusDispatchTarget == null) return Optional.empty();
// Only use direct dispatch if the local group has sufficient coverage
Group localSearchGroup = groups.get(localCorpusDispatchTarget.group());
if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty();
// Only use direct dispatch if the local search node is not down
if (localCorpusDispatchTarget.isWorking() == Boolean.FALSE) return Optional.empty();
return Optional.of(localCorpusDispatchTarget);
}
|
@Test
void requireThatVipStatusIsDefaultDownButComesUpAfterPinging() {
try (State test = new State("cluster.1", 2, "a", "b")) {
assertTrue(test.searchCluster.localCorpusDispatchTarget().isEmpty());
assertFalse(test.vipStatus.isInRotation());
test.waitOneFullPingRound();
assertTrue(test.vipStatus.isInRotation());
}
}
|
public static <T extends NamedConfig> T getConfig(ConfigPatternMatcher configPatternMatcher,
Map<String, T> configs, String name,
Class clazz) {
return getConfig(configPatternMatcher, configs, name, clazz, (BiConsumer<T, String>) DEFAULT_NAME_SETTER);
}
|
@Test
public void getNonExistingConfig() {
QueueConfig newConfig = ConfigUtils.getConfig(configPatternMatcher, queueConfigs, "newConfig", QueueConfig.class);
assertEquals("newConfig", newConfig.getName());
assertEquals(1, newConfig.getBackupCount());
assertEquals(2, queueConfigs.size());
assertTrue(queueConfigs.containsKey("newConfig"));
assertTrue(queueConfigs.containsKey("default"));
}
|
@Override
public <OUT> ProcessConfigurableAndNonKeyedPartitionStream<OUT> process(
OneInputStreamProcessFunction<T, OUT> processFunction) {
validateStates(
processFunction.usesStates(),
new HashSet<>(
Arrays.asList(
StateDeclaration.RedistributionMode.NONE,
StateDeclaration.RedistributionMode.IDENTICAL)));
TypeInformation<OUT> outType =
StreamUtils.getOutputTypeForOneInputProcessFunction(processFunction, getType());
ProcessOperator<T, OUT> operator = new ProcessOperator<>(processFunction);
OneInputTransformation<T, OUT> outputTransform =
StreamUtils.getOneInputTransformation("Process", this, outType, operator);
environment.addOperator(outputTransform);
return StreamUtils.wrapWithConfigureHandle(
new NonKeyedPartitionStreamImpl<>(environment, outputTransform));
}
|
@Test
void testStateErrorWithTwoOutputStream() throws Exception {
ExecutionEnvironmentImpl env = StreamTestUtils.getEnv();
NonKeyedPartitionStreamImpl<Integer> stream =
new NonKeyedPartitionStreamImpl<>(
env, new TestingTransformation<>("t1", Types.INT, 1));
for (StateDeclaration stateDeclaration :
Arrays.asList(modeNoneStateDeclaration, modeIdenticalStateDeclaration)) {
assertThatThrownBy(
() ->
stream.process(
new StreamTestUtils.NoOpTwoOutputStreamProcessFunction(
new HashSet<>(
Collections.singletonList(
stateDeclaration)))))
.isInstanceOf(IllegalRedistributionModeException.class);
}
}
|
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
}
|
@Test
public void shouldNotCastDoubleTooBig() {
// When:
final Exception e = assertThrows(
ArithmeticException.class,
() -> cast(10.0, 2, 1)
);
// Then:
assertThat(e.getMessage(), containsString("Numeric field overflow"));
}
|
@Override
public YamlSingleRuleConfiguration swapToYamlConfiguration(final SingleRuleConfiguration data) {
YamlSingleRuleConfiguration result = new YamlSingleRuleConfiguration();
result.getTables().addAll(data.getTables());
data.getDefaultDataSource().ifPresent(result::setDefaultDataSource);
return result;
}
|
@Test
void assertSwapToYaml() {
assertThat(new YamlSingleRuleConfigurationSwapper().swapToYamlConfiguration(new SingleRuleConfiguration(Collections.emptyList(), "ds_0")).getDefaultDataSource(), is("ds_0"));
}
|
@Override
public void setControllers(List<ControllerInfo> controllers) {
DeviceId deviceId = getDeviceId();
checkNotNull(deviceId, MSG_DEVICE_ID_NULL);
MastershipService mastershipService = getHandler().get(MastershipService.class);
checkNotNull(mastershipService, MSG_MASTERSHIP_NULL);
if (!mastershipService.isLocalMaster(deviceId)) {
log.warn(
"I am not master for {}. " +
"Please use master {} to set controllers for this device",
deviceId, mastershipService.getMasterFor(deviceId));
return;
}
ObjectMapper mapper = new ObjectMapper();
// Create the object node to host the data
ObjectNode sendObjNode = mapper.createObjectNode();
// Insert header
ArrayNode ctrlsArrayNode = sendObjNode.putArray(PARAM_CTRL);
// Add each controller's information object
for (ControllerInfo ctrl : controllers) {
ObjectNode ctrlObjNode = mapper.createObjectNode();
ctrlObjNode.put(PARAM_CTRL_IP, ctrl.ip().toString());
ctrlObjNode.put(PARAM_CTRL_PORT, ctrl.port());
ctrlObjNode.put(PARAM_CTRL_TYPE, ctrl.type());
ctrlsArrayNode.add(ctrlObjNode);
}
// Post the controllers to the device
int response = getController().post(
deviceId, URL_CONTROLLERS_SET,
new ByteArrayInputStream(sendObjNode.toString().getBytes()), JSON);
if (!checkStatusCode(response)) {
log.error("Failed to set controllers on device {}", deviceId);
}
}
|
@Test
public void testSetControllers() {
// Get device handler
DriverHandler driverHandler = driverService.createHandler(restDeviceId1);
assertThat(driverHandler, notNullValue());
// TODO: Fix this test
}
|
@Override
@SuppressWarnings("unchecked")
public <T> T getExtGroupRealization(Class<T> extensionType, String name) throws ExtensionException {
if (name == null) {
throw new ExtensionException("name can not be null");
}
if (extensionType == null) {
throw new ExtensionTypeException("extensionType can not be null");
}
if (!extensionType.isInterface()) {
throw new ExtensionTypeException("extensionType must be an interface");
}
if (!extensionType.isAnnotationPresent(ExtensionPoint.class)) {
throw new ExtensionTypeException("extensionType must be annotated with @ExtensionPoint");
}
Object instance = extensionInstances.get(makeKey(extensionType, name));
if (instance == null) {
throw new ExtensionNotFoundException("extension " + name + " not found");
}
if (!extensionType.isInstance(instance)) {
throw new ExtensionTypeException("extension " + name + " is not an instance of " + extensionType.getName());
}
return (T) instance;
}
|
@Test
public void testGetExtGroupRealization() throws ExtensionException {
ExtensionException exception;
DefaultExtGroupRealizationManager manager = new DefaultExtGroupRealizationManager();
exception = assertThrows(ExtensionException.class, () -> manager.getExtGroupRealization(null, null));
assertEquals("name can not be null", exception.getMessage());
exception = assertThrows(ExtensionException.class, () -> manager.getExtGroupRealization(null, ""));
assertEquals("extensionType can not be null", exception.getMessage());
exception = assertThrows(ExtensionException.class, () -> manager.getExtGroupRealization(ExtGroupRealization1.class, ""));
assertEquals("extensionType must be an interface", exception.getMessage());
exception = assertThrows(ExtensionException.class, () -> manager.getExtGroupRealization(IBusiness.class, ""));
assertEquals("extensionType must be annotated with @ExtensionPoint", exception.getMessage());
exception = assertThrows(ExtensionException.class, () -> manager.getExtGroupRealization(ExtA.class, "x"));
assertEquals("extension x not found", exception.getMessage());
manager.registerExtGroupRealization(new ExtGroupRealization2(), "ExtGroupRealization2");
ExtA extA = manager.getExtGroupRealization(ExtA.class, "ExtGroupRealization2");
}
|
public String process(final Expression expression) {
return formatExpression(expression);
}
|
@Test
public void shouldGenerateCorrectCodeForTime() {
// Given:
final TimeLiteral time = new TimeLiteral(new Time(185000));
// When:
final String java = sqlToJavaVisitor.process(time);
// Then:
assertThat(java, is("00:03:05"));
}
|
@Override
public V put(K key, V value) {
return map.put(key, value);
}
|
@Test
public void testPut() {
map.put(42, "oldValue");
String oldValue = adapter.put(42, "newValue");
assertEquals("oldValue", oldValue);
assertEquals("newValue", map.get(42));
}
|