focal_method (string, 13 to 60.9k chars) | test_case (string, 25 to 109k chars)
---|---
static AnnotatedClusterState generatedStateFrom(final Params params) {
final ContentCluster cluster = params.cluster;
final ClusterState workingState = ClusterState.emptyState();
final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
for (final NodeInfo nodeInfo : cluster.getNodeInfos()) {
final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params, nodeStateReasons);
workingState.setNodeState(nodeInfo.getNode(), nodeState);
}
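// After per-node states are computed, apply group-availability takedown rules and decide whether the whole cluster must be marked DOWN.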
takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
if (reasonToBeDown.isPresent()) {
workingState.setClusterState(State.DOWN);
}
workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
}
|
@Test
void cluster_not_down_if_more_than_min_count_of_storage_nodes_are_available() {
final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
.bringEntireClusterUp()
.reportStorageNodeState(0, State.DOWN);
final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(2);
final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:d"));
assertThat(state.getClusterStateReason(), equalTo(Optional.empty()));
}
|
public List<Pair<byte[], byte[]>> readRecords() throws IOException {
final List<Pair<byte[], byte[]>> commands = new ArrayList<>();
for (final String line : Files.readAllLines(getFile().toPath(), StandardCharsets.UTF_8)) {
final String commandId = line.substring(0, line.indexOf(KEY_VALUE_SEPARATOR_STR));
final String command = line.substring(line.indexOf(KEY_VALUE_SEPARATOR_STR) + 1);
commands.add(new Pair<>(
commandId.getBytes(StandardCharsets.UTF_8),
command.getBytes(StandardCharsets.UTF_8)
));
}
return commands;
}
|
@Test
public void shouldBeEmptyWhenReadAllCommandsFromEmptyFile() throws IOException {
// When
final List<?> commands = replayFile.readRecords();
// Then
assertThat(commands.size(), is(0));
}
|
@Override
public InputChannel getChannel(int channelIndex) {
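// Map the union-wide channel index to its owning gate, then subtract that gate's channel offset to get the gate-local index.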
int gateIndex = inputChannelToInputGateIndex[channelIndex];
return inputGatesByGateIndex
.get(gateIndex)
.getChannel(channelIndex - inputGateChannelIndexOffsets[gateIndex]);
}
|
@Test
void testUpdateInputChannel() throws Exception {
final SingleInputGate inputGate1 = createInputGate(1);
TestInputChannel inputChannel1 = new TestInputChannel(inputGate1, 0);
inputGate1.setInputChannels(inputChannel1);
final SingleInputGate inputGate2 = createInputGate(1);
TestingResultPartitionManager partitionManager =
new TestingResultPartitionManager(new NoOpResultSubpartitionView());
InputChannel unknownInputChannel2 =
InputChannelBuilder.newBuilder()
.setPartitionManager(partitionManager)
.buildUnknownChannel(inputGate2);
inputGate2.setInputChannels(unknownInputChannel2);
UnionInputGate unionInputGate = new UnionInputGate(inputGate1, inputGate2);
ResultPartitionID resultPartitionID = unknownInputChannel2.getPartitionId();
ResourceID location = ResourceID.generate();
inputGate2.updateInputChannel(
location,
createRemoteWithIdAndLocation(resultPartitionID.getPartitionId(), location));
assertThat(unionInputGate.getChannel(0)).isEqualTo(inputChannel1);
// Check that the updated input channel is visible via the UnionInputGate
assertThat(unionInputGate.getChannel(1)).isEqualTo(inputGate2.getChannel(0));
}
|
@GetMapping("/all")
public ShenyuAdminResult queryAllPlugins() {
List<PluginData> pluginDataList = pluginService.listAll();
return ShenyuAdminResult.success(ShenyuResultMessage.QUERY_SUCCESS, pluginDataList);
}
|
@Test
public void testQueryAllPlugins() throws Exception {
given(this.pluginService.listAll())
.willReturn(new ArrayList<>());
this.mockMvc.perform(MockMvcRequestBuilders.get("/plugin/all"))
.andExpect(status().isOk())
.andReturn();
}
|
public static int min(int a, int b, int c) {
return Math.min(Math.min(a, b), c);
}
|
@Test
public void testMin_3args() {
System.out.println("min");
int a = -1;
int b = 0;
int c = 1;
int expResult = -1;
int result = MathEx.min(a, b, c);
assertEquals(expResult, result);
}
|
@Override
public <T> T clone(T object) {
if (object instanceof String) {
return object;
} else if (object instanceof Collection) {
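// Collections whose elements are not Serializable are cloned via a JSON round-trip with an explicit parametric type.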
Object firstElement = findFirstNonNullElement((Collection) object);
if (firstElement != null && !(firstElement instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
} else if (object instanceof Map) {
Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
if (firstEntry != null) {
Object key = firstEntry.getKey();
Object value = firstEntry.getValue();
if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
}
}
} else if (object instanceof JsonNode) {
return (T) ((JsonNode) object).deepCopy();
}
if (object instanceof Serializable) {
try {
return (T) SerializationHelper.clone((Serializable) object);
} catch (SerializationException e) {
//it is possible that the object itself implements java.io.Serializable, but its underlying structure does not;
//in this case we switch to the other JSON marshaling strategy, which doesn't use Java serialization
}
}
return jsonClone(object);
}
|
@Test
public void should_clone_map_of_serializable_key_and_value_with_null() {
Map<String, SerializableObject> original = new LinkedHashMap<>();
original.put("null", null);
original.put("key", new SerializableObject("value"));
Object cloned = serializer.clone(original);
assertEquals(original, cloned);
assertNotSame(original, cloned);
}
|
public static <InputT, OutputT> PTransform<PCollection<InputT>, PCollection<OutputT>> to(
Class<OutputT> clazz) {
return to(TypeDescriptor.of(clazz));
}
|
@Test
@Category(NeedsRunner.class)
public void testFromRowsUnboxingRow() {
PCollection<POJO1Nested> pojos =
pipeline
.apply(Create.of(new POJO1()))
.apply(Select.fieldNames("field3"))
.apply(Convert.to(TypeDescriptor.of(POJO1Nested.class)));
PAssert.that(pojos).containsInAnyOrder(new POJO1Nested());
pipeline.run();
}
|
public static void appendPrettyHexDump(final StringBuilder dump, final DirectBuffer buffer)
{
appendPrettyHexDump(dump, buffer, 0, buffer.capacity());
}
|
@Test
void shouldPrettyPrintHex()
{
final String contents = "Hello World!\nThis is a test String\nto print out.";
final ExpandableArrayBuffer buffer = new ExpandableArrayBuffer();
buffer.putStringAscii(0, contents);
final StringBuilder builder = new StringBuilder();
PrintBufferUtil.appendPrettyHexDump(builder, buffer);
assertThat(builder.toString(), containsString("0...Hello World!"));
}
|
public static XmlXStream getInstance() {
return s_instance;
}
|
@Test(expected=ForbiddenClassException.class, timeout=5000)
public void testVoidElementUnmarshalling() throws Exception {
XStream xstream = XmlXStream.getInstance();
xstream.fromXML("<void/>");
}
|
@Override
public int compareTo(Resource other) {
checkArgument(other != null && getClass() == other.getClass() && name.equals(other.name));
return value.compareTo(other.value);
}
|
@Test
void testCompareToFailDifferentName() {
// initialized as two resources with different names
final Resource resource1 = new TestResource("name1", 0.0);
final Resource resource2 = new TestResource("name2", 0.0);
assertThatThrownBy(() -> resource1.compareTo(resource2))
.isInstanceOf(IllegalArgumentException.class);
}
|
@Override
public void unsubscribeService(Service service, Subscriber subscriber, String clientId) {
Service singleton = ServiceManager.getInstance().getSingletonIfExist(service).orElse(service);
Client client = clientManager.getClient(clientId);
checkClientIsLegal(client, clientId);
client.removeServiceSubscriber(singleton);
client.setLastUpdatedTime();
NotifyCenter.publishEvent(new ClientOperationEvent.ClientUnsubscribeServiceEvent(singleton, clientId));
}
|
@Test
void testUnSubscribeWhenClientNull() {
assertThrows(NacosRuntimeException.class, () -> {
when(clientManager.getClient(anyString())).thenReturn(null);
// Expected exception
ephemeralClientOperationServiceImpl.unsubscribeService(service, subscriber, ipPortBasedClientId);
});
}
|
@Override
public boolean isEmpty() {
return sideInputs.isEmpty();
}
|
@Test
public void testIsEmptyFalse() {
PCollectionView<Iterable<String>> view =
Pipeline.create().apply(Create.of("1")).apply(View.asIterable());
SideInputHandler sideInputHandler =
new SideInputHandler(ImmutableList.of(view), InMemoryStateInternals.<Void>forKey(null));
assertFalse(sideInputHandler.isEmpty());
}
|
@ApiOperation(value = "Get all Widget types for specified Bundle (getBundleWidgetTypes)",
notes = "Returns an array of Widget Type objects that belong to specified Widget Bundle." + WIDGET_TYPE_DESCRIPTION + " " + SYSTEM_OR_TENANT_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('SYS_ADMIN', 'TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/widgetTypes", params = {"widgetsBundleId"}, method = RequestMethod.GET)
@ResponseBody
public List<WidgetType> getBundleWidgetTypes(
@Parameter(description = "Widget Bundle Id", required = true)
@RequestParam("widgetsBundleId") String strWidgetsBundleId) throws ThingsboardException {
WidgetsBundleId widgetsBundleId = new WidgetsBundleId(toUUID(strWidgetsBundleId));
return checkNotNull(widgetTypeService.findWidgetTypesByWidgetsBundleId(getTenantId(), widgetsBundleId));
}
|
@Test
public void testGetBundleWidgetTypes() throws Exception {
WidgetsBundle widgetsBundle = new WidgetsBundle();
widgetsBundle.setTitle("My widgets bundle");
widgetsBundle = doPost("/api/widgetsBundle", widgetsBundle, WidgetsBundle.class);
List<WidgetType> widgetTypes = new ArrayList<>();
for (int i = 0; i < 89; i++) {
WidgetTypeDetails widgetType = new WidgetTypeDetails();
widgetType.setName("Widget Type " + i);
widgetType.setDescriptor(JacksonUtil.fromString("{ \"someKey\": \"someValue\" }", JsonNode.class));
widgetTypes.add(new WidgetType(doPost("/api/widgetType", widgetType, WidgetTypeDetails.class)));
}
List<String> widgetTypeIds = widgetTypes.stream().map(type -> type.getId().getId().toString()).collect(Collectors.toList());
doPost("/api/widgetsBundle/" + widgetsBundle.getId().getId().toString() + "/widgetTypes", widgetTypeIds);
List<WidgetType> loadedWidgetTypes = doGetTyped("/api/widgetTypes?widgetsBundleId={widgetsBundleId}",
new TypeReference<>() {}, widgetsBundle.getId().getId().toString());
Collections.sort(widgetTypes, idComparator);
Collections.sort(loadedWidgetTypes, idComparator);
Assert.assertEquals(widgetTypes, loadedWidgetTypes);
loginCustomerUser();
List<WidgetType> loadedWidgetTypesCustomer = doGetTyped("/api/widgetTypes?widgetsBundleId={widgetsBundleId}",
new TypeReference<>() {}, widgetsBundle.getId().getId().toString());
Collections.sort(loadedWidgetTypesCustomer, idComparator);
Assert.assertEquals(widgetTypes, loadedWidgetTypesCustomer);
List<WidgetTypeDetails> customerLoadedWidgetTypesDetails = doGetTyped("/api/widgetTypesDetails?widgetsBundleId={widgetsBundleId}",
new TypeReference<>() {}, widgetsBundle.getId().getId().toString());
List<WidgetType> widgetTypesFromDetailsListCustomer = customerLoadedWidgetTypesDetails.stream().map(WidgetType::new).collect(Collectors.toList());
Collections.sort(widgetTypesFromDetailsListCustomer, idComparator);
Assert.assertEquals(widgetTypesFromDetailsListCustomer, loadedWidgetTypes);
loginSysAdmin();
List<WidgetType> sysAdminLoadedWidgetTypes = doGetTyped("/api/widgetTypes?widgetsBundleId={widgetsBundleId}",
new TypeReference<>() {}, widgetsBundle.getId().getId().toString());
Collections.sort(sysAdminLoadedWidgetTypes, idComparator);
Assert.assertEquals(widgetTypes, sysAdminLoadedWidgetTypes);
List<WidgetTypeDetails> sysAdminLoadedWidgetTypesDetails = doGetTyped("/api/widgetTypesDetails?widgetsBundleId={widgetsBundleId}",
new TypeReference<>() {}, widgetsBundle.getId().getId().toString());
List<WidgetType> widgetTypesFromDetailsListSysAdmin = sysAdminLoadedWidgetTypesDetails.stream().map(WidgetType::new).collect(Collectors.toList());
Collections.sort(widgetTypesFromDetailsListSysAdmin, idComparator);
Assert.assertEquals(widgetTypesFromDetailsListSysAdmin, loadedWidgetTypes);
}
|
long nextPullOffset(MessageQueue remoteQueue) {
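// Fetch and cache the consume offset for the queue on first access; later calls reuse the cached value.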
if (!pullOffsetTable.containsKey(remoteQueue)) {
try {
pullOffsetTable.putIfAbsent(remoteQueue,
rocketmqPullConsumer.fetchConsumeOffset(remoteQueue, false));
} catch (MQClientException e) {
log.error("An error occurred in fetch consume offset process.", e);
}
}
return pullOffsetTable.get(remoteQueue);
}
|
@Test
public void testNextPullOffset() throws Exception {
MessageQueue messageQueue = new MessageQueue();
when(rocketmqPullConsume.fetchConsumeOffset(any(MessageQueue.class), anyBoolean()))
.thenReturn(123L);
assertThat(localMessageCache.nextPullOffset(new MessageQueue())).isEqualTo(123L);
}
|
@Nonnull
public static RestClientBuilder client() {
return client("localhost", DEFAULT_PORT);
}
|
@Test
public void given_clientAsString_whenReadFromElasticSource_thenFinishSuccessfully() {
ElasticsearchContainer container = ElasticSupport.elastic.get();
String httpHostAddress = container.getHttpHostAddress();
indexDocument("my-index", of("name", "Frantisek"));
Pipeline p = Pipeline.create();
p.readFrom(ElasticSources.elastic(
() -> ElasticClients.client(httpHostAddress),
SearchHit::getSourceAsString)
).writeTo(Sinks.list(results));
submitJob(p);
assertThat(results).hasSize(1);
}
|
public void asyncAddData(T data, AddDataCallback callback, Object ctx){
if (!batchEnabled){
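// Batching disabled: fail fast if the writer is closing or closed, otherwise write the serialized entry straight to the managed ledger.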
if (state == State.CLOSING || state == State.CLOSED){
callback.addFailed(BUFFERED_WRITER_CLOSED_EXCEPTION, ctx);
return;
}
ByteBuf byteBuf = dataSerializer.serialize(data);
managedLedger.asyncAddEntry(byteBuf, DisabledBatchCallback.INSTANCE,
AsyncAddArgs.newInstance(callback, ctx, System.currentTimeMillis(), byteBuf));
return;
}
CompletableFuture
.runAsync(
() -> internalAsyncAddData(data, callback, ctx), singleThreadExecutorForWrite)
.exceptionally(e -> {
log.warn("Execute 'internalAsyncAddData' fail", e);
return null;
});
}
|
@Test
public void testMetricsStatsThatTriggeredByLargeSingleData() throws Exception {
// Use TwoLenSumDataSerializer so that writes alternate between a small record and a large record.
int bytesSizePerRecordWhichInBatch = 4;
int batchedWriteMaxSize = 1024;
TwoLenSumDataSerializer dataSerializer =
new TwoLenSumDataSerializer(bytesSizePerRecordWhichInBatch, batchedWriteMaxSize);
int writeCount = 100;
// Both "directly write BK because the data too large" and "flush batch data because next data too large" will
// write BK, so expectWriteBKCount equals writeCount.
int expectWriteBKCount = writeCount;
int singleLargeDataRequestCount = expectWriteBKCount / 2;
int expectedBatchFlushTriggeredByLargeData = singleLargeDataRequestCount;
int expectedTotalBytesSize = expectedBatchFlushTriggeredByLargeData * bytesSizePerRecordWhichInBatch;
var callbackWithCounter = createCallBackWithCounter();
// Create TxnLogBufferedWriter.
var txnLogBufferedWriterContext = createTxnBufferedWriterContextWithMetrics(
dataSerializer, Integer.MAX_VALUE, batchedWriteMaxSize, Integer.MAX_VALUE);
var txnLogBufferedWriter = txnLogBufferedWriterContext.txnLogBufferedWriter;
// Add some data.
for (int i = 0; i < writeCount; i++){
txnLogBufferedWriter.asyncAddData(1, callbackWithCounter.callback, i);
}
// Wait for all data writes to finish.
Awaitility.await().atMost(2, TimeUnit.SECONDS).until(
() -> callbackWithCounter.finishCounter.get() + callbackWithCounter.failureCounter.get() == writeCount
);
assertEquals(callbackWithCounter.failureCounter.get(), 0);
assertEquals(txnLogBufferedWriterContext.mockedManagedLedger.writeCounter.get(), expectWriteBKCount);
verifyTheCounterMetrics(0,0,0,expectedBatchFlushTriggeredByLargeData);
verifyTheHistogramMetrics(expectedBatchFlushTriggeredByLargeData,
writeCount - singleLargeDataRequestCount, expectedTotalBytesSize);
// cleanup.
releaseTxnLogBufferedWriterContext(txnLogBufferedWriterContext);
// After close, verify that the metrics drop back to 0.
verifyTheCounterMetrics(0,0,0,0);
verifyTheHistogramMetrics(0,0,0);
}
|
@Override
public void execute(EventNotificationContext ctx) throws EventNotificationException {
final SlackEventNotificationConfig config = (SlackEventNotificationConfig) ctx.notificationConfig();
LOG.debug("SlackEventNotification backlog size in method execute is [{}]", config.backlogSize());
try {
SlackMessage slackMessage = createSlackMessage(ctx, config);
slackClient.send(slackMessage, config.webhookUrl());
} catch (JsonProcessingException ex) {
String errorMessage = String.format(Locale.ENGLISH, "Error serializing Slack message object while sending the SlackEventNotification :: %s", ex.getMessage());
LOG.error(errorMessage, ex);
final Notification systemNotification = notificationService.buildNow()
.addNode(nodeId.getNodeId())
.addType(Notification.Type.GENERIC)
.addSeverity(Notification.Severity.URGENT)
.addDetail("title", "SlackEventNotification Failed")
.addDetail("description", errorMessage);
notificationService.publishIfFirst(systemNotification);
throw new EventNotificationException("There was an error serializing the Slack message object when sending the SlackEventNotification", ex);
} catch (TemporaryEventNotificationException exp) {
//scheduler needs to retry a TemporaryEventNotificationException
throw exp;
} catch (PermanentEventNotificationException exp) {
String errorMessage = String.format(Locale.ENGLISH, "Error sending the SlackEventNotification :: %s", exp.getMessage());
final Notification systemNotification = notificationService.buildNow()
.addNode(nodeId.getNodeId())
.addType(Notification.Type.GENERIC)
.addSeverity(Notification.Severity.URGENT)
.addDetail("title", "SlackEventNotification Failed")
.addDetail("description", errorMessage);
notificationService.publishIfFirst(systemNotification);
throw exp;
} catch (Exception exp) {
throw new EventNotificationException("There was an exception triggering the SlackEventNotification", exp);
}
}
|
@Test(expected = EventNotificationException.class)
public void executeWithInvalidWebhookUrl() throws EventNotificationException, JsonProcessingException {
givenGoodNotificationService();
givenSlackClientThrowsPermException();
// when execute is called with an invalid webhook URL, we expect an event notification exception
slackEventNotification.execute(eventNotificationContext);
}
|
@Produces
@DefaultBean
@Singleton
public JobRequestScheduler jobRequestScheduler(StorageProvider storageProvider) {
if (jobRunrBuildTimeConfiguration.jobScheduler().enabled()) {
return new JobRequestScheduler(storageProvider, emptyList());
}
return null;
}
|
@Test
void jobRequestSchedulerIsNotSetupWhenConfigured() {
when(jobSchedulerBuildTimeConfiguration.enabled()).thenReturn(false);
assertThat(jobRunrProducer.jobRequestScheduler(storageProvider)).isNull();
}
|
int parseAndConvert(String[] args) throws Exception {
Options opts = createOptions();
int retVal = 0;
try {
if (args.length == 0) {
LOG.info("Missing command line arguments");
printHelp(opts);
return 0;
}
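// Parse the CLI options; each catch block below maps one failure category to its own error message and a -1 return value.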
CommandLine cliParser = new GnuParser().parse(opts, args);
if (cliParser.hasOption(CliOption.HELP.shortSwitch)) {
printHelp(opts);
return 0;
}
FSConfigToCSConfigConverter converter =
prepareAndGetConverter(cliParser);
converter.convert(converterParams);
String outputDir = converterParams.getOutputDirectory();
boolean skipVerification =
cliParser.hasOption(CliOption.SKIP_VERIFICATION.shortSwitch);
if (outputDir != null && !skipVerification) {
validator.validateConvertedConfig(
converterParams.getOutputDirectory());
}
} catch (ParseException e) {
String msg = "Options parsing failed: " + e.getMessage();
logAndStdErr(e, msg);
printHelp(opts);
retVal = -1;
} catch (PreconditionException e) {
String msg = "Cannot start FS config conversion due to the following"
+ " precondition error: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (UnsupportedPropertyException e) {
String msg = "Unsupported property/setting encountered during FS config "
+ "conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (ConversionException | IllegalArgumentException e) {
String msg = "Fatal error during FS config conversion: " + e.getMessage();
handleException(e, msg);
retVal = -1;
} catch (VerificationException e) {
Throwable cause = e.getCause();
String msg = "Verification failed: " + e.getCause().getMessage();
conversionOptions.handleVerificationFailure(cause, msg);
retVal = -1;
}
conversionOptions.handleParsingFinished();
return retVal;
}
|
@Test
public void testConvertFSConfigurationErrorHandling2() throws Exception {
setupFSConfigConversionFiles(true);
String[] args = getArgumentsAsArrayWithDefaults("-f",
FSConfigConverterTestCommons.FS_ALLOC_FILE,
"-r", FSConfigConverterTestCommons.CONVERSION_RULES_FILE, "-p");
FSConfigToCSConfigArgumentHandler argumentHandler =
createArgumentHandler();
Mockito.doThrow(ConversionException.class).when(mockConverter)
.convert(ArgumentMatchers.any(FSConfigToCSConfigConverterParams.class));
int retVal = argumentHandler.parseAndConvert(args);
assertEquals("Return value", -1, retVal);
assertTrue("Error content missing", fsTestCommons.getErrContent()
.toString().contains("Fatal error during FS config conversion"));
}
|
@Override
public int getDefaultTransactionIsolation() {
return 0;
}
|
@Test
void assertGetDefaultTransactionIsolation() {
assertThat(metaData.getDefaultTransactionIsolation(), is(0));
}
|
public static <T> void accept(Consumer<T> consumer, T target) {
if (consumer != null) {
consumer.accept(target);
}
}
|
@Test
public void testConsumer() {
final AtomicBoolean atomicBoolean = new AtomicBoolean();
CommonUtils.accept(param -> atomicBoolean.set(param == SLEEP_TIME), SLEEP_TIME);
assertTrue(atomicBoolean.get());
}
|
public void setLanguage(String languageString) {
Set<String> invalidCodes = new HashSet<>();
Set<String> validCodes = new HashSet<>();
getLangs(languageString, validCodes, invalidCodes);
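// Reject the whole assignment if any of the supplied language codes is invalid.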
if (!invalidCodes.isEmpty()) {
throw new IllegalArgumentException("Invalid language code(s): " + invalidCodes);
}
this.language = languageString;
userConfigured.add("language");
}
|
@Test
public void testBadLanguageCode() throws Exception {
TesseractOCRConfig tesseractOCRConfig = new TesseractOCRConfig();
assertThrows(IllegalArgumentException.class, () -> {
tesseractOCRConfig.setLanguage("kerplekistani");
});
}
|
@Override
public int unlink(String path) {
return AlluxioFuseUtils.call(LOG, () -> rmInternal(path),
FuseConstants.FUSE_UNLINK, "path=%s", path);
}
|
@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu")
@Ignore
public void unlink() throws Exception {
AlluxioURI expectedPath = BASE_EXPECTED_URI.join("/foo/bar");
doNothing().when(mFileSystem).delete(expectedPath);
mFuseFs.unlink("/foo/bar");
verify(mFileSystem).delete(expectedPath);
}
|
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
List<String> partNames, boolean areAllPartsFound) throws MetaException {
checkStatisticsList(colStatsWithSourceInfo);
ColumnStatisticsObj statsObj = null;
String colType;
String colName = null;
// check if all the ColumnStatisticsObjs contain stats and all the ndv are
// bitvectors
boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
NumDistinctValueEstimator ndvEstimator = null;
boolean areAllNDVEstimatorsMergeable = true;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
if (statsObj == null) {
colName = cso.getColName();
colType = cso.getColType();
statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
cso.getStatsData().getSetField());
LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName, doAllPartitionContainStats);
}
LongColumnStatsDataInspector columnStatsData = longInspectorFromStats(cso);
// check if we can merge NDV estimators
if (columnStatsData.getNdvEstimator() == null) {
areAllNDVEstimatorsMergeable = false;
break;
} else {
NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
if (ndvEstimator == null) {
ndvEstimator = estimator;
} else {
if (!ndvEstimator.canMerge(estimator)) {
areAllNDVEstimatorsMergeable = false;
break;
}
}
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
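// Every requested partition has stats (or there is at most one), so aggregate the column statistics directly.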
LongColumnStatsDataInspector aggregateData = null;
long lowerBound = 0;
long higherBound = 0;
double densityAvgSum = 0.0;
LongColumnStatsMerger merger = new LongColumnStatsMerger();
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
lowerBound = Math.max(lowerBound, newData.getNumDVs());
higherBound += newData.getNumDVs();
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(merger.mergeLowValue(
merger.getLowValue(aggregateData), merger.getLowValue(newData)));
aggregateData.setHighValue(merger.mergeHighValue(
merger.getHighValue(aggregateData), merger.getHighValue(newData)));
aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
}
}
if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
// if all the ColumnStatisticsObjs contain bitvectors, we do not need to
// use uniform distribution assumption because we can merge bitvectors
// to get a good estimation.
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
} else {
long estimation;
if (useDensityFunctionForNDVEstimation) {
// We have estimation, lowerbound and higherbound. We use estimation
// if it is between lowerbound and higherbound.
double densityAvg = densityAvgSum / partNames.size();
estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
if (estimation < lowerBound) {
estimation = lowerBound;
} else if (estimation > higherBound) {
estimation = higherBound;
}
} else {
estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
}
aggregateData.setNumDVs(estimation);
}
columnStatisticsData.setLongStats(aggregateData);
} else {
// TODO: bail out if missing stats are over a certain threshold
// we need extrapolation
LOG.debug("start extrapolation for {}", colName);
Map<String, Integer> indexMap = new HashMap<>();
for (int index = 0; index < partNames.size(); index++) {
indexMap.put(partNames.get(index), index);
}
Map<String, Double> adjustedIndexMap = new HashMap<>();
Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
// while we scan the column stats, we also get the densityAvg, lowerbound and
// higherbound when useDensityFunctionForNDVEstimation is true.
double densityAvgSum = 0.0;
if (!areAllNDVEstimatorsMergeable) {
// if not every partition uses bitvector for ndv, we just fall back to
// the traditional extrapolation methods.
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsData newData = cso.getStatsData().getLongStats();
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (newData.getHighValue() - newData.getLowValue())) / newData.getNumDVs();
}
adjustedIndexMap.put(partName, (double) indexMap.get(partName));
adjustedStatsMap.put(partName, cso.getStatsData());
}
} else {
// we first merge all the adjacent bitvectors that we could merge and
// derive new partition names and index.
StringBuilder pseudoPartName = new StringBuilder();
double pseudoIndexSum = 0;
int length = 0;
int curIndex = -1;
LongColumnStatsDataInspector aggregateData = null;
for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
ColumnStatisticsObj cso = csp.getColStatsObj();
String partName = csp.getPartName();
LongColumnStatsDataInspector newData = longInspectorFromStats(cso);
// newData.isSetBitVectors() should be true for sure because we
// already checked it before.
if (indexMap.get(partName) != curIndex) {
// There is a bitvector, but it is not adjacent to the previous ones.
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
// reset everything
pseudoPartName = new StringBuilder();
pseudoIndexSum = 0;
length = 0;
ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
}
aggregateData = null;
}
curIndex = indexMap.get(partName);
pseudoPartName.append(partName);
pseudoIndexSum += curIndex;
length++;
curIndex++;
if (aggregateData == null) {
aggregateData = newData.deepCopy();
} else {
aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
}
ndvEstimator.mergeEstimators(newData.getNdvEstimator());
}
if (length > 0) {
// we have to set ndv
adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
ColumnStatisticsData csd = new ColumnStatisticsData();
csd.setLongStats(aggregateData);
adjustedStatsMap.put(pseudoPartName.toString(), csd);
if (useDensityFunctionForNDVEstimation) {
densityAvgSum += ((double) (aggregateData.getHighValue() - aggregateData.getLowValue())) / aggregateData.getNumDVs();
}
}
}
extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
}
LOG.debug(
"Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
colName, columnStatisticsData.getLongStats().getNumDVs(), partNames.size(),
colStatsWithSourceInfo.size());
KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
if (mergedKllHistogramEstimator != null) {
columnStatisticsData.getLongStats().setHistogram(mergedKllHistogramEstimator.serialize());
}
statsObj.setStatsData(columnStatisticsData);
return statsObj;
}
|
@Test
public void testAggregateMultipleStatsWhenSomeNullValues() throws MetaException {
List<String> partitions = Arrays.asList("part1", "part2");
ColumnStatisticsData data1 = new ColStatsBuilder<>(long.class).numNulls(1).numDVs(2)
.low(1L).high(2L).hll(1, 2).build();
ColumnStatisticsData data2 = new ColStatsBuilder<>(long.class).numNulls(2).numDVs(3).build();
List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
createStatsWithInfo(data2, TABLE, COL, partitions.get(1)));
LongColumnStatsAggregator aggregator = new LongColumnStatsAggregator();
ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
ColumnStatisticsData expectedStats = new ColStatsBuilder<>(long.class).numNulls(3).numDVs(3)
.low(1L).high(2L).hll(1, 2).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = true;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
expectedStats = new ColStatsBuilder<>(long.class).numNulls(3).numDVs(4)
.low(1L).high(2L).hll(1, 2).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
aggregator.useDensityFunctionForNDVEstimation = false;
aggregator.ndvTuner = 1;
computedStatsObj = aggregator.aggregate(statsList, partitions, true);
expectedStats = new ColStatsBuilder<>(long.class).numNulls(3).numDVs(5)
.low(1L).high(2L).hll(1, 2).build();
assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
}
|
@Override
public Optional<DevOpsProjectCreator> getDevOpsProjectCreator(DbSession dbSession, Map<String, String> characteristics) {
String githubApiUrl = characteristics.get(DEVOPS_PLATFORM_URL);
String githubRepository = characteristics.get(DEVOPS_PLATFORM_PROJECT_IDENTIFIER);
if (githubApiUrl == null || githubRepository == null) {
return Optional.empty();
}
DevOpsProjectDescriptor devOpsProjectDescriptor = new DevOpsProjectDescriptor(ALM.GITHUB, githubApiUrl, githubRepository, null);
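// Pick the first GitHub ALM setting whose URL matches the descriptor and try to resolve a DevOps project creator from its installation id.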
return dbClient.almSettingDao().selectByAlm(dbSession, ALM.GITHUB).stream()
.filter(almSettingDto -> devOpsProjectDescriptor.url().equals(almSettingDto.getUrl()))
.map(almSettingDto -> findInstallationIdAndCreateDevOpsProjectCreator(devOpsProjectDescriptor, almSettingDto))
.flatMap(Optional::stream)
.findFirst();
}
|
@Test
public void getDevOpsProjectCreator_whenOneValidAlmSettingAndPublicByDefaultAndAutoProvisioningEnabled_shouldInstantiateDevOpsProjectCreatorAndDefineAnAuthAppToken() {
AlmSettingDto almSettingDto = mockAlmSettingDto(true);
mockSuccessfulGithubInteraction();
when(projectDefaultVisibility.get(any()).isPrivate()).thenReturn(true);
mockValidGitHubSettings();
long authAppInstallationId = 32;
when(githubApplicationClient.getInstallationId(any(), eq(GITHUB_REPO_FULL_NAME))).thenReturn(Optional.of(authAppInstallationId));
when(githubApplicationClient.createAppInstallationToken(any(), eq(authAppInstallationId))).thenReturn(Optional.of(authAppInstallationToken));
when(DEV_OPS_PROJECT.devOpsPlatformIdentifier()).thenReturn(GITHUB_REPO_FULL_NAME);
when(devOpsProjectService.createDevOpsProject(almSettingDto, GITHUB_PROJECT_DESCRIPTOR, authAppInstallationToken)).thenReturn(DEV_OPS_PROJECT);
DevOpsProjectCreator devOpsProjectCreator = githubProjectCreatorFactory.getDevOpsProjectCreator(dbSession, VALID_GITHUB_PROJECT_COORDINATES).orElseThrow();
GithubProjectCreator expectedGithubProjectCreator = getExpectedGithubProjectCreator(true);
assertThat(devOpsProjectCreator).usingRecursiveComparison().isEqualTo(expectedGithubProjectCreator);
}
|
@Override
public QueryHeader build(final QueryResultMetaData queryResultMetaData,
final ShardingSphereDatabase database, final String columnName, final String columnLabel, final int columnIndex) throws SQLException {
String schemaName = null == database ? "" : database.getName();
String actualTableName = queryResultMetaData.getTableName(columnIndex);
String tableName;
boolean primaryKey;
if (null == actualTableName || null == database) {
tableName = actualTableName;
primaryKey = false;
} else {
tableName = getLogicTableName(database, actualTableName);
ShardingSphereSchema schema = database.getSchema(schemaName);
primaryKey = null != schema
&& Optional.ofNullable(schema.getTable(tableName)).map(optional -> optional.getColumn(columnName)).map(ShardingSphereColumn::isPrimaryKey).orElse(false);
}
int columnType = queryResultMetaData.getColumnType(columnIndex);
String columnTypeName = queryResultMetaData.getColumnTypeName(columnIndex);
int columnLength = queryResultMetaData.getColumnLength(columnIndex);
int decimals = queryResultMetaData.getDecimals(columnIndex);
boolean signed = queryResultMetaData.isSigned(columnIndex);
boolean notNull = queryResultMetaData.isNotNull(columnIndex);
boolean autoIncrement = queryResultMetaData.isAutoIncrement(columnIndex);
return new QueryHeader(schemaName, tableName, columnLabel, columnName, columnType, columnTypeName, columnLength, decimals, signed, primaryKey, notNull, autoIncrement);
}
|
@Test
void assertBuild() throws SQLException {
QueryResultMetaData queryResultMetaData = createQueryResultMetaData();
QueryHeader actual = new MySQLQueryHeaderBuilder().build(queryResultMetaData, createDatabase(), queryResultMetaData.getColumnName(1), queryResultMetaData.getColumnLabel(1), 1);
assertThat(actual.getSchema(), is(DefaultDatabase.LOGIC_NAME));
assertThat(actual.getTable(), is("t_logic_order"));
assertThat(actual.getColumnLabel(), is("order_id"));
assertThat(actual.getColumnName(), is("order_id"));
assertThat(actual.getColumnLength(), is(1));
assertThat(actual.getColumnType(), is(Types.INTEGER));
assertThat(actual.getDecimals(), is(1));
assertTrue(actual.isSigned());
assertTrue(actual.isPrimaryKey());
assertTrue(actual.isNotNull());
assertTrue(actual.isAutoIncrement());
}
|
public static Config getConfig(
Configuration configuration, @Nullable HostAndPort externalAddress) {
return getConfig(
configuration,
externalAddress,
null,
PekkoUtils.getForkJoinExecutorConfig(
ActorSystemBootstrapTools.getForkJoinExecutorConfiguration(configuration)));
}
|
@Test
void getConfigCustomKeyOrTruststoreType() {
final Configuration configuration = new Configuration();
configuration.set(SecurityOptions.SSL_INTERNAL_ENABLED, true);
configuration.set(SecurityOptions.SSL_INTERNAL_KEYSTORE_TYPE, "JKS");
configuration.set(SecurityOptions.SSL_INTERNAL_TRUSTSTORE_TYPE, "JKS");
final Config config =
PekkoUtils.getConfig(configuration, new HostAndPort("localhost", 31337));
final Config securityConfig = config.getConfig("pekko.remote.classic.netty.ssl.security");
assertThat(securityConfig.getString("key-store-type")).isEqualTo("JKS");
assertThat(securityConfig.getString("trust-store-type")).isEqualTo("JKS");
}
|
public static long addClamped(long a, long b) {
long sum = a + b;
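// If the signed addition overflowed, saturate toward the sign of the operands instead of wrapping.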
return sumHadOverflow(a, b, sum)
? (a >= 0 ? Long.MAX_VALUE : Long.MIN_VALUE)
: sum;
}
|
@Test
public void when_addClamped_then_doesNotOverflow() {
// no overflow
assertEquals(0, addClamped(0, 0));
assertEquals(1, addClamped(1, 0));
assertEquals(-1, addClamped(-1, 0));
assertEquals(-1, addClamped(Long.MAX_VALUE, Long.MIN_VALUE));
assertEquals(-1, addClamped(Long.MIN_VALUE, Long.MAX_VALUE));
// overflow over MAX_VALUE
assertEquals(Long.MAX_VALUE, addClamped(Long.MAX_VALUE, 1));
assertEquals(Long.MAX_VALUE, addClamped(Long.MAX_VALUE, Long.MAX_VALUE));
// overflow over MIN_VALUE
assertEquals(Long.MIN_VALUE, addClamped(Long.MIN_VALUE, -1));
assertEquals(Long.MIN_VALUE, addClamped(Long.MIN_VALUE, Long.MIN_VALUE));
}
|
@Override
public int handleBeat(String namespaceId, String serviceName, String ip, int port, String cluster,
RsInfo clientBeat, BeatInfoInstanceBuilder builder) throws NacosException {
Service service = getService(namespaceId, serviceName, true);
String clientId = IpPortBasedClient.getClientId(ip + InternetAddressUtil.IP_PORT_SPLITER + port, true);
IpPortBasedClient client = (IpPortBasedClient) clientManager.getClient(clientId);
if (null == client || !client.getAllPublishedService().contains(service)) {
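// Unknown client, or the service is not published by this client: re-register the instance from the beat info, or report RESOURCE_NOT_FOUND when there is no beat.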
if (null == clientBeat) {
return NamingResponseCode.RESOURCE_NOT_FOUND;
}
Instance instance = builder.setBeatInfo(clientBeat).setServiceName(serviceName).build();
registerInstance(namespaceId, serviceName, instance);
client = (IpPortBasedClient) clientManager.getClient(clientId);
}
if (!ServiceManager.getInstance().containSingleton(service)) {
throw new NacosException(NacosException.SERVER_ERROR,
"service not found: " + serviceName + "@" + namespaceId);
}
if (null == clientBeat) {
clientBeat = new RsInfo();
clientBeat.setIp(ip);
clientBeat.setPort(port);
clientBeat.setCluster(cluster);
clientBeat.setServiceName(serviceName);
}
ClientBeatProcessorV2 beatProcessor = new ClientBeatProcessorV2(namespaceId, clientBeat, client);
HealthCheckReactor.scheduleNow(beatProcessor);
client.setLastUpdatedTime();
return NamingResponseCode.OK;
}
|
@Test
void testHandleBeat() throws NacosException {
IpPortBasedClient ipPortBasedClient = Mockito.mock(IpPortBasedClient.class);
when(clientManager.getClient(Mockito.anyString())).thenReturn(ipPortBasedClient);
when(ipPortBasedClient.getAllPublishedService()).thenReturn(Collections.emptyList());
RsInfo rsInfo = new RsInfo();
rsInfo.setMetadata(new HashMap<>(1));
int res = instanceOperatorClient.handleBeat("A", "C", "1.1.1.1", 8848, "D", rsInfo, BeatInfoInstanceBuilder.newBuilder());
assertEquals(NamingResponseCode.OK, res);
}
|
public static Expression convert(Filter[] filters) {
Expression expression = Expressions.alwaysTrue();
for (Filter filter : filters) {
Expression converted = convert(filter);
Preconditions.checkArgument(
converted != null, "Cannot convert filter to Iceberg: %s", filter);
expression = Expressions.and(expression, converted);
}
return expression;
}
|
@Test
public void testNotIn() {
Not filter = Not.apply(In.apply("col", new Integer[] {1, 2}));
Expression actual = SparkFilters.convert(filter);
Expression expected =
Expressions.and(Expressions.notNull("col"), Expressions.notIn("col", 1, 2));
Assert.assertEquals("Expressions should match", expected.toString(), actual.toString());
}
|
public void setFilePaths(String... filePaths) {
Path[] paths = new Path[filePaths.length];
for (int i = 0; i < paths.length; i++) {
paths[i] = new Path(filePaths[i]);
}
setFilePaths(paths);
}
|
@Test
void testSinglePathGetOnMultiPathIF() {
final MultiDummyFileInputFormat format = new MultiDummyFileInputFormat();
final String myPath = "/an/imaginary/path";
final String myPath2 = "/an/imaginary/path2";
format.setFilePaths(myPath, myPath2);
assertThatThrownBy(format::getFilePath).isInstanceOf(UnsupportedOperationException.class);
}
|
public float get(int index) {
return nonzeros[index];
}
|
@Test
public void testGet() {
System.out.println("get");
assertEquals(0.9, sparse.get(0, 0), 1E-7);
assertEquals(0.8, sparse.get(2, 2), 1E-7);
assertEquals(0.5, sparse.get(1, 1), 1E-7);
assertEquals(0.0, sparse.get(2, 0), 1E-7);
assertEquals(0.0, sparse.get(0, 2), 1E-7);
assertEquals(0.4, sparse.get(0, 1), 1E-7);
}
|
static CommitMeta buildCommitMetadata(String commitMsg, Map<String, String> catalogOptions) {
return catalogOptions(CommitMeta.builder().message(commitMsg), catalogOptions).build();
}
|
@Test
public void testAuthorIsNullWithoutJvmUser() {
String jvmUserName = System.getProperty("user.name");
try {
System.clearProperty("user.name");
CommitMeta commitMeta = NessieUtil.buildCommitMetadata("commit msg", ImmutableMap.of());
assertThat(commitMeta.getAuthor()).isNull();
} finally {
System.setProperty("user.name", jvmUserName);
}
}
|
public static String normalize(String string) {
if (string == null) {
return null;
}
if (string.length() > 63) {
string = string.substring(0, 63);
}
string = StringUtils.stripEnd(string, "-");
string = StringUtils.stripEnd(string, ".");
string = StringUtils.stripEnd(string, "_");
return string;
}
|
@Test
void normalize() {
assertThat(ScriptService.normalize(null), nullValue());
assertThat(ScriptService.normalize("a-normal-string"), is("a-normal-string"));
assertThat(ScriptService.normalize("very.very.very.very.very.very.very.very.very.very.very.very.long.namespace"), is("very.very.very.very.very.very.very.very.very.very.very.very.lon"));
}
|
static Map<String, ValueExtractor> instantiateExtractors(List<AttributeConfig> attributeConfigs,
ClassLoader classLoader) {
Map<String, ValueExtractor> extractors = createHashMap(attributeConfigs.size());
for (AttributeConfig config : attributeConfigs) {
if (extractors.containsKey(config.getName())) {
throw new IllegalArgumentException("Could not add " + config
+ ". Extractor for this attribute name already added.");
}
extractors.put(config.getName(), instantiateExtractor(config, classLoader));
}
return extractors;
}
|
@Test
public void instantiate_extractors_wrongType() {
// GIVEN
AttributeConfig string = new AttributeConfig("iq", "java.lang.String");
// WHEN
assertThatThrownBy(() -> instantiateExtractors(singletonList(string)))
.isInstanceOf(IllegalArgumentException.class);
}
|
@Override
public int getRootComponentRef() {
checkState(rootComponentRef.isInitialized(), "Root component ref has not been set");
return rootComponentRef.getProperty();
}
|
@Test
public void getRootComponentRef() {
AnalysisMetadataHolderImpl underTest = new AnalysisMetadataHolderImpl(editionProvider);
underTest.setRootComponentRef(10);
assertThat(underTest.getRootComponentRef()).isEqualTo(10);
}
|
@Override
public void replay(
long offset,
long producerId,
short producerEpoch,
CoordinatorRecord record
) throws RuntimeException {
ApiMessageAndVersion key = record.key();
ApiMessageAndVersion value = record.value();
switch (key.version()) {
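// The key's schema version doubles as the record-type discriminator; each case casts key and value and routes them to the matching manager.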
case 0:
case 1:
offsetMetadataManager.replay(
offset,
producerId,
(OffsetCommitKey) key.message(),
(OffsetCommitValue) Utils.messageOrNull(value)
);
break;
case 2:
groupMetadataManager.replay(
(GroupMetadataKey) key.message(),
(GroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 3:
groupMetadataManager.replay(
(ConsumerGroupMetadataKey) key.message(),
(ConsumerGroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 4:
groupMetadataManager.replay(
(ConsumerGroupPartitionMetadataKey) key.message(),
(ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value)
);
break;
case 5:
groupMetadataManager.replay(
(ConsumerGroupMemberMetadataKey) key.message(),
(ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value)
);
break;
case 6:
groupMetadataManager.replay(
(ConsumerGroupTargetAssignmentMetadataKey) key.message(),
(ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
);
break;
case 7:
groupMetadataManager.replay(
(ConsumerGroupTargetAssignmentMemberKey) key.message(),
(ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
);
break;
case 8:
groupMetadataManager.replay(
(ConsumerGroupCurrentMemberAssignmentKey) key.message(),
(ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
);
break;
case 9:
groupMetadataManager.replay(
(ShareGroupPartitionMetadataKey) key.message(),
(ShareGroupPartitionMetadataValue) Utils.messageOrNull(value)
);
break;
case 10:
groupMetadataManager.replay(
(ShareGroupMemberMetadataKey) key.message(),
(ShareGroupMemberMetadataValue) Utils.messageOrNull(value)
);
break;
case 11:
groupMetadataManager.replay(
(ShareGroupMetadataKey) key.message(),
(ShareGroupMetadataValue) Utils.messageOrNull(value)
);
break;
case 12:
groupMetadataManager.replay(
(ShareGroupTargetAssignmentMetadataKey) key.message(),
(ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value)
);
break;
case 13:
groupMetadataManager.replay(
(ShareGroupTargetAssignmentMemberKey) key.message(),
(ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value)
);
break;
case 14:
groupMetadataManager.replay(
(ShareGroupCurrentMemberAssignmentKey) key.message(),
(ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value)
);
break;
default:
throw new IllegalStateException("Received an unknown record type " + key.version()
+ " in " + record);
}
}
|
@Test
public void testReplayConsumerGroupMemberMetadataWithNullValue() {
GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class);
OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class);
CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class);
CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class);
GroupCoordinatorShard coordinator = new GroupCoordinatorShard(
new LogContext(),
groupMetadataManager,
offsetMetadataManager,
Time.SYSTEM,
new MockCoordinatorTimer<>(Time.SYSTEM),
mock(GroupCoordinatorConfig.class),
coordinatorMetrics,
metricsShard
);
ConsumerGroupMemberMetadataKey key = new ConsumerGroupMemberMetadataKey();
coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord(
new ApiMessageAndVersion(key, (short) 5),
null
));
verify(groupMetadataManager, times(1)).replay(key, null);
}
|
@Description("converts the string to upper case")
@ScalarFunction("upper")
@LiteralParameters("x")
@SqlType("char(x)")
public static Slice charUpper(@SqlType("char(x)") Slice slice)
{
return upper(slice);
}
|
@Test
public void testCharUpper()
{
assertFunction("UPPER(CAST('' AS CHAR(10)))", createCharType(10), padRight("", 10));
assertFunction("UPPER(CAST('Hello World' AS CHAR(11)))", createCharType(11), padRight("HELLO WORLD", 11));
assertFunction("UPPER(CAST('what!!' AS CHAR(6)))", createCharType(6), padRight("WHAT!!", 6));
assertFunction("UPPER(CAST('\u00D6sterreich' AS CHAR(10)))", createCharType(10), padRight(upperByCodePoint("\u00D6") + "STERREICH", 10));
assertFunction("UPPER(CAST('From\uD801\uDC2DTo' AS CHAR(7)))", createCharType(7), padRight("FROM" + upperByCodePoint("\uD801\uDC2D") + "TO", 7));
}
|
@Override
public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain) throws IOException, ServletException {
String path = ((HttpServletRequest) req).getRequestURI().replaceFirst(((HttpServletRequest) req).getContextPath(), "");
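// Apply the max-age of the first configured path prefix that matches, then continue the filter chain.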
MAX_AGE_BY_PATH.entrySet().stream()
.filter(m -> path.startsWith(m.getKey()))
.map(Map.Entry::getValue)
.findFirst()
.ifPresent(maxAge -> ((HttpServletResponse) resp).addHeader(CACHE_CONTROL_HEADER, format(MAX_AGE_TEMPLATE, maxAge)));
chain.doFilter(req, resp);
}
|
@Test
public void does_nothing_on_home() throws Exception {
HttpServletRequest request = newRequest("/");
underTest.doFilter(request, response, chain);
verifyNoInteractions(response);
}
|
public MountTable getMountPoint(final String path) throws IOException {
verifyMountTable();
return findDeepest(RouterAdmin.normalizeFileSystemPath(path));
}
|
@Test
public void testGetMountPointOfConsecutiveSlashes() throws IOException {
// Check getting the mount table entry for various paths
MountTable mtEntry;
mtEntry = mountTable.getMountPoint("///");
assertEquals("/", mtEntry.getSourcePath());
mtEntry = mountTable.getMountPoint("///user//");
assertEquals("/user", mtEntry.getSourcePath());
mtEntry = mountTable.getMountPoint("/user///a");
assertEquals("/user/a", mtEntry.getSourcePath());
mtEntry = mountTable.getMountPoint("/user/a////");
assertEquals("/user/a", mtEntry.getSourcePath());
mtEntry = mountTable.getMountPoint("///user/a/11//");
assertEquals("/user/a", mtEntry.getSourcePath());
mtEntry = mountTable.getMountPoint("/user///a1///");
assertEquals("/user", mtEntry.getSourcePath());
}
|
public static Object construct(Object something) throws Exception {
if (something instanceof String) {
return Class.forName((String)something).getConstructor().newInstance();
} else if (something instanceof Map) {
// keys are class names, values are the constructor parameters.
for (Map.Entry<String, Object> entry : ((Map<String, Object>) something).entrySet()) {
if (entry.getValue() instanceof Map) {
return constructByNamedParams(Class.forName(entry.getKey()), (Map)entry.getValue());
} else if (entry.getValue() instanceof List) {
return constructByParameterizedConstructor(Class.forName(entry.getKey()), (List)entry.getValue());
}
}
}
return null;
}
|
@Test(expected = Exception.class)
public void classWithoutMatchedConstructor_constructed_failsWhenNoDefault() throws Exception {
Map<String, List<Map<String, Object>>> constructMap = new HashMap<>();
List<Map<String, Object>> params = new ArrayList<>();
params.add(Collections.singletonMap("java.lang.String", "Hello"));
constructMap.put("com.networknt.service.ClassWithoutDefaultConstructor", params);
ServiceUtil.construct(constructMap);
}
|
@Override
public void init(final InternalProcessorContext<Void, Void> context) {
super.init(context);
this.context = context;
try {
keySerializer = prepareKeySerializer(keySerializer, context, this.name());
} catch (ConfigException | StreamsException e) {
throw new StreamsException(String.format("Failed to initialize key serdes for sink node %s", name()), e, context.taskId());
}
try {
valSerializer = prepareValueSerializer(valSerializer, context, this.name());
} catch (final ConfigException | StreamsException e) {
throw new StreamsException(String.format("Failed to initialize value serdes for sink node %s", name()), e, context.taskId());
}
}
|
@Test
public void shouldThrowStreamsExceptionOnUndefinedKeySerde() {
utilsMock.when(() -> WrappingNullableUtils.prepareKeySerializer(any(), any(), any()))
.thenThrow(new ConfigException("Please set StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG"));
final Throwable exception = assertThrows(StreamsException.class, () -> sink.init(context));
assertThat(
exception.getMessage(),
equalTo("Failed to initialize key serdes for sink node anyNodeName")
);
assertThat(
exception.getCause().getMessage(),
equalTo("Please set StreamsConfig#DEFAULT_KEY_SERDE_CLASS_CONFIG")
);
}
|
public Connection getConnection() {
return getConfig().isShared() ? pooledConnection() : singleUseConnection();
}
|
@Test
public void shared_connection_should_be_initialized_lazy() {
jdbcDataConnection = new JdbcDataConnection(new DataConnectionConfig()
.setName(TEST_NAME)
.setProperty("jdbcUrl", "invalid-jdbc-url")
.setShared(true));
assertThatThrownBy(() -> jdbcDataConnection.getConnection())
.hasRootCauseInstanceOf(SQLException.class)
.hasRootCauseMessage("No suitable driver");
}
|
public static RuntimeException wrapIf(boolean condition, Throwable t) {
if (condition) {
return wrap(t);
}
if (t instanceof RuntimeException) {
return (RuntimeException) t;
}
return new RuntimeException(t);
}
|
@Test
public void testWrapIfOnlyWrapsWhenTrue() {
IOException cause = new IOException();
RuntimeException wrapped = UserCodeException.wrapIf(true, cause);
assertThat(wrapped, is(instanceOf(UserCodeException.class)));
}
|
@CheckForNull
static BundleParams getBundleParameters(String restOfPath) {
if (restOfPath == null || restOfPath.length() == 0) {
return null;
}
String[] pathTokens = restOfPath.split("/");
List<String> bundleParameters = new ArrayList<>();
for (String pathToken : pathTokens) {
if (pathToken.length() > 0) {
bundleParameters.add(urlDecode(pathToken));
}
}
// Path should be prefixed with /blue/rest/i18n.
// Let's remove those.
if (bundleParameters.get(0).equals("blue")) {
bundleParameters.remove(0);
}
if (bundleParameters.get(0).equals("rest")) {
bundleParameters.remove(0);
}
if (bundleParameters.get(0).equals("i18n")) {
bundleParameters.remove(0);
}
if (bundleParameters.size() != 3 && bundleParameters.size() != 4) {
return null;
}
BundleParams bundleParams = new BundleParams(
bundleParameters.get(0),
bundleParameters.get(1),
bundleParameters.get(2)
);
if (bundleParameters.size() == 4) {
// https://www.w3.org/International/questions/qa-lang-priorities
// in case we have regions/countries in the language query parameter
String locale = bundleParameters.get(3);
String[] localeTokens = locale.split("-|_");
bundleParams.language = localeTokens[0];
if (localeTokens.length > 1) {
bundleParams.country = localeTokens[1];
if (localeTokens.length > 2) {
bundleParams.variant = localeTokens[2];
}
}
}
return bundleParams;
}
|
@Test
public void test_getBundleParameters_invalid_url() {
Assert.assertNull(BlueI18n.getBundleParameters("pluginx/1.0.0"));
Assert.assertNull(BlueI18n.getBundleParameters("/pluginx/1.0.0"));
}
|
public static Schema inferSchema(Object value) {
if (value instanceof String) {
return Schema.STRING_SCHEMA;
} else if (value instanceof Boolean) {
return Schema.BOOLEAN_SCHEMA;
} else if (value instanceof Byte) {
return Schema.INT8_SCHEMA;
} else if (value instanceof Short) {
return Schema.INT16_SCHEMA;
} else if (value instanceof Integer) {
return Schema.INT32_SCHEMA;
} else if (value instanceof Long) {
return Schema.INT64_SCHEMA;
} else if (value instanceof Float) {
return Schema.FLOAT32_SCHEMA;
} else if (value instanceof Double) {
return Schema.FLOAT64_SCHEMA;
} else if (value instanceof byte[] || value instanceof ByteBuffer) {
return Schema.BYTES_SCHEMA;
} else if (value instanceof List) {
return inferListSchema((List<?>) value);
} else if (value instanceof Map) {
return inferMapSchema((Map<?, ?>) value);
} else if (value instanceof Struct) {
return ((Struct) value).schema();
}
return null;
}
|
@Test
public void shouldInferNoSchemaForMapContainingObject() {
Schema listSchema = Values.inferSchema(Collections.singletonMap(new Object(), new Object()));
assertNull(listSchema);
}
|
@Deprecated
public Map<String, Object> get() {
return CLIENT_ATTACHMENT.get().get();
}
|
@Test
public void testClearAttachmentMap() {
RpcServerContextAttachment attachment = new RpcServerContextAttachment();
RpcServerContextAttachment.ObjectAttachmentMap objectAttachmentMap =
new RpcServerContextAttachment.ObjectAttachmentMap(attachment);
objectAttachmentMap.put("key_1", "value_1");
objectAttachmentMap.put("key_2", "value_2");
objectAttachmentMap.put("key_3", "value_3");
Assertions.assertEquals(3, objectAttachmentMap.size());
objectAttachmentMap.clear();
Assertions.assertEquals(null, objectAttachmentMap.get(new Object()));
Assertions.assertEquals(0, objectAttachmentMap.size());
Assertions.assertEquals(true, objectAttachmentMap.isEmpty());
}
|
@Override
public boolean shouldCareAbout(Object entity) {
return securityConfigClasses.stream().anyMatch(aClass -> aClass.isAssignableFrom(entity.getClass()));
}
|
@Test
public void shouldCareAboutSecurityAuthConfigChange() {
SecurityConfigChangeListener securityConfigChangeListener = new SecurityConfigChangeListener() {
@Override
public void onEntityConfigChange(Object entity) {
}
};
assertThat(securityConfigChangeListener.shouldCareAbout(new SecurityAuthConfig()), is(true));
}
|
public Statement buildStatement(final ParserRuleContext parseTree) {
return build(Optional.of(getSources(parseTree)), parseTree);
}
|
@Test
public void shouldGetCorrectLocationsComplexCtas() {
// Given:
final String statementString =
// 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
"CREATE TABLE customer_bookings\n" + // 1
" WITH (KAFKA_TOPIC = 'customer_bookings', KEY_FORMAT = 'JSON', VALUE_FORMAT = 'JSON') AS\n" + // 2
" SELECT C.EMAIL,\n" + // 3
" B.id,\n" + // 4
" B.flight_id,\n" + // 5
" COUNT(*)\n" + // 6
" FROM bookings B\n" + // 7
" INNER JOIN customers C ON B.customer_id = C.id\n" + // 8
" WINDOW TUMBLING (SIZE 1 HOUR, GRACE PERIOD 2 HOURS)\n" + // 9
" WHERE B.customer_id > 0 AND INSTR(C.EMAIL, '@') > 0\n" + // 10
" GROUP BY C.EMAIL, B.ID, B.flight_id\n" + // 11
" HAVING COUNT(*) > 0;"; // 12
// 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
final SingleStatementContext stmt = givenQuery(statementString);
// When:
final CreateTableAsSelect result = ((CreateTableAsSelect) builder.buildStatement(stmt));
// Then:
assertTrue(result.getLocation().isPresent());
final NodeLocation createTableLocation = result.getLocation().get();
assertThat(createTableLocation.getStartLineNumber(), is(1));
assertThat(createTableLocation.getLength(), is(OptionalInt.of(statementString.length() - 1)));
final Query query = result.getQuery();
assertTrue(query.getLocation().isPresent());
final NodeLocation queryLocation = query.getLocation().get();
assertThat(queryLocation.getStartLineNumber(), is(3));
assertThat(queryLocation.getStartColumnNumber(), is(3));
assertThat(queryLocation.getLength(),
is(OptionalInt.of((
"SELECT C.EMAIL,\n" +
" B.id,\n" +
" B.flight_id,\n" +
" COUNT(*)\n" +
" FROM bookings B\n" +
" INNER JOIN customers C ON B.customer_id = C.id\n" +
" WINDOW TUMBLING (SIZE 1 HOUR, GRACE PERIOD 2 HOURS)\n" +
" WHERE B.customer_id > 0 AND INSTR(C.EMAIL, '@') > 0\n" +
" GROUP BY C.EMAIL, B.ID, B.flight_id\n" +
" HAVING COUNT(*) > 0").length())));
final Select select = query.getSelect();
assertTrue(select.getLocation().isPresent());
final NodeLocation selectLocation = select.getLocation().get();
assertThat(selectLocation.getStartLineNumber(), is(3));
assertThat(selectLocation.getStartColumnNumber(), is(3));
assertThat(selectLocation.getLength(),
is(OptionalInt.of("SELECT".length())));
final Join join = (Join) query.getFrom();
assertTrue(join.getLocation().isPresent());
final NodeLocation joinLocation = join.getLocation().get();
assertThat(joinLocation.getStartLineNumber(), is(7));
assertThat(joinLocation.getStartColumnNumber(), is(8));
assertThat(joinLocation.getLength(),
is(OptionalInt.of((
"bookings B\n" +
" INNER JOIN customers C ON B.customer_id = C.id").length())));
assertTrue(query.getWindow().isPresent());
final WindowExpression window = query.getWindow().get();
assertTrue(window.getLocation().isPresent());
final NodeLocation windowLocation = window.getLocation().get();
assertThat(windowLocation.getStartLineNumber(), is(9));
assertThat(windowLocation.getStartColumnNumber(), is(10));
assertThat(windowLocation.getLength(),
is(OptionalInt.of(("TUMBLING (SIZE 1 HOUR, GRACE PERIOD 2 HOURS)").length())));
assertTrue(query.getWhere().isPresent());
final LogicalBinaryExpression where = (LogicalBinaryExpression) query.getWhere().get();
assertTrue(where.getLocation().isPresent());
final NodeLocation whereLocation = where.getLocation().get();
assertThat(whereLocation.getStartLineNumber(), is(10));
assertThat(whereLocation.getStartColumnNumber(), is(27));
assertThat(whereLocation.getLength(), is(OptionalInt.of(3)));
assertTrue(query.getGroupBy().isPresent());
final GroupBy groupBy = query.getGroupBy().get();
assertTrue(groupBy.getLocation().isPresent());
final NodeLocation groupByLocation = groupBy.getLocation().get();
assertThat(groupByLocation.getStartLineNumber(), is(11));
assertThat(groupByLocation.getStartColumnNumber(), is(12));
assertThat(groupByLocation.getLength(),
is(OptionalInt.of("C.EMAIL, B.ID, B.flight_id".length())));
assertTrue(query.getHaving().isPresent());
final ComparisonExpression having = (ComparisonExpression) query.getHaving().get();
assertTrue(having.getLocation().isPresent());
final NodeLocation havingLocation = having.getLocation().get();
assertThat(havingLocation.getStartLineNumber(), is(12));
assertThat(havingLocation.getStartColumnNumber(), is(19));
assertThat(havingLocation.getLength(), is(OptionalInt.of(1)));
}
|
@VisibleForTesting
static void createDocumentationFile(
String title,
DocumentingRestEndpoint restEndpoint,
RestAPIVersion apiVersion,
Path outputFile)
throws IOException {
final OpenAPI openApi = createDocumentation(title, restEndpoint, apiVersion);
Files.deleteIfExists(outputFile);
Files.write(outputFile, Yaml.pretty(openApi).getBytes(StandardCharsets.UTF_8));
}
|
@Test
void testDuplicateOperationIdsAreRejected(@TempDir Path tmpDir) {
final Path file = tmpDir.resolve("openapi_spec.yaml");
assertThatThrownBy(
() ->
OpenApiSpecGenerator.createDocumentationFile(
"title",
DocumentingRestEndpoint.forRestHandlerSpecifications(
new TestEmptyMessageHeaders("operation1"),
new TestEmptyMessageHeaders("operation1")),
RuntimeRestAPIVersion.V0,
file))
.isInstanceOf(IllegalStateException.class)
.hasMessageContaining("Duplicate OperationId");
}
|
public static <T extends Comparable<T>> void assertTypeValid(
Column<T> foundColumn, PrimitiveTypeName primitiveType) {
Class<T> foundColumnType = foundColumn.getColumnType();
ColumnPath columnPath = foundColumn.getColumnPath();
Set<PrimitiveTypeName> validTypeDescriptors = classToParquetType.get(foundColumnType);
if (validTypeDescriptors == null) {
StringBuilder message = new StringBuilder();
message.append("Column ")
.append(columnPath.toDotString())
.append(" was declared as type: ")
.append(foundColumnType.getName())
.append(" which is not supported in FilterPredicates.");
Set<Class<?>> supportedTypes = parquetTypeToClass.get(primitiveType);
if (supportedTypes != null) {
message.append(" Supported types for this column are: ").append(supportedTypes);
} else {
message.append(" There are no supported types for columns of " + primitiveType);
}
throw new IllegalArgumentException(message.toString());
}
if (!validTypeDescriptors.contains(primitiveType)) {
StringBuilder message = new StringBuilder();
message.append("FilterPredicate column: ")
.append(columnPath.toDotString())
.append("'s declared type (")
.append(foundColumnType.getName())
.append(") does not match the schema found in file metadata. Column ")
.append(columnPath.toDotString())
.append(" is of type: ")
.append(primitiveType)
.append("\nValid types for this column are: ")
.append(parquetTypeToClass.get(primitiveType));
throw new IllegalArgumentException(message.toString());
}
}
|
@Test
public void testUnsupportedType() {
try {
assertTypeValid(invalidColumn, PrimitiveTypeName.INT32);
fail("This should throw!");
} catch (IllegalArgumentException e) {
assertEquals(
"Column invalid.column was declared as type: "
+ "org.apache.parquet.filter2.predicate.TestValidTypeMap$InvalidColumnType which is not supported "
+ "in FilterPredicates. Supported types for this column are: [class java.lang.Integer]",
e.getMessage());
}
}
|
public static void checkServiceNameFormat(String combineServiceName) {
String[] split = combineServiceName.split(Constants.SERVICE_INFO_SPLITER);
if (split.length <= 1) {
throw new IllegalArgumentException(
"Param 'serviceName' is illegal, it should be format as 'groupName@@serviceName'");
}
if (split[0].isEmpty()) {
throw new IllegalArgumentException("Param 'serviceName' is illegal, groupName can't be empty");
}
}
|
@Test
void testCheckServiceNameFormat() {
String validServiceName = "group@@serviceName";
NamingUtils.checkServiceNameFormat(validServiceName);
}
|
public void shutdown(final Callback<None> callback)
{
_managerStarted = false;
for (ZooKeeperAnnouncer server : _servers)
{
server.shutdown();
}
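// Close the underlying ZooKeeper connection only after the store (if any) has finished shutting down.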
Callback<None> zkCloseCallback = new CallbackAdapter<None, None>(callback)
{
@Override
protected None convertResponse(None none) throws Exception
{
_zkConnection.shutdown();
return none;
}
};
if (_store != null)
{
_store.shutdown(zkCloseCallback);
}
else
{
zkCloseCallback.onSuccess(None.none());
}
}
|
@Test
public void testMarkDownDuringDisconnection()
throws Exception
{
ZooKeeperAnnouncer announcer = getZooKeeperAnnouncer(_cluster, _uri, WEIGHT);
ZooKeeperConnectionManager manager = createManager(true, announcer);
ZooKeeperEphemeralStore<UriProperties> store = createAndStartUriStore();
UriProperties properties = store.get(_cluster);
assertNotNull(properties);
assertEquals(properties.getPartitionDataMap(URI.create(_uri)).get(DefaultPartitionAccessor.DEFAULT_PARTITION_ID).getWeight(), WEIGHT);
assertEquals(properties.Uris().size(), 1);
_zkServer.shutdown(false);
FutureCallback<None> markDownCallback = new FutureCallback<>();
announcer.markDown(markDownCallback);
// ugly, but we need to wait for a while just so that Disconnect event is propagated
// to the caller before we restart the zk server.
Thread.sleep(1000);
_zkServer.restart();
markDownCallback.get();
properties = store.get(_cluster);
assertNotNull(properties);
assertEquals(properties.Uris().size(), 0);
shutdownManager(manager);
}
|
public static boolean isIPv6(String addr) {
return InetAddressValidator.isIPv6Address(removeBrackets(addr));
}
|
@Test
void testIsIPv6() {
assertTrue(InternetAddressUtil.isIPv6("[::1]"));
assertFalse(InternetAddressUtil.isIPv6("127.0.0.1"));
assertFalse(InternetAddressUtil.isIPv6("er34234"));
}
|
@Override
public int hashCode() {
return Objects.hash(status, causes, details);
}
|
@Test
public void hashcode_is_based_on_content() {
NodeHealth.Builder builder = testSupport.randomBuilder();
NodeHealth underTest = builder.build();
assertThat(builder.build().hashCode())
.isEqualTo(underTest.hashCode());
}
|
@Override
public Path touch(final Path file, final TransferStatus status) throws BackgroundException {
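// Pre-compute the checksum over zero-byte content so the newly created empty file carries a valid checksum.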
status.setChecksum(write.checksum(file, status).compute(new NullInputStream(0L), status));
return super.touch(file, status);
}
|
@Test
public void testTouchFileStartWithDot() throws Exception {
final Path container = new Path("cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
final Path test = new Path(container, String.format(".%s.", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file));
new AzureTouchFeature(session, null).touch(test, new TransferStatus());
new AzureDeleteFeature(session, null).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
|
public static Map<String, String> inputFiles(RunContext runContext, Object inputs) throws Exception {
return FilesService.inputFiles(runContext, Collections.emptyMap(), inputs);
}
|
@Test
void overrideExistingInputFile() throws Exception {
RunContext runContext = runContextFactory.of();
FilesService.inputFiles(runContext, Map.of("file.txt", "content"));
FilesService.inputFiles(runContext, Map.of("file.txt", "overriden content"));
String fileContent = FileUtils.readFileToString(runContext.workingDir().path().resolve("file.txt").toFile(), "UTF-8");
assertThat(fileContent, is("overriden content"));
}
|
@Udf(description = "Returns the inverse (arc) tangent of y / x")
public Double atan2(
@UdfParameter(
value = "y",
description = "The ordinate (y) coordinate."
) final Integer y,
@UdfParameter(
value = "x",
description = "The abscissa (x) coordinate."
) final Integer x
) {
return atan2(y == null ? null : y.doubleValue(), x == null ? null : x.doubleValue());
}
|
@Test
public void shouldHandleZeroYPositiveX() {
assertThat(udf.atan2(0.0, 0.24), closeTo(0.0, 0.000000000000001));
assertThat(udf.atan2(0.0, 7.1), closeTo(0.0, 0.000000000000001));
assertThat(udf.atan2(0, 3), closeTo(0.0, 0.000000000000001));
assertThat(udf.atan2(0L, 2L), closeTo(0.0, 0.000000000000001));
}
|
@Override
public void showPreviewForKey(
Keyboard.Key key, Drawable icon, View parentView, PreviewPopupTheme previewPopupTheme) {
KeyPreview popup = getPopupForKey(key, parentView, previewPopupTheme);
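// Work out where the preview should appear for this key before showing the popup.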
Point previewPosition =
mPositionCalculator.calculatePositionForPreview(
key, previewPopupTheme, getLocationInWindow(parentView));
popup.showPreviewForKey(key, icon, previewPosition);
}
|
@Test
public void testNoPopupWhenTextSizeIsZero() {
mTheme.setPreviewKeyTextSize(0);
KeyPreviewsManager underTest =
new KeyPreviewsManager(getApplicationContext(), mPositionCalculator, 3);
Assert.assertNull(getLatestCreatedPopupWindow());
underTest.showPreviewForKey(mTestKeys[0], "y", mKeyboardView, mTheme);
Assert.assertNull(getLatestCreatedPopupWindow());
}
|
@Override
public void pluginJarUpdated(BundleOrPluginFileDetails bundleOrPluginFileDetails) {
final GoPluginBundleDescriptor bundleDescriptor = goPluginBundleDescriptorBuilder.build(bundleOrPluginFileDetails);
try {
LOGGER.info("Plugin update starting: {}", bundleOrPluginFileDetails.file());
validateIfExternalPluginRemovingBundledPlugin(bundleDescriptor);
validateIfSamePluginUpdated(bundleDescriptor);
validatePluginCompatibilityWithCurrentOS(bundleDescriptor);
validatePluginCompatibilityWithGoCD(bundleDescriptor);
removePlugin(bundleDescriptor);
addPlugin(bundleOrPluginFileDetails, bundleDescriptor);
} finally {
LOGGER.info("Plugin update finished: {}", bundleOrPluginFileDetails.file());
}
}
|
@Test
void shouldNotTryAndUpdateManifestOfAnUpdatedInvalidPlugin() throws Exception {
DefaultPluginJarChangeListener spy = spy(listener);
String pluginId = "plugin-id";
File pluginFile = new File(pluginWorkDir, PLUGIN_JAR_FILE_NAME);
copyPluginToTheDirectory(pluginWorkDir, PLUGIN_JAR_FILE_NAME);
File expectedBundleDirectoryForInvalidPlugin = new File(bundleDir, PLUGIN_JAR_FILE_NAME);
File bundleDirectoryForOldPlugin = new File(bundleDir, "descriptor-aware-test-plugin-old.jar");
FileUtils.forceMkdir(bundleDirectoryForOldPlugin);
String pluginJarFileLocation = pluginFile.getAbsolutePath();
GoPluginBundleDescriptor descriptorForInvalidPlugin = new GoPluginBundleDescriptor(GoPluginDescriptor.builder()
.id("testplugin.descriptorValidator")
.bundleLocation(expectedBundleDirectoryForInvalidPlugin)
.pluginJarFileLocation(pluginJarFileLocation)
.isBundledPlugin(true)
.build())
.markAsInvalid(List.of("For a test"), null);
Bundle oldBundle = mock(Bundle.class);
final GoPluginDescriptor oldPluginDescriptor = GoPluginDescriptor.builder()
.id("some.old.id")
.bundleLocation(bundleDirectoryForOldPlugin)
.pluginJarFileLocation("some/path/to/plugin.jar")
.isBundledPlugin(true)
.build();
GoPluginBundleDescriptor oldBundleDescriptor = new GoPluginBundleDescriptor(oldPluginDescriptor).setBundle(oldBundle);
when(goPluginBundleDescriptorBuilder.build(new BundleOrPluginFileDetails(pluginFile, true, pluginWorkDir))).thenReturn(descriptorForInvalidPlugin);
when(registry.getPlugin(pluginId)).thenReturn(oldPluginDescriptor);
when(registry.unloadPlugin(descriptorForInvalidPlugin)).thenReturn(oldBundleDescriptor);
doNothing().when(registry).loadPlugin(descriptorForInvalidPlugin);
spy.pluginJarUpdated(new BundleOrPluginFileDetails(pluginFile, true, pluginWorkDir));
assertThat(expectedBundleDirectoryForInvalidPlugin).exists();
assertThat(bundleDirectoryForOldPlugin).doesNotExist();
verify(registry).unloadPlugin(descriptorForInvalidPlugin);
verify(pluginLoader).unloadPlugin(oldBundleDescriptor);
verify(registry).loadPlugin(descriptorForInvalidPlugin);
verifyNoMoreInteractions(osgiManifestGenerator);
verifyNoMoreInteractions(pluginLoader);
}
|
@Override
public MplsLabel decode(int value) {
return MplsLabel.mplsLabel(value);
}
|
@Test
public void testDecode() {
assertThat(sut.decode(100), is(MplsLabel.mplsLabel(100)));
}
|
public static Event[] fromJson(final String json) throws IOException {
return fromJson(json, BasicEventFactory.INSTANCE);
}
|
@Test(expected=IOException.class)
public void testFromJsonWithInvalidJsonString() throws Exception {
Event.fromJson("gabeutch");
}
|
@Override
public Set<Device> allocateDevices(Set<Device> availableDevices, int count,
Map<String, String> env) {
// Can consider topology, utilization.etc
Set<Device> allocated = new HashSet<>();
int number = 0;
for (Device d : availableDevices) {
allocated.add(d);
number++;
if (number == count) {
break;
}
}
return allocated;
}
|
@Test
public void testAllocateSingleDevice()
throws ResourceHandlerException, IOException {
setupTestDirectoryWithScript();
plugin = new NECVEPlugin(envProvider, defaultSearchDirs, udevUtil);
Set<Device> available = new HashSet<>();
Device device = getTestDevice(0);
available.add(device);
Set<Device> allocated = plugin.allocateDevices(available, 1, env);
assertEquals("No. of devices", 1, allocated.size());
Device allocatedDevice = allocated.iterator().next();
assertSame("Device", device, allocatedDevice);
}
|
@Override
public void run() {
updateElasticSearchHealthStatus();
updateFileSystemMetrics();
}
|
@Test
public void when_elasticsearch_yellow_status_is_updated_to_green() {
ClusterHealthResponse clusterHealthResponse = new ClusterHealthResponse();
clusterHealthResponse.setStatus(ClusterHealthStatus.YELLOW);
when(esClient.clusterHealth(any())).thenReturn(clusterHealthResponse);
underTest.run();
verify(serverMonitoringMetrics, times(1)).setElasticSearchStatusToGreen();
verifyNoMoreInteractions(serverMonitoringMetrics);
}
|
@VisibleForTesting
protected Collection<StreamsMetadata> getStreamsMetadata() {
if (sharedRuntimesEnabled && kafkaStreams instanceof KafkaStreamsNamedTopologyWrapper) {
return ((KafkaStreamsNamedTopologyWrapper) kafkaStreams)
.streamsMetadataForStore(storeName, queryId);
}
return kafkaStreams.streamsMetadataForStore(storeName);
}
|
@Test
public void shouldUseNamedTopologyWhenSharedRuntimeIsEnabledForStreamsMetadataForStore() {
// Given:
final KsLocator locator = new KsLocator(STORE_NAME, kafkaStreamsNamedTopologyWrapper, topology,
keySerializer, LOCAL_HOST_URL, true, "queryId");
// When:
locator.getStreamsMetadata();
// Then:
Mockito.verify(kafkaStreamsNamedTopologyWrapper).streamsMetadataForStore(STORE_NAME, "queryId");
}
|
static <PojoT extends SdkPojo, BuilderT extends SdkBuilder<BuilderT, PojoT> & SdkPojo>
AwsBuilderFactory<PojoT, BuilderT> builderFactory(Class<PojoT> clazz) {
Generic pojoType = new ForLoadedType(clazz).asGenericType();
MethodDescription builderMethod =
pojoType.getDeclaredMethods().filter(named("builder").and(isStatic())).getOnly();
Generic providerType =
Generic.Builder.parameterizedType(FACTORY_TYPE, pojoType, builderMethod.getReturnType())
.build();
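// Generate a factory subclass whose get() simply invokes the POJO's static builder() method.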
try {
return (AwsBuilderFactory<PojoT, BuilderT>)
BYTE_BUDDY
.with(new ByteBuddyUtils.InjectPackageStrategy(clazz))
.subclass(providerType)
.method(named("get"))
.intercept(MethodCall.invoke(builderMethod))
.make()
.load(ReflectHelpers.findClassLoader(), getClassLoadingStrategy(clazz))
.getLoaded()
.getDeclaredConstructor()
.newInstance();
} catch (ReflectiveOperationException e) {
throw new RuntimeException("Unable to generate builder factory for " + clazz, e);
}
}
|
@Test
public void generateBuilderFactory() {
AwsBuilderFactory<SendMessageRequest, SendMessageRequest.Builder> factory =
AwsSchemaUtils.builderFactory(SendMessageRequest.class);
assertThat(factory.getClass().getPackage()).isEqualTo(SendMessageRequest.class.getPackage());
assertThat(factory.get()).isInstanceOf(SendMessageRequest.Builder.class);
assertThat(factory.sdkFields()).isEqualTo(SendMessageRequest.builder().sdkFields());
}
|
@Override
public MoveApplicationAcrossQueuesResponse moveApplicationAcrossQueues(
MoveApplicationAcrossQueuesRequest request)
throws YarnException, IOException {
if (request == null || request.getApplicationId() == null || request.getTargetQueue() == null) {
routerMetrics.incrMoveApplicationAcrossQueuesFailedRetrieved();
String msg = "Missing moveApplicationAcrossQueues request or " +
"applicationId or target queue.";
RouterAuditLogger.logFailure(user.getShortUserName(), MOVE_APPLICATION_ACROSS_QUEUES, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, msg);
RouterServerUtil.logAndThrowException(msg);
}
long startTime = clock.getTime();
SubClusterId subClusterId = null;
ApplicationId applicationId = request.getApplicationId();
try {
subClusterId = federationFacade
.getApplicationHomeSubCluster(applicationId);
} catch (YarnException e) {
routerMetrics.incrMoveApplicationAcrossQueuesFailedRetrieved();
String errMsgFormat = "Application %s does not exist in FederationStateStore.";
RouterAuditLogger.logFailure(user.getShortUserName(), MOVE_APPLICATION_ACROSS_QUEUES, UNKNOWN,
TARGET_CLIENT_RM_SERVICE, String.format(errMsgFormat, applicationId));
RouterServerUtil.logAndThrowException(e, errMsgFormat, applicationId);
}
ApplicationClientProtocol clientRMProxy = getClientRMProxyForSubCluster(subClusterId);
MoveApplicationAcrossQueuesResponse response = null;
try {
response = clientRMProxy.moveApplicationAcrossQueues(request);
} catch (Exception e) {
routerMetrics.incrMoveApplicationAcrossQueuesFailedRetrieved();
RouterServerUtil.logAndThrowException(e,
"Unable to moveApplicationAcrossQueues for %s to SubCluster %s.", applicationId,
subClusterId.getId());
}
if (response == null) {
LOG.error("No response when moveApplicationAcrossQueues "
+ "the applicationId {} to Queue {} In SubCluster {}.",
request.getApplicationId(), request.getTargetQueue(), subClusterId.getId());
}
long stopTime = clock.getTime();
RouterAuditLogger.logSuccess(user.getShortUserName(), MOVE_APPLICATION_ACROSS_QUEUES,
TARGET_CLIENT_RM_SERVICE, applicationId, subClusterId);
routerMetrics.succeededMoveApplicationAcrossQueuesRetrieved(stopTime - startTime);
return response;
}
|
@Test
public void testMoveApplicationAcrossQueues() throws Exception {
LOG.info("Test FederationClientInterceptor : MoveApplication AcrossQueues request.");
// null request
LambdaTestUtils.intercept(YarnException.class, "Missing moveApplicationAcrossQueues request " +
"or applicationId or target queue.", () -> interceptor.moveApplicationAcrossQueues(null));
// normal request
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
SubmitApplicationRequest request = mockSubmitApplicationRequest(appId);
// Submit the application
SubmitApplicationResponse response = interceptor.submitApplication(request);
Assert.assertNotNull(response);
Assert.assertNotNull(stateStoreUtil.queryApplicationHomeSC(appId));
SubClusterId subClusterId = interceptor.getApplicationHomeSubCluster(appId);
Assert.assertNotNull(subClusterId);
MockRM mockRM = interceptor.getMockRMs().get(subClusterId);
mockRM.waitForState(appId, RMAppState.ACCEPTED);
RMApp rmApp = mockRM.getRMContext().getRMApps().get(appId);
mockRM.waitForState(rmApp.getCurrentAppAttempt().getAppAttemptId(),
RMAppAttemptState.SCHEDULED);
MockNM nm = interceptor.getMockNMs().get(subClusterId);
nm.nodeHeartbeat(true);
MockRM.waitForState(rmApp.getCurrentAppAttempt(), RMAppAttemptState.ALLOCATED);
mockRM.sendAMLaunched(rmApp.getCurrentAppAttempt().getAppAttemptId());
MoveApplicationAcrossQueuesRequest acrossQueuesRequest =
MoveApplicationAcrossQueuesRequest.newInstance(appId, "root.target");
MoveApplicationAcrossQueuesResponse acrossQueuesResponse =
interceptor.moveApplicationAcrossQueues(acrossQueuesRequest);
Assert.assertNotNull(acrossQueuesResponse);
}
|
public static int[] computePhysicalIndices(
List<TableColumn> logicalColumns,
DataType physicalType,
Function<String, String> nameRemapping) {
Map<TableColumn, Integer> physicalIndexLookup =
computePhysicalIndices(logicalColumns.stream(), physicalType, nameRemapping);
return logicalColumns.stream().mapToInt(physicalIndexLookup::get).toArray();
}
|
@Test
void testFieldMappingReordered() {
int[] indices =
TypeMappingUtils.computePhysicalIndices(
TableSchema.builder()
.field("f1", DataTypes.BIGINT())
.field("f0", DataTypes.STRING())
.build()
.getTableColumns(),
ROW(FIELD("f0", DataTypes.STRING()), FIELD("f1", DataTypes.BIGINT())),
Function.identity());
assertThat(indices).isEqualTo(new int[] {1, 0});
}
|
public Range<PartitionKey> handleNewSinglePartitionDesc(Map<ColumnId, Column> schema, SingleRangePartitionDesc desc,
long partitionId, boolean isTemp) throws DdlException {
Range<PartitionKey> range;
try {
range = checkAndCreateRange(schema, desc, isTemp);
setRangeInternal(partitionId, isTemp, range);
} catch (IllegalArgumentException e) {
// Range.closedOpen may throw this if (lower > upper)
throw new DdlException("Invalid key range: " + e.getMessage());
}
idToDataProperty.put(partitionId, desc.getPartitionDataProperty());
idToReplicationNum.put(partitionId, desc.getReplicationNum());
idToInMemory.put(partitionId, desc.isInMemory());
idToStorageCacheInfo.put(partitionId, desc.getDataCacheInfo());
return range;
}
|
@Test(expected = DdlException.class)
public void testSmallInt() throws DdlException, AnalysisException {
Column k1 = new Column("k1", new ScalarType(PrimitiveType.SMALLINT), true, null, "", "");
partitionColumns.add(k1);
singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1",
new PartitionKeyDesc(Lists.newArrayList(new PartitionValue("-32768"))),
null));
partitionInfo = new RangePartitionInfo(partitionColumns);
for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) {
singleRangePartitionDesc.analyze(1, null);
partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(partitionColumns),
singleRangePartitionDesc, 20000L, false);
}
}
|
@Override
public BufferBuilder requestBufferBlocking(Object owner) {
checkIsInitialized();
reclaimBuffersIfNeeded(0);
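// Acquisition order: try the internal buffer queue first, then the buffer pool, then wait for a recycled buffer.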
MemorySegment memorySegment = bufferQueue.poll();
if (memorySegment == null) {
memorySegment = requestBufferBlockingFromPool();
}
if (memorySegment == null) {
memorySegment = checkNotNull(requestBufferBlockingFromQueue());
}
incNumRequestedBuffer(owner);
return new BufferBuilder(
checkNotNull(memorySegment), segment -> recycleBuffer(owner, segment));
}
|
@Test
void testRecycleBuffersAfterPoolSizeDecreased() throws IOException {
int numBuffers = 10;
BufferPool bufferPool = globalPool.createBufferPool(1, numBuffers);
TieredStorageMemoryManagerImpl storageMemoryManager =
createStorageMemoryManager(
bufferPool,
Collections.singletonList(new TieredStorageMemorySpec(this, 0)));
for (int i = 0; i < numBuffers; i++) {
BufferBuilder builder = storageMemoryManager.requestBufferBlocking(this);
requestedBuffers.add(builder);
}
bufferPool.setNumBuffers(numBuffers / 2);
for (int i = 0; i < numBuffers; i++) {
recycleBufferBuilder(requestedBuffers.get(i));
assertThat(bufferPool.bestEffortGetNumOfUsedBuffers())
.isEqualTo(Math.max(numBuffers / 2, numBuffers - (i + 1)));
}
}
|
@Override
public Collection<String> getLogicTableNames() {
return logicalTableMapper;
}
|
@Test
void assertGetLogicTableMapper() {
assertThat(new LinkedList<>(ruleAttribute.getLogicTableNames()), is(Collections.singletonList("foo_tbl")));
}
|
public Connection connection(Connection connection) {
// It is common to implement both interfaces
if (connection instanceof XAConnection) {
return xaConnection((XAConnection) connection);
}
return TracingConnection.create(connection, this);
}
|
@Test void connection_wrapsXaInput() {
abstract class Both implements XAConnection, Connection {
}
assertThat(jmsTracing.connection(mock(Both.class)))
.isInstanceOf(XAConnection.class);
}
|
public MaterializedConfiguration getConfiguration() {
MaterializedConfiguration conf = new SimpleMaterializedConfiguration();
FlumeConfiguration fconfig = getFlumeConfiguration();
AgentConfiguration agentConf = fconfig.getConfigurationFor(getAgentName());
if (agentConf != null) {
Map<String, ChannelComponent> channelComponentMap = Maps.newHashMap();
Map<String, SourceRunner> sourceRunnerMap = Maps.newHashMap();
Map<String, SinkRunner> sinkRunnerMap = Maps.newHashMap();
try {
loadChannels(agentConf, channelComponentMap);
loadSources(agentConf, channelComponentMap, sourceRunnerMap);
loadSinks(agentConf, channelComponentMap, sinkRunnerMap);
Set<String> channelNames = new HashSet<String>(channelComponentMap.keySet());
for (String channelName : channelNames) {
ChannelComponent channelComponent = channelComponentMap.get(channelName);
if (channelComponent.components.isEmpty()) {
LOGGER.warn("Channel {} has no components connected"
+ " and has been removed.", channelName);
channelComponentMap.remove(channelName);
Map<String, Channel> nameChannelMap =
channelCache.get(channelComponent.channel.getClass());
if (nameChannelMap != null) {
nameChannelMap.remove(channelName);
}
} else {
LOGGER.info("Channel {} connected to {}",
channelName, channelComponent.components.toString());
conf.addChannel(channelName, channelComponent.channel);
}
}
for (Map.Entry<String, SourceRunner> entry : sourceRunnerMap.entrySet()) {
conf.addSourceRunner(entry.getKey(), entry.getValue());
}
for (Map.Entry<String, SinkRunner> entry : sinkRunnerMap.entrySet()) {
conf.addSinkRunner(entry.getKey(), entry.getValue());
}
} catch (InstantiationException ex) {
LOGGER.error("Failed to instantiate component", ex);
} finally {
channelComponentMap.clear();
sourceRunnerMap.clear();
sinkRunnerMap.clear();
}
} else {
LOGGER.warn("No configuration found for this host:{}", getAgentName());
}
return conf;
}
|
@Test
public void testReusableChannelNotReusedLater() throws Exception {
String agentName = "agent1";
Map<String, String> propertiesReusable = getPropertiesForChannel(agentName,
RecyclableChannel.class
.getName());
Map<String, String> propertiesDisposable = getPropertiesForChannel(agentName,
DisposableChannel.class
.getName());
MemoryConfigurationProvider provider =
new MemoryConfigurationProvider(agentName, propertiesReusable);
MaterializedConfiguration config1 = provider.getConfiguration();
Channel channel1 = config1.getChannels().values().iterator().next();
assertTrue(channel1 instanceof RecyclableChannel);
provider.setProperties(propertiesDisposable);
MaterializedConfiguration config2 = provider.getConfiguration();
Channel channel2 = config2.getChannels().values().iterator().next();
assertTrue(channel2 instanceof DisposableChannel);
provider.setProperties(propertiesReusable);
MaterializedConfiguration config3 = provider.getConfiguration();
Channel channel3 = config3.getChannels().values().iterator().next();
assertTrue(channel3 instanceof RecyclableChannel);
assertNotSame(channel1, channel3);
}
|
public static long getProcessId(final long fallback) {
// Note: may fail in some JVM implementations
// therefore fallback has to be provided
// something like '<pid>@<hostname>', at least in SUN / Oracle JVMs
final String jvmName = ManagementFactory.getRuntimeMXBean().getName();
final int index = jvmName.indexOf('@');
if (index < 1) {
// part before '@' empty (index = 0) / '@' not found (index = -1)
return fallback;
}
try {
return Long.parseLong(jvmName.substring(0, index));
} catch (final NumberFormatException e) {
// ignore
}
return fallback;
}
|
@Test
public void test_getProcessId() {
long pid = Utils.getProcessId(-1);
assertNotEquals(-1, pid);
System.out.println("test pid:" + pid);
}
|
@Override
public void execute(Runnable command) {
_delegateExecutor.execute(toAccountingRunnable(command));
}
|
@Test
public void testBoundsWithinThreadCount()
throws BrokenBarrierException, InterruptedException {
SchedulerGroupAccountant accountant = mock(SchedulerGroupAccountant.class);
// Test below relies on jobs > limit
final int limit = 3;
final int jobs = 5;
// we want total threads > limit
Executor es = Executors.newFixedThreadPool(2 * limit);
final BoundedAccountingExecutor bes = new BoundedAccountingExecutor(es, limit, accountant);
final Syncer syncer = new Syncer();
// barrier parties: all the executables plus 1 for main testing thread
// startup barrier pauses main thread till all the threads have started
// validation barrier allows for validation to complete before proceeding further
syncer._startupBarrier = new CyclicBarrier(limit + 1);
syncer._validationBarrier = new CyclicBarrier(limit + 1);
// start adding jobs in new thread. We need to add jobs in new thread
// because the thread adding jobs is expected to block at limit
new Thread(new Runnable() {
@Override
public void run() {
for (int i = 0; i < jobs; i++) {
bes.execute(new Runnable() {
@Override
public void run() {
try {
_running.incrementAndGet();
syncer._startupBarrier.await();
syncer._validationBarrier.await();
_running.decrementAndGet();
} catch (Exception e) {
throw new RuntimeException(e);
}
}
});
}
}
}).start();
syncer._startupBarrier.await();
// At this point, 'limit' jobs should have executed the startupBarrier.await() call.
// The other jobs should be waiting on a semaphore for permits inside the BoundedAccountingExecutor.execute()
// so they have not executed running.incrementAndGet() yet.
assertEquals(_running.get(), limit);
verify(accountant, times(limit)).incrementThreads();
// reset will clear the counts on incrementThreads
reset(accountant);
final int pendingJobs = jobs - limit;
// Before the pendingJobs get to startupBarrier, reset it to a different value
// since we cannot change the limit of the CyclicBarrier once created.
// The new limit will be pending jobs plus the await we will call in this thread.
syncer._startupBarrier = new CyclicBarrier(pendingJobs + 1);
// Now let the running threads complete and call running.decrementAndGet. As
// they exit, the pending jobs will acquire permits and start to increment
// the running counter and wait on startupBarrier.await().
syncer._validationBarrier.await();
// verify additional jobs are run as soon as current job finishes
syncer._validationBarrier = new CyclicBarrier(pendingJobs + 1);
// When we run the test in a small number of cores, it is possible that the running jobs
// have not yet gotten to execute running.decrementAndGet(), but the pending jobs have already
// done the increment. So, we need to wait until we check the running counter to equal the
// pending jobs.
TestUtils.waitForCondition(aVoid -> _running.get() == pendingJobs, 10_000,
"Invalid number of running jobs" + _running.get());
// Now that there are no jobs running, we can let the new ones in.
// All the pending jobs will wait on the validationBarrier after we let them pass
// the startupbarrier below.
syncer._startupBarrier.await();
verify(accountant, times(pendingJobs)).incrementThreads();
syncer._validationBarrier.await();
}
|
public Organization getOrganizationById(Long id) {
Optional<Organization> conf = organizationRepository.findById(id);
if (!conf.isPresent()) {
throw new NotFoundException("Could not find organization with id: " + id);
}
return conf.get();
}
|
@Test
public void getOrganizationById() {
Optional<Organization> organizationOptional = Optional.of(newOrganization());
when(repositoryMock.findById(anyLong())).thenReturn(organizationOptional);
Organization result = organizationServiceMock.getOrganizationById(1L);
verify(repositoryMock, times(1)).findById(anyLong());
assertEquals(organizationOptional.get().getId(), result.getId());
}
|
public void start() {
configService.addListener(configListener);
interfaceService.addListener(interfaceListener);
setUpConnectivity();
}
|
@Test
public void testNoPeerInterface() {
IpAddress ip = IpAddress.valueOf("1.1.1.1");
bgpSpeakers.clear();
bgpSpeakers.add(new BgpConfig.BgpSpeakerConfig(Optional.of("foo"),
VlanId.NONE, s1Eth100, Collections.singleton(ip)));
reset(interfaceService);
interfaceService.addListener(anyObject(InterfaceListener.class));
expect(interfaceService.getMatchingInterface(ip)).andReturn(null).anyTimes();
replay(interfaceService);
// We don't expect any intents in this case
reset(intentSynchronizer);
replay(intentSynchronizer);
peerConnectivityManager.start();
verify(intentSynchronizer);
}
|
public Table getTable(String dbName, String tableName) {
return get(tableCache, DatabaseTableName.of(dbName, tableName));
}
|
@Test
public void testGetTransactionalTable() {
CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
// get insert only table
com.starrocks.catalog.Table table = cachingHiveMetastore.getTable("transactional_db", "insert_only");
Assert.assertNotNull(table);
// get full acid table
Assert.assertThrows(StarRocksConnectorException.class, () -> {
cachingHiveMetastore.getTable("transactional_db", "full_acid");
});
}
|
@SuppressWarnings("unchecked")
public static <T> T initialize(Map<String, String> properties) {
String factoryImpl =
PropertyUtil.propertyAsString(properties, S3FileIOProperties.CLIENT_FACTORY, null);
if (Strings.isNullOrEmpty(factoryImpl)) {
return (T) AwsClientFactories.from(properties);
}
return (T) loadClientFactory(factoryImpl, properties);
}
|
@Test
public void testS3FileIOImplCatalogPropertyDefined() {
Map<String, String> properties = Maps.newHashMap();
properties.put(
S3FileIOProperties.CLIENT_FACTORY,
"org.apache.iceberg.aws.s3.DefaultS3FileIOAwsClientFactory");
Object factoryImpl = S3FileIOAwsClientFactories.initialize(properties);
assertThat(factoryImpl)
.as(
"should instantiate an object of type S3FileIOAwsClientFactory when s3.client-factory-impl is set")
.isInstanceOf(S3FileIOAwsClientFactory.class);
}
|
public char charAt(final int index)
{
if (index < 0 || index >= length)
{
throw new StringIndexOutOfBoundsException("index=" + index + " length=" + length);
}
return (char)buffer.getByte(offset + index);
}
|
@Test
void shouldThrowExceptionWhenCharAtCalledWithNoBuffer()
{
assertThrows(StringIndexOutOfBoundsException.class, () -> asciiSequenceView.charAt(0));
}
|
static ApiError validateQuotaKeyValue(
Map<String, ConfigDef.ConfigKey> validKeys,
String key,
double value
) {
// Ensure we have an allowed quota key
ConfigDef.ConfigKey configKey = validKeys.get(key);
if (configKey == null) {
return new ApiError(Errors.INVALID_REQUEST, "Invalid configuration key " + key);
}
if (value <= 0.0) {
return new ApiError(Errors.INVALID_REQUEST, "Quota " + key + " must be greater than 0");
}
// Ensure the quota value is valid
switch (configKey.type()) {
case DOUBLE:
return ApiError.NONE;
case SHORT:
if (value > Short.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for a SHORT.");
}
return getErrorForIntegralQuotaValue(value, key);
case INT:
if (value > Integer.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for an INT.");
}
return getErrorForIntegralQuotaValue(value, key);
case LONG: {
if (value > Long.MAX_VALUE) {
return new ApiError(Errors.INVALID_REQUEST,
"Proposed value for " + key + " is too large for a LONG.");
}
return getErrorForIntegralQuotaValue(value, key);
}
default:
return new ApiError(Errors.UNKNOWN_SERVER_ERROR,
"Unexpected config type " + configKey.type() + " should be Long or Double");
}
}
|
@Test
public void testValidateQuotaKeyValueForNegativeQuota() {
assertEquals(new ApiError(Errors.INVALID_REQUEST, "Quota consumer_byte_rate must be greater than 0"),
ClientQuotaControlManager.validateQuotaKeyValue(
VALID_CLIENT_ID_QUOTA_KEYS, "consumer_byte_rate", -2.0));
}
|
@Override
public String buildRemoteURL(String baseRepositoryURL, String referencePath) {
// Rebuild a downloadable URL to retrieve file.
String remoteUrl = baseRepositoryURL.substring(0, baseRepositoryURL.lastIndexOf("/"));
String pathToAppend = referencePath;
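// Each leading "../" in the reference strips one more segment from the base URL.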
while (pathToAppend.startsWith("../")) {
remoteUrl = remoteUrl.substring(0, remoteUrl.lastIndexOf("/"));
pathToAppend = pathToAppend.substring(3);
}
if (pathToAppend.startsWith("./")) {
pathToAppend = pathToAppend.substring(2);
}
if (pathToAppend.startsWith("/")) {
pathToAppend = pathToAppend.substring(1);
}
remoteUrl += "/" + pathToAppend;
return remoteUrl;
}
|
@Test
void testBuildRemoteURL() {
SimpleReferenceURLBuilder builder = new SimpleReferenceURLBuilder();
assertEquals("https://raw.githubusercontent.com/microcks/microcks/main/samples/schema-ref.yml",
builder.buildRemoteURL(BASE_URL, "schema-ref.yml"));
assertEquals("https://raw.githubusercontent.com/microcks/microcks/main/samples/schema-ref.yml",
builder.buildRemoteURL(BASE_URL, "./schema-ref.yml"));
assertEquals("https://raw.githubusercontent.com/microcks/microcks/main/refs/schema-ref.yml",
builder.buildRemoteURL(BASE_URL, "../refs/schema-ref.yml"));
assertEquals("https://raw.githubusercontent.com/microcks/microcks/main/refs/sub/schema-ref.yml",
builder.buildRemoteURL(BASE_URL, "../refs/sub/schema-ref.yml"));
assertEquals("https://raw.githubusercontent.com/microcks/microcks/refs/sub/schema-ref.yml",
builder.buildRemoteURL(BASE_URL, "../../refs/sub/schema-ref.yml"));
}
|
public static InstrumentedThreadFactory defaultThreadFactory(MetricRegistry registry, String name) {
return new InstrumentedThreadFactory(Executors.defaultThreadFactory(), registry, name);
}
|
@Test
public void testDefaultThreadFactoryWithName() throws Exception {
final ThreadFactory threadFactory = InstrumentedExecutors.defaultThreadFactory(registry, "tf");
threadFactory.newThread(new NoopRunnable());
assertThat(registry.meter("tf.created").getCount()).isEqualTo(1L);
final Field delegateField = InstrumentedThreadFactory.class.getDeclaredField("delegate");
delegateField.setAccessible(true);
final ThreadFactory delegate = (ThreadFactory) delegateField.get(threadFactory);
assertThat(delegate.getClass().getCanonicalName()).isEqualTo("java.util.concurrent.Executors.DefaultThreadFactory");
}
|
List<Integer> getClosingTagsOffsets() {
return closingTagsOffsets;
}
|
@Test
public void should_extract_upper_bounds_from_serialized_rules() {
List<Integer> offsets = decorationDataHolder.getClosingTagsOffsets();
assertThat(offsets.get(0)).isEqualTo(8);
assertThat(offsets.get(1)).isEqualTo(52);
assertThat(offsets.get(2)).isEqualTo(67);
assertThat(offsets.get(3)).isEqualTo(75);
assertThat(offsets.get(4)).isEqualTo(85);
assertThat(offsets.get(5)).isEqualTo(95);
assertThat(offsets.get(6)).isEqualTo(130);
assertThat(offsets.get(7)).isEqualTo(130);
assertThat(offsets.get(8)).isEqualTo(145);
}
|
public static boolean isSystemGroup(String group) {
if (StringUtils.isBlank(group)) {
return false;
}
String groupInLowerCase = group.toLowerCase();
for (String prefix : SYSTEM_GROUP_PREFIX_LIST) {
if (groupInLowerCase.startsWith(prefix)) {
return true;
}
}
return false;
}
|
@Test
public void testIsSystemGroup_NonSystemGroup_ReturnsFalse() {
String group = "FooGroup";
boolean result = BrokerMetricsManager.isSystemGroup(group);
assertThat(result).isFalse();
}
|
@Override public long get(long key1, int key2) {
return super.get0(key1, key2);
}
|
@Test
public void testGet() {
final long key1 = randomKey();
final int key2 = randomKey();
final SlotAssignmentResult slot = insert(key1, key2);
assertTrue(slot.isNew());
final long valueAddress2 = hsa.get(key1, key2);
assertEquals(slot.address(), valueAddress2);
}
|
@VisibleForTesting
void updateQueues(String args, SchedConfUpdateInfo updateInfo) {
if (args == null) {
return;
}
ArrayList<QueueConfigInfo> queueConfigInfos = new ArrayList<>();
for (String arg : args.split(";")) {
queueConfigInfos.add(getQueueConfigInfo(arg));
}
updateInfo.setUpdateQueueInfo(queueConfigInfos);
}
|
@Test(timeout = 10000)
public void testUpdateQueuesWithCommaInValue() {
SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
cli.updateQueues("root.a:a1=a1Val1\\,a1Val2 a1Val3,a2=a2Val1\\,a2Val2",
schedUpdateInfo);
List<QueueConfigInfo> updateQueueInfo = schedUpdateInfo
.getUpdateQueueInfo();
Map<String, String> paramValues = new HashMap<>();
paramValues.put("a1", "a1Val1,a1Val2 a1Val3");
paramValues.put("a2", "a2Val1,a2Val2");
validateQueueConfigInfo(updateQueueInfo, 0, "root.a", paramValues);
}
|
public static FST<Long> buildFST(SortedMap<String, Integer> input)
throws IOException {
PositiveIntOutputs fstOutput = PositiveIntOutputs.getSingleton();
FSTCompiler.Builder<Long> fstCompilerBuilder = new FSTCompiler.Builder<>(FST.INPUT_TYPE.BYTE4, fstOutput);
FSTCompiler<Long> fstCompiler = fstCompilerBuilder.build();
IntsRefBuilder scratch = new IntsRefBuilder();
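// FSTCompiler requires keys to be added in sorted order, which the SortedMap iteration provides.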
for (Map.Entry<String, Integer> entry : input.entrySet()) {
fstCompiler.add(Util.toUTF16(entry.getKey(), scratch), entry.getValue().longValue());
}
return FST.fromFSTReader(fstCompiler.compile(), fstCompiler.getFSTReader());
}
|
@Test
public void testFSTBuilder()
throws IOException {
SortedMap<String, Integer> x = new TreeMap<>();
x.put("hello-world", 12);
x.put("hello-world123", 21);
x.put("still", 123);
FST<Long> fst = FSTBuilder.buildFST(x);
File outputFile = new File(TEMP_DIR, "test.lucene");
FileOutputStream fileOutputStream = new FileOutputStream(outputFile);
OutputStreamDataOutput d = new OutputStreamDataOutput(fileOutputStream);
fst.save(d, d);
fileOutputStream.close();
Outputs<Long> outputs = PositiveIntOutputs.getSingleton();
File fstFile = new File(outputFile.getAbsolutePath());
PinotDataBuffer pinotDataBuffer =
PinotDataBuffer.mapFile(fstFile, true, 0, fstFile.length(), ByteOrder.BIG_ENDIAN, "");
PinotBufferIndexInput indexInput = new PinotBufferIndexInput(pinotDataBuffer, 0L, fstFile.length());
List<Long> results = RegexpMatcher.regexMatch("hello.*123", fst);
Assert.assertEquals(results.size(), 1);
Assert.assertEquals(results.get(0).longValue(), 21L);
results = RegexpMatcher.regexMatch(".*world", fst);
Assert.assertEquals(results.size(), 1);
Assert.assertEquals(results.get(0).longValue(), 12L);
}
|
@Override
@Cacheable(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE, key = "#code",
unless = "#result == null")
public NotifyTemplateDO getNotifyTemplateByCodeFromCache(String code) {
return notifyTemplateMapper.selectByCode(code);
}
|
@Test
public void testGetNotifyTemplateByCodeFromCache() {
// mock data
NotifyTemplateDO dbNotifyTemplate = randomPojo(NotifyTemplateDO.class);
notifyTemplateMapper.insert(dbNotifyTemplate);
// prepare parameters
String code = dbNotifyTemplate.getCode();
// invoke
NotifyTemplateDO notifyTemplate = notifyTemplateService.getNotifyTemplateByCodeFromCache(code);
// assert
assertPojoEquals(dbNotifyTemplate, notifyTemplate);
}
|
@Override
public void trace(String msg) {
logger.trace(msg);
}
|
@Test
void testTrace() {
jobRunrDashboardLogger.trace("trace");
verify(slfLogger).trace("trace");
}
|
@Override
public void apply(IntentOperationContext<ProtectionEndpointIntent> context) {
Optional<IntentData> toUninstall = context.toUninstall();
Optional<IntentData> toInstall = context.toInstall();
List<ProtectionEndpointIntent> uninstallIntents = context.intentsToUninstall();
List<ProtectionEndpointIntent> installIntents = context.intentsToInstall();
if (!toInstall.isPresent() && !toUninstall.isPresent()) {
intentInstallCoordinator.intentInstallSuccess(context);
return;
}
if (toUninstall.isPresent()) {
IntentData intentData = toUninstall.get();
trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
uninstallIntents.forEach(installable ->
trackerService.removeTrackedResources(intentData.intent().key(),
installable.resources()));
}
if (toInstall.isPresent()) {
IntentData intentData = toInstall.get();
trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
installIntents.forEach(installable ->
trackerService.addTrackedResources(intentData.key(),
installable.resources()));
}
List<Stage> stages = new ArrayList<>();
stages.add(new Stage(uninstallIntents.stream()
.map(i -> Pair.of(i, REMOVE))
.collect(Collectors.toList())));
stages.add(new Stage(installIntents.stream()
.map(i -> Pair.of(i, ADD))
.collect(Collectors.toList())));
for (Stage stage : stages) {
log.debug("applying Stage {}", stage);
try {
// wait for stage completion
stage.apply();
stage.listeners().forEach(networkConfigService::removeListener);
} catch (IntentException e) {
log.error("Stage {} failed, reason: {}", stage, e.toString());
intentInstallCoordinator.intentInstallFailed(context);
return;
}
}
// All stages succeeded
intentInstallCoordinator.intentInstallSuccess(context);
}
|
@Test
public void testUninstallIntents() {
List<Intent> intentsToUninstall = createProtectionIntents(CP2);
List<Intent> intentsToInstall = Lists.newArrayList();
IntentData toUninstall = new IntentData(createP2PIntent(),
IntentState.INSTALLING,
new WallClockTimestamp());
IntentData toInstall = null;
IntentOperationContext<ProtectionEndpointIntent> operationContext;
IntentInstallationContext context = new IntentInstallationContext(toUninstall, toInstall);
operationContext = new IntentOperationContext(intentsToUninstall, intentsToInstall, context);
installer.apply(operationContext);
assertEquals(intentInstallCoordinator.successContext, operationContext);
}
|
static void setConstructor(final String segmentName,
final String generatedClassName,
final ConstructorDeclaration constructorDeclaration,
final String kiePMMLModelClass,
final boolean isInterpreted,
final double weight) {
setConstructorSuperNameInvocation(generatedClassName, constructorDeclaration, segmentName);
final BlockStmt body = constructorDeclaration.getBody();
final ExplicitConstructorInvocationStmt superStatement =
CommonCodegenUtils.getExplicitConstructorInvocationStmt(body)
.orElseThrow(() -> new KiePMMLException(String.format(MISSING_CONSTRUCTOR_IN_BODY, body)));
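// Replace the "model" argument of the super(...) invocation with the instantiation of the concrete model class.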
final Expression instantiationExpression = getInstantiationExpression(kiePMMLModelClass, isInterpreted);
String modelInstantiationString = instantiationExpression.toString();
CommonCodegenUtils.setExplicitConstructorInvocationStmtArgument(superStatement, "model",
modelInstantiationString);
CommonCodegenUtils.setAssignExpressionValue(body, "weight", new DoubleLiteralExpr(weight));
CommonCodegenUtils.setAssignExpressionValue(body, "id", new StringLiteralExpr(segmentName));
}
|
@Test
void setConstructorNoInterpreted() {
ConstructorDeclaration constructorDeclaration = MODEL_TEMPLATE.getDefaultConstructor().get();
String segmentName = "SEGMENTNAME";
String generatedClassName = "GENERATEDCLASSNAME";
String kiePMMLModelClass = "KIEPMMLMODELCLASS";
double weight = 12.22;
KiePMMLSegmentFactory.setConstructor(segmentName,
generatedClassName,
constructorDeclaration,
kiePMMLModelClass,
false,
weight);
Map<Integer, Expression> superInvocationExpressionsMap = new HashMap<>();
superInvocationExpressionsMap.put(0, new NameExpr(String.format("\"%s\"", segmentName)));
ClassOrInterfaceType classOrInterfaceType = parseClassOrInterfaceType(kiePMMLModelClass);
ObjectCreationExpr objectCreationExpr = new ObjectCreationExpr();
objectCreationExpr.setType(classOrInterfaceType);
superInvocationExpressionsMap.put(3, new NameExpr(objectCreationExpr.toString()));
Map<String, Expression> assignExpressionMap = new HashMap<>();
assignExpressionMap.put("weight", new DoubleLiteralExpr(weight));
assignExpressionMap.put("id", new StringLiteralExpr(segmentName));
assertThat(commonEvaluateConstructor(constructorDeclaration, generatedClassName,
superInvocationExpressionsMap, assignExpressionMap)).isTrue();
}
|
@Override
public ValueRange range(TemporalField field) {
return offsetTime.range(field);
}
|
@Test
void range() {
Arrays.stream(ChronoField.values()).filter(offsetTime::isSupported)
.forEach(field -> assertEquals(offsetTime.range(field), zoneTime.range(field)));
}
|
@Override
public double p(double x) {
double e = Math.exp(-(x - mu) / scale);
return e / (scale * (1.0 + e) * (1.0 + e));
}
|
@Test
public void testP() {
System.out.println("p");
LogisticDistribution instance = new LogisticDistribution(2.0, 1.0);
instance.rand();
assertEquals(0.1050736, instance.p(0.001), 1E-7);
assertEquals(0.1057951, instance.p(0.01), 1E-7);
assertEquals(0.1131803, instance.p(0.1), 1E-7);
assertEquals(0.1217293, instance.p(0.2), 1E-7);
assertEquals(0.1491465, instance.p(0.5), 1E-7);
assertEquals(0.1966119, instance.p(1.0), 1E-7);
assertEquals(0.25, instance.p(2.0), 1E-7);
assertEquals(0.04517666, instance.p(5.0), 1E-7);
assertEquals(0.0003352377, instance.p(10.0), 1E-7);
}
|
@SuppressWarnings("WeakerAccess")
public Map<String, Object> getMainConsumerConfigs(final String groupId, final String clientId, final int threadIdx) {
final Map<String, Object> consumerProps = getCommonConsumerConfigs();
// Get main consumer override configs
final Map<String, Object> mainConsumerProps = originalsWithPrefix(MAIN_CONSUMER_PREFIX);
consumerProps.putAll(mainConsumerProps);
// this is a hack to work around StreamsConfig constructor inside StreamsPartitionAssignor to avoid casting
consumerProps.put(APPLICATION_ID_CONFIG, groupId);
// add group id, client id with stream client id prefix, and group instance id
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
final String groupInstanceId = (String) consumerProps.get(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG);
// Suffix each thread consumer with thread.id to enforce uniqueness of group.instance.id.
if (groupInstanceId != null) {
consumerProps.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId + "-" + threadIdx);
}
// add configs required for stream partition assignor
consumerProps.put(UPGRADE_FROM_CONFIG, getString(UPGRADE_FROM_CONFIG));
consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG));
consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG));
consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG));
consumerProps.put(ACCEPTABLE_RECOVERY_LAG_CONFIG, getLong(ACCEPTABLE_RECOVERY_LAG_CONFIG));
consumerProps.put(MAX_WARMUP_REPLICAS_CONFIG, getInt(MAX_WARMUP_REPLICAS_CONFIG));
consumerProps.put(PROBING_REBALANCE_INTERVAL_MS_CONFIG, getLong(PROBING_REBALANCE_INTERVAL_MS_CONFIG));
consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamsPartitionAssignor.class.getName());
consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_NON_OVERLAP_COST_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG, getString(RACK_AWARE_ASSIGNMENT_STRATEGY_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, getList(RACK_AWARE_ASSIGNMENT_TAGS_CONFIG));
consumerProps.put(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG, getInt(RACK_AWARE_ASSIGNMENT_TRAFFIC_COST_CONFIG));
consumerProps.put(TASK_ASSIGNOR_CLASS_CONFIG, getString(TASK_ASSIGNOR_CLASS_CONFIG));
// disable auto topic creation
consumerProps.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
// verify that producer batch config is no larger than segment size, then add topic configs required for creating topics
final Map<String, Object> topicProps = originalsWithPrefix(TOPIC_PREFIX, false);
final Map<String, Object> producerProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames());
if (topicProps.containsKey(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)) &&
producerProps.containsKey(ProducerConfig.BATCH_SIZE_CONFIG)) {
final int segmentSize = Integer.parseInt(topicProps.get(topicPrefix(TopicConfig.SEGMENT_BYTES_CONFIG)).toString());
final int batchSize = Integer.parseInt(producerProps.get(ProducerConfig.BATCH_SIZE_CONFIG).toString());
if (segmentSize < batchSize) {
throw new IllegalArgumentException(String.format("Specified topic segment size %d is is smaller than the configured producer batch size %d, this will cause produced batch not able to be appended to the topic",
segmentSize,
batchSize));
}
}
consumerProps.putAll(topicProps);
return consumerProps;
}
|
@Test
public void shouldAllowSettingConsumerIsolationLevelIfEosDisabled() {
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, READ_UNCOMMITTED.toString());
final StreamsConfig streamsConfig = new StreamsConfig(props);
final Map<String, Object> consumerConfigs = streamsConfig.getMainConsumerConfigs(groupId, clientId, threadIdx);
assertThat(
consumerConfigs.get(ConsumerConfig.ISOLATION_LEVEL_CONFIG),
equalTo(READ_UNCOMMITTED.toString())
);
}
|
public static boolean isAllInventoryTasksFinished(final Collection<PipelineTask> inventoryTasks) {
if (inventoryTasks.isEmpty()) {
log.warn("inventoryTasks is empty");
}
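    // Note: allMatch on an empty stream is vacuously true, so an empty task collection is reported as finished (hence the warning above).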
return inventoryTasks.stream().allMatch(each -> each.getTaskProgress().getPosition() instanceof IngestFinishedPosition);
}
|
@Test
void assertAllInventoryTasksAreFinishedWhenNotAllTasksAreFinished() {
AtomicReference<IngestPosition> finishedPosition = new AtomicReference<>(new IngestFinishedPosition());
AtomicReference<IngestPosition> unfinishedPosition = new AtomicReference<>(new IngestPlaceholderPosition());
InventoryTask actualTask1 = new InventoryTask("foo_id_1", mock(ExecuteEngine.class), mock(ExecuteEngine.class), mock(Dumper.class), mock(Importer.class), finishedPosition);
InventoryTask actualTask2 = new InventoryTask("foo_id_2", mock(ExecuteEngine.class), mock(ExecuteEngine.class), mock(Dumper.class), mock(Importer.class), unfinishedPosition);
assertFalse(PipelineJobProgressDetector.isAllInventoryTasksFinished(Arrays.asList(actualTask1, actualTask2)));
}
|
public static void optimize(Pipeline pipeline) {
// Compute which Schema fields are (or conversely, are not) accessed in a pipeline.
FieldAccessVisitor fieldAccessVisitor = new FieldAccessVisitor();
pipeline.traverseTopologically(fieldAccessVisitor);
// Find transforms in this pipeline which both: 1. support projection pushdown and 2. output
// unused fields.
ProjectionProducerVisitor pushdownProjectorVisitor =
new ProjectionProducerVisitor(fieldAccessVisitor.getPCollectionFieldAccess());
pipeline.traverseTopologically(pushdownProjectorVisitor);
Map<ProjectionProducer<PTransform<?, ?>>, Map<PCollection<?>, FieldAccessDescriptor>>
pushdownOpportunities = pushdownProjectorVisitor.getPushdownOpportunities();
// Translate target PCollections to their output TupleTags.
PCollectionOutputTagVisitor outputTagVisitor =
new PCollectionOutputTagVisitor(pushdownOpportunities);
pipeline.traverseTopologically(outputTagVisitor);
Map<ProjectionProducer<PTransform<?, ?>>, Map<TupleTag<?>, FieldAccessDescriptor>>
taggedFieldAccess = outputTagVisitor.getTaggedFieldAccess();
// For each eligible transform, replace it with a modified transform that omits the unused
// fields.
for (Entry<ProjectionProducer<PTransform<?, ?>>, Map<TupleTag<?>, FieldAccessDescriptor>>
entry : taggedFieldAccess.entrySet()) {
for (Entry<TupleTag<?>, FieldAccessDescriptor> outputFields : entry.getValue().entrySet()) {
LOG.info(
"Optimizing transform {}: output {} will contain reduced field set {}",
entry.getKey(),
outputFields.getKey(),
outputFields.getValue().fieldNamesAccessed());
}
PTransformMatcher matcher = application -> application.getTransform() == entry.getKey();
PushdownOverrideFactory<?, ?> overrideFactory =
new PushdownOverrideFactory<>(entry.getValue());
pipeline.replaceAll(ImmutableList.of(PTransformOverride.of(matcher, overrideFactory)));
}
}
|
@Test
public void testSourceDoesNotImplementPushdownProjector() {
Pipeline p = Pipeline.create();
SimpleSource source =
new SimpleSource(FieldAccessDescriptor.withFieldNames("foo", "bar", "baz"));
p.apply(source)
.apply(new FieldAccessTransform(FieldAccessDescriptor.withFieldNames("foo", "bar")));
ProjectionPushdownOptimizer.optimize(p);
Assert.assertTrue(pipelineHasTransform(p, source));
}
|
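// Convenience wrapper around getValue: looks up the named entry as a Boolean, passing "a boolean" as the type description used in error messages (e.g. "... not a boolean").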
public static Boolean getBoolean(Map<String, Object> map, String name) {
return getValue(map, name, Boolean.class, "a boolean");
}
|
@Test
public void testGetBooleanParameter() throws Exception {
Map<String, Object> o = makeCloudDictionary();
Assert.assertTrue(getBoolean(o, "singletonBooleanKey", false));
Assert.assertFalse(getBoolean(o, "missingKey", false));
try {
getBoolean(o, "emptyKey", false);
Assert.fail("should have thrown an exception");
} catch (Exception exn) {
assertThat(exn.toString(), Matchers.containsString("not a boolean"));
}
}
|
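// Reads a PyPA core-metadata file as UTF-8 and delegates to the Reader-based overload; read and parse failures are rethrown as AnalysisException.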
public static Properties getProperties(File file) throws AnalysisException {
try (BufferedReader utf8Reader = Files.newBufferedReader(file.toPath(), StandardCharsets.UTF_8)) {
return getProperties(utf8Reader);
} catch (IOException | IllegalArgumentException e) {
throw new AnalysisException("Error parsing PyPA core-metadata file", e);
}
}
|
@Test
public void getProperties_should_support_colon_in_headerValue() throws IOException {
String payload = "Metadata-Version: 2.2\r\n"
+ "Description: My value contains a : colon\r\n";
Properties props = PyPACoreMetadataParser.getProperties(new BufferedReader(new StringReader(payload)));
Assert.assertEquals("2.2", props.getProperty("Metadata-Version"));
Assert.assertEquals("My value contains a : colon", props.getProperty("Description"));
}
|
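// Factory for aggregation units: maps each SQL aggregation function to its evaluator, with distinct-aware variants for SUM, COUNT and AVG.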
public static AggregationUnit create(final AggregationType type, final boolean isDistinct) {
switch (type) {
case MAX:
return new ComparableAggregationUnit(false);
case MIN:
return new ComparableAggregationUnit(true);
case SUM:
return isDistinct ? new DistinctSumAggregationUnit() : new AccumulationAggregationUnit();
case COUNT:
return isDistinct ? new DistinctCountAggregationUnit() : new AccumulationAggregationUnit();
case AVG:
return isDistinct ? new DistinctAverageAggregationUnit() : new AverageAggregationUnit();
case BIT_XOR:
return new BitXorAggregationUnit();
default:
throw new UnsupportedSQLOperationException(type.name());
}
}
|
@Test
void assertCreateDistinctCountAggregationUnit() {
assertThat(AggregationUnitFactory.create(AggregationType.COUNT, true), instanceOf(DistinctCountAggregationUnit.class));
}
|