focal_method | test_case
---|---|
public boolean contains(final Object value)
{
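// unbox the Object argument to a primitive int and delegate to the int overload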
return contains((int)value);
}
|
@Test
void initiallyContainsNoElements()
{
for (int i = 0; i < 10_000; i++)
{
assertFalse(testSet.contains(i));
}
}
|
@DELETE
@Path("{id}")
@Timed
@ApiOperation(value = "Delete index set")
@AuditEvent(type = AuditEventTypes.INDEX_SET_DELETE)
@ApiResponses(value = {
@ApiResponse(code = 403, message = "Unauthorized"),
@ApiResponse(code = 404, message = "Index set not found"),
})
public void delete(@ApiParam(name = "id", required = true)
@PathParam("id") String id,
@ApiParam(name = "delete_indices")
@QueryParam("delete_indices") @DefaultValue("true") boolean deleteIndices) {
checkPermission(RestPermissions.INDEXSETS_DELETE, id);
final IndexSet indexSet = getIndexSet(indexSetRegistry, id);
final IndexSet defaultIndexSet = indexSetRegistry.getDefault();
if (indexSet.equals(defaultIndexSet)) {
throw new BadRequestException("Default index set <" + indexSet.getConfig().id() + "> cannot be deleted!");
}
if (indexSetService.delete(id) == 0) {
throw new NotFoundException("Couldn't delete index set with ID <" + id + ">");
} else {
if (deleteIndices) {
try {
systemJobManager.submit(indexSetCleanupJobFactory.create(indexSet));
} catch (SystemJobConcurrencyException e) {
LOG.error("Error running system job", e);
}
}
}
}
|
@Test
public void deleteDenied() {
notPermitted();
expectedException.expect(ForbiddenException.class);
expectedException.expectMessage("Not authorized to access resource id <id>");
try {
indexSetsResource.delete("id", false);
} finally {
verifyNoMoreInteractions(indexSetService);
}
}
|
String buildCustomMessage(EventNotificationContext ctx, TeamsEventNotificationConfig config, String template) throws PermanentEventNotificationException {
final List<MessageSummary> backlog = getMessageBacklog(ctx, config);
Map<String, Object> model = getCustomMessageModel(ctx, config.type(), backlog, config.timeZone());
try {
return templateEngine.transform(template, model);
} catch (Exception e) {
String error = "Invalid Custom Message template.";
LOG.error("{} [{}]", error, e.toString());
throw new PermanentEventNotificationException(error + e, e.getCause());
}
}
|
@Test(expected = PermanentEventNotificationException.class)
public void buildCustomMessageWithInvalidTemplate() throws EventNotificationException {
teamsEventNotificationConfig = buildInvalidTemplate();
teamsEventNotification.buildCustomMessage(eventNotificationContext, teamsEventNotificationConfig, "Title: ${does't exist}");
}
|
public static <T> Object create(Class<T> iface, T implementation,
RetryPolicy retryPolicy) {
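// wrap the single implementation in a DefaultFailoverProxyProvider so the failover-aware retry proxy can be reused for plain retries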
return RetryProxy.create(iface,
new DefaultFailoverProxyProvider<T>(iface, implementation),
retryPolicy);
}
|
@Test
public void testRetryByException() throws UnreliableException {
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
UnreliableInterface unreliable = (UnreliableInterface)
RetryProxy.create(UnreliableInterface.class, unreliableImpl,
retryByException(RETRY_FOREVER, exceptionToPolicyMap));
unreliable.failsOnceThenSucceeds();
try {
unreliable.alwaysFailsWithFatalException();
fail("Should fail");
} catch (FatalException e) {
// expected
}
}
|
@Override
public void monitor(RedisServer master) {
connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(),
master.getPort().intValue(), master.getQuorum().intValue());
}
|
@Test
public void testMonitor() {
Collection<RedisServer> masters = connection.masters();
RedisServer master = masters.iterator().next();
master.setName(master.getName() + ":");
connection.monitor(master);
}
|
public boolean hasLogicTable(final String logicTable) {
return shardingTables.containsKey(logicTable);
}
|
@Test
void assertHasLogicTable() {
assertTrue(createBindingTableRule().hasLogicTable("Logic_Table"));
}
|
@Override
public Object deserialize(Asn1ObjectInputStream in, Class<? extends Object> type, Asn1ObjectMapper mapper) {
final Asn1Entity entity = type.getAnnotation(Asn1Entity.class);
final Fields fields = new FieldSet(entity.partial(), mapper.getFields(type));
return readFields(mapper, in, fields, ObjectUtils.newInstance(type));
}
|
@Test
public void shouldDeserializeWithOptional() {
assertEquals(new Set(1, 2, 3), deserialize(
new SetConverter(), Set.class, new byte[] {
(byte) 0x81, 1, 0x01, (byte) 0x82, 1, 0x02, (byte) 0x83, 1, 0x03
}
));
}
|
public List<MessageQueue> fetchPublishMessageQueues(String topic) throws MQClientException {
try {
TopicRouteData topicRouteData = this.mQClientFactory.getMQClientAPIImpl().getTopicRouteInfoFromNameServer(topic, timeoutMillis);
if (topicRouteData != null) {
TopicPublishInfo topicPublishInfo = MQClientInstance.topicRouteData2TopicPublishInfo(topic, topicRouteData);
if (topicPublishInfo != null && topicPublishInfo.ok()) {
return parsePublishMessageQueues(topicPublishInfo.getMessageQueueList());
}
}
} catch (Exception e) {
throw new MQClientException("Can not find Message Queue for this topic, " + topic, e);
}
throw new MQClientException("Unknow why, Can not find Message Queue for this topic, " + topic, null);
}
|
@Test
public void assertFetchPublishMessageQueues() throws MQClientException {
List<MessageQueue> queueList = mqAdminImpl.fetchPublishMessageQueues(defaultTopic);
assertNotNull(queueList);
assertEquals(6, queueList.size());
for (MessageQueue each : queueList) {
assertEquals(defaultTopic, each.getTopic());
assertEquals(defaultBroker, each.getBrokerName());
}
}
|
@Override
public ImportResult importItem(UUID jobId,
IdempotentImportExecutor idempotentExecutor,
TokensAndUrlAuthData authData, ContactsModelWrapper data) throws Exception {
JCardReader reader = new JCardReader(data.getVCards());
try {
// TODO(olsona): address any other problems that might arise in conversion
List<VCard> vCardList = reader.readAll();
PeopleService.People peopleService = getOrCreatePeopleService(authData).people();
for (VCard vCard : vCardList) {
Person person = convert(vCard);
idempotentExecutor.executeAndSwallowIOExceptions(
vCard.toString(),
vCard.getFormattedName().toString(),
() -> peopleService.createContact(person).execute().toPrettyString());
}
return ImportResult.OK;
} catch (IOException e) {
return new ImportResult(e);
}
}
|
@Test
public void importFirstResources() throws Exception {
// Set up: small number of VCards to be imported
int numberOfVCards = 5;
List<VCard> vCardList = new LinkedList<>();
for (int i = 0; i < numberOfVCards; i++) {
StructuredName structuredName = new StructuredName();
structuredName.setFamily("Family" + i);
structuredName.setParameter(SOURCE_PARAM_NAME_TYPE, CONTACT_SOURCE_TYPE);
VCard vCard = new VCard();
vCard.setStructuredName(structuredName);
vCard.setFormattedName("First " + structuredName.getFamily());
vCardList.add(vCard);
}
String vCardString = GoogleContactsExporter.makeVCardString(vCardList);
ContactsModelWrapper wrapper = new ContactsModelWrapper(vCardString);
// Run test
contactsService.importItem(UUID.randomUUID(), executor, null, wrapper);
// Check that the right methods were called
verify(people, times(numberOfVCards)).createContact(any(Person.class));
verify(createContact, times(numberOfVCards)).execute();
}
|
@Override
public boolean equals(Object obj) {
if (obj instanceof SqlDaySecondInterval) {
SqlDaySecondInterval other = ((SqlDaySecondInterval) obj);
return millis == other.millis;
}
return false;
}
|
@Test
public void testEquals() {
SqlDaySecondInterval value = new SqlDaySecondInterval(1);
checkEquals(value, new SqlDaySecondInterval(1), true);
checkEquals(value, new SqlDaySecondInterval(2), false);
}
|
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "lookupConstraints is ImmutableList")
public List<LookupConstraint> getLookupConstraints() {
return lookupConstraints;
}
|
@Test
public void shouldExtractKeyFromNonEqualComparison() {
// Given:
final Expression expression = new ComparisonExpression(
Type.GREATER_THAN,
new UnqualifiedColumnReferenceExp(ColumnName.of("K")),
new IntegerLiteral(1)
);
final QueryFilterNode filterNode = new QueryFilterNode(
NODE_ID,
source,
expression,
metaStore,
ksqlConfig,
false,
plannerOptions
);
// When:
final List<LookupConstraint> keys = filterNode.getLookupConstraints();
// Then:
assertThat(keys.size(), is(1));
assertThat(keys.get(0), is(instanceOf(NonKeyConstraint.class)));
}
|
@Override
public Map<String, Node> allActive() {
return transformMap(delegate.allActive());
}
|
@Test
public void testAllActive() throws NodeNotFoundException {
assertThat(nodeService.allActive().keySet()).isEmpty();
nodeService.registerServer(nodeId.getNodeId(), true, TRANSPORT_URI, LOCAL_CANONICAL_HOSTNAME);
assertThat(nodeService.allActive().keySet()).containsExactly(nodeId.getNodeId());
}
|
@Override
public PageData<WidgetsBundle> findAllTenantWidgetsBundlesByTenantId(WidgetsBundleFilter widgetsBundleFilter, PageLink pageLink) {
return findTenantWidgetsBundlesByTenantIds(Arrays.asList(widgetsBundleFilter.getTenantId().getId(), NULL_UUID), widgetsBundleFilter, pageLink);
}
|
@Test
public void testTagsSearchInFindAllWidgetsBundlesByTenantId() {
for (var entry : SHOULD_FIND_SEARCH_TO_TAGS_MAP.entrySet()) {
String searchText = entry.getKey();
String[] tags = entry.getValue();
WidgetsBundle systemWidgetBundle = createSystemWidgetBundle("Test Widget Bundle Alias", "Test Widget Bundle Title");
WidgetType widgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, 0, "Test Widget Type Name", "Test Widget Type Description", tags);
widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(systemWidgetBundle.getId(), widgetType.getId(), 0));
PageData<WidgetsBundle> widgetTypes = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(
WidgetsBundleFilter.fullSearchFromTenantId(TenantId.SYS_TENANT_ID), new PageLink(10, 0, searchText)
);
assertThat(widgetTypes.getData()).hasSize(1);
assertThat(widgetTypes.getData().get(0).getId()).isEqualTo(systemWidgetBundle.getId());
widgetTypeDao.removeWidgetTypeFromWidgetsBundle(systemWidgetBundle.getUuidId(), widgetType.getUuidId());
widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, widgetType.getUuidId());
widgetsBundleDao.removeById(TenantId.SYS_TENANT_ID, systemWidgetBundle.getUuidId());
}
for (var entry : SHOULDNT_FIND_SEARCH_TO_TAGS_MAP.entrySet()) {
String searchText = entry.getKey();
String[] tags = entry.getValue();
WidgetsBundle systemWidgetBundle = createSystemWidgetBundle("Test Widget Bundle Alias", "Test Widget Bundle Title");
WidgetType widgetType = createAndSaveWidgetType(TenantId.SYS_TENANT_ID, 0, "Test Widget Type Name", "Test Widget Type Description", tags);
widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(systemWidgetBundle.getId(), widgetType.getId(), 0));
PageData<WidgetsBundle> widgetTypes = widgetsBundleDao.findAllTenantWidgetsBundlesByTenantId(
WidgetsBundleFilter.fullSearchFromTenantId(TenantId.SYS_TENANT_ID), new PageLink(10, 0, searchText)
);
assertThat(widgetTypes.getData()).hasSize(0);
widgetTypeDao.removeWidgetTypeFromWidgetsBundle(systemWidgetBundle.getUuidId(), widgetType.getUuidId());
widgetTypeDao.removeById(TenantId.SYS_TENANT_ID, widgetType.getUuidId());
widgetsBundleDao.removeById(TenantId.SYS_TENANT_ID, systemWidgetBundle.getUuidId());
}
widgetsBundles = new ArrayList<>();
widgetTypeList = new ArrayList<>();
}
|
static final String[] getPrincipalNames(String keytabFileName) throws IOException {
Keytab keytab = Keytab.loadKeytab(new File(keytabFileName));
Set<String> principals = new HashSet<>();
List<PrincipalName> entries = keytab.getPrincipals();
for (PrincipalName entry : entries) {
principals.add(entry.getName().replace("\\", "/"));
}
return principals.toArray(new String[0]);
}
|
@Test
public void testGetPrincipalNamesFromKeytab() throws IOException {
createKeyTab(testKeytab, testPrincipals);
// read all principals in the keytab file
String[] principals = KerberosUtil.getPrincipalNames(testKeytab);
Assert.assertNotNull("principals cannot be null", principals);
int expectedSize = 0;
List<String> principalList = Arrays.asList(principals);
for (String principal : testPrincipals) {
Assert.assertTrue("missing principal "+principal,
principalList.contains(principal));
expectedSize++;
}
Assert.assertEquals(expectedSize, principals.length);
}
|
public static String toString(Object obj) {
return String.valueOf(obj);
}
|
@Test
public void upperFirstTest() {
final StringBuilder sb = new StringBuilder("KEY");
final String s = StrUtil.upperFirst(sb);
assertEquals(s, sb.toString());
}
|
public static IpAddress valueOf(int value) {
byte[] bytes =
ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array();
return new IpAddress(Version.INET, bytes);
}
|
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfArrayInvalidOffsetIPv6() {
IpAddress ipAddress;
byte[] value;
value = new byte[] {11, 22, 33, // Preamble
0x11, 0x11, 0x22, 0x22,
0x33, 0x33, 0x44, 0x44,
0x55, 0x55, 0x66, 0x66,
0x77, 0x77,
(byte) 0x88, (byte) 0x88,
44, 55}; // Extra bytes
ipAddress = IpAddress.valueOf(IpAddress.Version.INET6, value, 6);
}
|
public boolean submitProcessingErrors(Message message) {
return submitProcessingErrorsInternal(message, message.processingErrors());
}
|
@Test
public void submitProcessingErrors_nothingSubmittedAndMessageNotFilteredOut_ifSubmissionEnabledAndDuplicatesAreKeptAndMessageDoesntSupportFailureHandling() throws Exception {
// given
final Message msg = Mockito.mock(Message.class);
when(msg.getMessageId()).thenReturn("msg-x");
when(msg.supportsFailureHandling()).thenReturn(false);
when(msg.processingErrors()).thenReturn(List.of(
new Message.ProcessingError(() -> "Cause 1", "Message 1", "Details 1"),
new Message.ProcessingError(() -> "Cause 2", "Message 2", "Details 2")
));
when(failureHandlingConfiguration.submitProcessingFailures()).thenReturn(true);
when(failureHandlingConfiguration.keepFailedMessageDuplicate()).thenReturn(true);
// when
final boolean notFilterOut = underTest.submitProcessingErrors(msg);
// then
assertThat(notFilterOut).isTrue();
verifyNoInteractions(failureSubmissionQueue);
}
|
@Override
public synchronized void editSchedule() {
updateConfigIfNeeded();
long startTs = clock.getTime();
CSQueue root = scheduler.getRootQueue();
Resource clusterResources = Resources.clone(scheduler.getClusterResource());
containerBasedPreemptOrKill(root, clusterResources);
if (LOG.isDebugEnabled()) {
LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
}
}
|
@Test
public void testZeroGuar() {
int[][] qData = new int[][] {
// / A B C D E F
{ 200, 100, 0, 99, 100, 10, 90 }, // abs
{ 200, 200, 200, 200, 200, 200, 200 }, // maxCap
{ 170, 80, 60, 20, 90, 90, 0 }, // used
{ 10, 0, 0, 0, 10, 0, 10 }, // pending
{ 0, 0, 0, 0, 0, 0, 0 }, // reserved
{ 4, 2, 1, 1, 2, 1, 1 }, // apps
{ -1, -1, 1, 1, -1, 1, 1 }, // req granularity
{ 2, 2, 0, 0, 2, 0, 0 }, // subqueues
};
ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
policy.editSchedule();
// verify capacity taken from A1, not B1 despite B1 being far over
// its absolute guaranteed capacity
verify(mDisp, never()).handle(argThat(new IsPreemptionRequestFor(appA)));
}
|
@Around(CLIENT_INTERFACE_PUBLISH_CONFIG_RPC)
Object publishConfigAroundRpc(ProceedingJoinPoint pjp, ConfigPublishRequest request, RequestMeta meta)
throws Throwable {
final ConfigChangePointCutTypes configChangePointCutType = ConfigChangePointCutTypes.PUBLISH_BY_RPC;
final List<ConfigChangePluginService> pluginServices = getPluginServices(
configChangePointCutType);
// no relevant plugin is enabled or registered
if (pluginServices.isEmpty()) {
return pjp.proceed();
}
ConfigChangeRequest configChangeRequest = new ConfigChangeRequest(configChangePointCutType);
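// copy the publish request fields and request metadata into the plugin-facing ConfigChangeRequest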
configChangeRequest.setArg("dataId", request.getDataId());
configChangeRequest.setArg("group", request.getGroup());
configChangeRequest.setArg("tenant", request.getTenant());
configChangeRequest.setArg("content", request.getContent());
configChangeRequest.setArg("type", request.getAdditionParam("type"));
configChangeRequest.setArg("tag", request.getAdditionParam("tag"));
configChangeRequest.setArg("configTags", request.getAdditionParam("config_tags"));
configChangeRequest.setArg("desc", request.getAdditionParam("desc"));
configChangeRequest.setArg("effect", request.getAdditionParam("effect"));
configChangeRequest.setArg("appName", request.getAdditionParam("appName"));
configChangeRequest.setArg("srcIp", meta.getClientIp());
configChangeRequest.setArg("requestIpApp", request.getAdditionParam("requestIpApp"));
configChangeRequest.setArg("srcUser", request.getAdditionParam("src_user"));
configChangeRequest.setArg("use", request.getAdditionParam("use"));
return configChangeServiceHandle(pjp, pluginServices, configChangeRequest);
}
|
@Test
void testPublishConfigAroundRpc() throws Throwable {
Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_BEFORE_TYPE);
ProceedingJoinPoint proceedingJoinPoint = Mockito.mock(ProceedingJoinPoint.class);
ConfigPublishRequest request = new ConfigPublishRequest();
RequestMeta requestMeta = new RequestMeta();
ConfigPublishResponse configPublishResponse = ConfigPublishResponse.buildSuccessResponse();
Mockito.when(proceedingJoinPoint.proceed(any())).thenReturn(configPublishResponse);
//execute
Object o = configChangeAspect.publishConfigAroundRpc(proceedingJoinPoint, request, requestMeta);
//expect
Mockito.verify(configChangePluginService, Mockito.times(1))
.execute(any(ConfigChangeRequest.class), any(ConfigChangeResponse.class));
assertEquals(configPublishResponse, o);
}
|
private int addValueMeta( RowMetaInterface rowMeta, String fieldName ) {
ValueMetaInterface valueMeta = new ValueMetaString( fieldName );
valueMeta.setOrigin( getStepname() );
// add if doesn't exist
int index = -1;
if ( !rowMeta.exists( valueMeta ) ) {
index = rowMeta.size();
rowMeta.addValueMeta( valueMeta );
} else {
index = rowMeta.indexOfValue( fieldName );
}
return index;
}
|
@Test
public void readInputWithDefaultValues() throws Exception {
final String virtualFile = createVirtualFile( "pdi-14832.txt", "1,\n" );
TextFileInputMeta meta = new TextFileInputMeta();
TextFileInputField field2 = field( "col2" );
field2.setIfNullValue( "DEFAULT" );
meta.setInputFields( new TextFileInputField[] { field( "col1" ), field2 } );
meta.setFileCompression( "None" );
meta.setFileType( "CSV" );
meta.setHeader( false );
meta.setNrHeaderLines( -1 );
meta.setFooter( false );
meta.setNrFooterLines( -1 );
TextFileInputData data = new TextFileInputData();
data.setFiles( new FileInputList() );
data.getFiles().addFile( KettleVFS.getFileObject( virtualFile ) );
data.outputRowMeta = new RowMeta();
data.outputRowMeta.addValueMeta( new ValueMetaString( "col1" ) );
data.outputRowMeta.addValueMeta( new ValueMetaString( "col2" ) );
data.dataErrorLineHandler = Mockito.mock( FileErrorHandler.class );
data.fileFormatType = TextFileInputMeta.FILE_FORMAT_UNIX;
data.separator = ",";
data.filterProcessor = new TextFileFilterProcessor( new TextFileFilter[ 0 ] );
data.filePlayList = new FilePlayListAll();
TextFileInput input = StepMockUtil.getStep( TextFileInput.class, TextFileInputMeta.class, "test" );
List<Object[]> output = TransTestingUtil.execute( input, meta, data, 1, false );
TransTestingUtil.assertResult( new Object[] { "1", "DEFAULT" }, output.get( 0 ) );
deleteVfsFile( virtualFile );
}
|
@Override
public void onTaskFinished(TaskAttachment attachment) {
if (attachment instanceof BrokerPendingTaskAttachment) {
onPendingTaskFinished((BrokerPendingTaskAttachment) attachment);
} else if (attachment instanceof BrokerLoadingTaskAttachment) {
onLoadingTaskFinished((BrokerLoadingTaskAttachment) attachment);
}
}
|
@Test
public void testPendingTaskOnFinishedWithJobCancelled(@Injectable BrokerPendingTaskAttachment attachment) {
BrokerLoadJob brokerLoadJob = new BrokerLoadJob();
Deencapsulation.setField(brokerLoadJob, "state", JobState.CANCELLED);
brokerLoadJob.onTaskFinished(attachment);
Set<Long> finishedTaskIds = Deencapsulation.getField(brokerLoadJob, "finishedTaskIds");
Assert.assertEquals(0, finishedTaskIds.size());
}
|
private void grow(int minCapacity) {
int oldCapacity = elementData.length;
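// grow by roughly 1.5x; if that still falls short of the requested minimum, use the minimum instead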
int newCapacity = oldCapacity + (oldCapacity >> 1);
if (newCapacity - minCapacity < 0) {
newCapacity = minCapacity;
}
elementData = Arrays.copyOf(elementData, newCapacity);
}
|
@Test
public void grow() {
// Default capacity is 10
IntArrayList list = new IntArrayList();
for (int i = 1; i <= 11; i++) {
list.add(i);
}
assertThat(list.trimAndGet()).hasSize(11);
}
|
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
String groupId,
String memberId,
int memberEpoch,
String instanceId,
String rackId,
int rebalanceTimeoutMs,
String clientId,
String clientHost,
List<String> subscribedTopicNames,
String assignorName,
List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
final long currentTimeMs = time.milliseconds();
final List<CoordinatorRecord> records = new ArrayList<>();
// Get or create the consumer group.
boolean createIfNotExists = memberEpoch == 0;
final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
throwIfConsumerGroupIsFull(group, memberId);
// Get or create the member.
if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
final ConsumerGroupMember member;
if (instanceId == null) {
member = getOrMaybeSubscribeDynamicConsumerGroupMember(
group,
memberId,
memberEpoch,
ownedTopicPartitions,
createIfNotExists,
false
);
} else {
member = getOrMaybeSubscribeStaticConsumerGroupMember(
group,
memberId,
memberEpoch,
instanceId,
ownedTopicPartitions,
createIfNotExists,
false,
records
);
}
// 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
// record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
// changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
// record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
// changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
.maybeUpdateInstanceId(Optional.ofNullable(instanceId))
.maybeUpdateRackId(Optional.ofNullable(rackId))
.maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
.maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
.maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
.setClientId(clientId)
.setClientHost(clientHost)
.setClassicMemberMetadata(null)
.build();
boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
groupId,
member,
updatedMember,
records
);
int groupEpoch = group.groupEpoch();
Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
SubscriptionType subscriptionType = group.subscriptionType();
if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
// The subscription metadata is updated in two cases:
// 1) The member has updated its subscriptions;
// 2) The refresh deadline has been reached.
subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
subscriptionMetadata = group.computeSubscriptionMetadata(
subscribedTopicNamesMap,
metadataImage.topics(),
metadataImage.cluster()
);
int numMembers = group.numMembers();
if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
numMembers++;
}
subscriptionType = ModernGroup.subscriptionType(
subscribedTopicNamesMap,
numMembers
);
if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
log.info("[GroupId {}] Computed new subscription metadata: {}.",
groupId, subscriptionMetadata);
bumpGroupEpoch = true;
records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
}
if (bumpGroupEpoch) {
groupEpoch += 1;
records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
}
group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
}
// 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
// the existing and the new target assignment is persisted to the partition.
final int targetAssignmentEpoch;
final Assignment targetAssignment;
if (groupEpoch > group.assignmentEpoch()) {
targetAssignment = updateTargetAssignment(
group,
groupEpoch,
member,
updatedMember,
subscriptionMetadata,
subscriptionType,
records
);
targetAssignmentEpoch = groupEpoch;
} else {
targetAssignmentEpoch = group.assignmentEpoch();
targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
}
// 3. Reconcile the member's assignment with the target assignment if the member is not
// fully reconciled yet.
updatedMember = maybeReconcile(
groupId,
updatedMember,
group::currentPartitionEpoch,
targetAssignmentEpoch,
targetAssignment,
ownedTopicPartitions,
records
);
scheduleConsumerGroupSessionTimeout(groupId, memberId);
// Prepare the response.
ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
.setMemberId(updatedMember.memberId())
.setMemberEpoch(updatedMember.memberEpoch())
.setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));
// The assignment is only provided in the following cases:
// 1. The member sent a full request. It does so when joining or rejoining the group with zero
// as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
// (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
// as those must be set in a full request.
// 2. The member's assignment has been updated.
boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
}
return new CoordinatorResult<>(records, response);
}
|
@Test
public void testNewMemberIsRejectedWithMaximumMembersIsReached() {
String groupId = "fooup";
// Use a static member id as it makes the test easier.
String memberId1 = Uuid.randomUuid().toString();
String memberId2 = Uuid.randomUuid().toString();
String memberId3 = Uuid.randomUuid().toString();
Uuid fooTopicId = Uuid.randomUuid();
String fooTopicName = "foo";
Uuid barTopicId = Uuid.randomUuid();
String barTopicName = "bar";
// Create a context with one consumer group containing two members.
MockPartitionAssignor assignor = new MockPartitionAssignor("range");
GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
.withConsumerGroupAssignors(Collections.singletonList(assignor))
.withMetadataImage(new MetadataImageBuilder()
.addTopic(fooTopicId, fooTopicName, 6)
.addTopic(barTopicId, barTopicName, 3)
.build())
.withConsumerGroupMaxSize(2)
.withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
.withMember(new ConsumerGroupMember.Builder(memberId1)
.setState(MemberState.STABLE)
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setServerAssignorName("range")
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1, 2),
mkTopicAssignment(barTopicId, 0, 1)))
.build())
.withMember(new ConsumerGroupMember.Builder(memberId2)
.setState(MemberState.STABLE)
.setMemberEpoch(10)
.setPreviousMemberEpoch(9)
.setClientId(DEFAULT_CLIENT_ID)
.setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setServerAssignorName("range")
.setAssignedPartitions(mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4, 5),
mkTopicAssignment(barTopicId, 2)))
.build())
.withAssignment(memberId1, mkAssignment(
mkTopicAssignment(fooTopicId, 0, 1, 2),
mkTopicAssignment(barTopicId, 0, 1)))
.withAssignment(memberId2, mkAssignment(
mkTopicAssignment(fooTopicId, 3, 4, 5),
mkTopicAssignment(barTopicId, 2)))
.withAssignmentEpoch(10))
.build();
assertThrows(GroupMaxSizeReachedException.class, () ->
context.consumerGroupHeartbeat(
new ConsumerGroupHeartbeatRequestData()
.setGroupId(groupId)
.setMemberId(memberId3)
.setMemberEpoch(0)
.setServerAssignor("range")
.setRebalanceTimeoutMs(5000)
.setSubscribedTopicNames(Arrays.asList("foo", "bar"))
.setTopicPartitions(Collections.emptyList())));
}
|
@Override
public void deleteRewardActivity(Long id) {
// Check that the activity exists
RewardActivityDO dbRewardActivity = validateRewardActivityExists(id);
if (!dbRewardActivity.getStatus().equals(PromotionActivityStatusEnum.CLOSE.getStatus())) { // activities that have not been closed cannot be deleted
throw exception(REWARD_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED);
}
// Delete
rewardActivityMapper.deleteById(id);
}
|
@Test
public void testDeleteRewardActivity_notExists() {
// Prepare parameters
Long id = randomLongId();
// Invoke and assert the expected exception
assertServiceException(() -> rewardActivityService.deleteRewardActivity(id), REWARD_ACTIVITY_NOT_EXISTS);
}
|
@Override
public Num calculate(BarSeries series, Position position) {
if (position == null || !position.isClosed()) {
return series.zero();
}
Returns returns = new Returns(series, position, Returns.ReturnType.LOG);
return calculateVaR(returns, confidence);
}
|
@Test
public void calculateWithNoBarsShouldReturn0() {
series = new MockBarSeries(numFunction, 100d, 95d, 100d, 80d, 85d, 70d);
AnalysisCriterion varCriterion = getCriterion();
assertNumEquals(numOf(0), varCriterion.calculate(series, new BaseTradingRecord()));
}
|
public void apply() {
if (applied) {
throw new IllegalStateException("can't apply twice");
}
applied = true;
PluginFileWriteRule writeRule = new PluginFileWriteRule(
props.nonNullValueAsFile(ProcessProperties.Property.PATH_HOME.getKey()).toPath(),
props.nonNullValueAsFile(ProcessProperties.Property.PATH_TEMP.getKey()).toPath());
PluginCeRule ceRule = new PluginCeRule();
pluginSecurityManager.restrictPlugins(writeRule, ceRule);
}
|
@Test
public void apply_calls_PluginSecurityManager() {
Properties properties = new Properties();
properties.setProperty(PATH_HOME.getKey(), "home");
properties.setProperty(PATH_TEMP.getKey(), "temp");
Props props = new Props(properties);
CeSecurityManager ceSecurityManager = new CeSecurityManager(pluginSecurityManager, props);
ceSecurityManager.apply();
verify(pluginSecurityManager).restrictPlugins(any(PluginFileWriteRule.class), any(PluginCeRule.class));
}
|
@Override
public Set<TopicPartition> getAllSubscribedPartitions(Consumer<?, ?> consumer) {
Set<TopicPartition> allPartitions = new HashSet<>();
for (String topic : topics) {
List<PartitionInfo> partitionInfoList = consumer.partitionsFor(topic);
if (partitionInfoList != null) {
for (PartitionInfo partitionInfo : partitionInfoList) {
allPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
}
} else {
LOG.warn("Topic {} not found, skipping addition of the topic", topic);
}
}
return allPartitions;
}
|
@Test
public void testFilter() {
String matchingTopicOne = "test-1";
String matchingTopicTwo = "test-11";
String unmatchedTopic = "unmatched";
NamedTopicFilter filter = new NamedTopicFilter(matchingTopicOne, matchingTopicTwo);
when(consumerMock.partitionsFor(matchingTopicOne)).thenReturn(Collections.singletonList(createPartitionInfo(matchingTopicOne, 0)));
List<PartitionInfo> partitionTwoPartitions = new ArrayList<>();
partitionTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 0));
partitionTwoPartitions.add(createPartitionInfo(matchingTopicTwo, 1));
when(consumerMock.partitionsFor(matchingTopicTwo)).thenReturn(partitionTwoPartitions);
when(consumerMock.partitionsFor(unmatchedTopic)).thenReturn(Collections.singletonList(createPartitionInfo(unmatchedTopic, 0)));
Set<TopicPartition> matchedPartitions = filter.getAllSubscribedPartitions(consumerMock);
assertThat("Expected filter to pass only topics with exact name matches", matchedPartitions,
containsInAnyOrder(new TopicPartition(matchingTopicOne, 0), new TopicPartition(matchingTopicTwo, 0), new TopicPartition(matchingTopicTwo, 1)));
}
|
@Override
public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) {
return DatasourceConfiguration.isEmbeddedStorage() && EnvUtil.getStandaloneMode();
}
|
@Test
void testMatches() {
MockedStatic<DatasourceConfiguration> propertyUtilMockedStatic = Mockito.mockStatic(DatasourceConfiguration.class);
MockedStatic<EnvUtil> envUtilMockedStatic = Mockito.mockStatic(EnvUtil.class);
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(true);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(true);
assertTrue(conditionStandaloneEmbedStorage.matches(context, metadata));
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(true);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(false);
assertFalse(conditionStandaloneEmbedStorage.matches(context, metadata));
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(false);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(true);
assertFalse(conditionStandaloneEmbedStorage.matches(context, metadata));
propertyUtilMockedStatic.when(DatasourceConfiguration::isEmbeddedStorage).thenReturn(false);
envUtilMockedStatic.when(EnvUtil::getStandaloneMode).thenReturn(false);
assertFalse(conditionStandaloneEmbedStorage.matches(context, metadata));
propertyUtilMockedStatic.close();
envUtilMockedStatic.close();
}
|
@Override
public ConfigDef config() {
return CONFIG_DEF;
}
|
@Test
public void testPatternMayNotBeEmptyInConfig() {
Map<String, String> props = new HashMap<>();
props.put("pattern", "");
ConfigException e = assertThrows(ConfigException.class, () -> config(props));
assertTrue(e.getMessage().contains("String must be non-empty"));
}
|
public static void main(String[] args) {
new App("No Danger", "Green Light");
}
|
@Test
void shouldExecuteApplicationWithoutException() {
assertDoesNotThrow(() -> App.main(new String[]{}));
}
|
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf,
Configuration yarnSiteConfig, boolean drfUsed,
boolean enableAsyncScheduler, boolean userPercentage,
FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER,
CapacityScheduler.class.getCanonicalName());
if (conf.getBoolean(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
int interval = conf.getInt(
FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
yarnSiteConfig.setInt(PREFIX +
"schedule-asynchronously.scheduling-interval-ms", interval);
}
// This should always be true to trigger the CS automatic
// queue refresh.
yarnSiteConfig.setBoolean(
YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
preemptionEnabled = true;
String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
int waitTimeBeforeKill = conf.getInt(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
waitTimeBeforeKill);
long waitBeforeNextStarvationCheck = conf.getLong(
FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
yarnSiteConfig.setLong(
CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
waitBeforeNextStarvationCheck);
} else {
if (preemptionMode ==
FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
}
}
// For auto-created queues' automatic deletion.
if (!userPercentage) {
String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.
class.getCanonicalName(), yarnSiteConfig);
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
policies);
// Set the expired-queue deletion interval to 10s, consistent with FS.
yarnSiteConfig.setInt(CapacitySchedulerConfiguration.
AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
}
if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
} else {
yarnSiteConfig.setBoolean(
CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
}
// Enable automatic CS configuration refresh.
yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
addMonitorPolicy(QueueConfigurationAutoRefreshPolicy
.class.getCanonicalName(), yarnSiteConfig));
int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
yarnSiteConfig.setInt(
CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
maxAssign);
}
float localityThresholdNode = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
if (localityThresholdNode !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
localityThresholdNode);
}
float localityThresholdRack = conf.getFloat(
FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
if (localityThresholdRack !=
FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
yarnSiteConfig.setFloat(
CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
localityThresholdRack);
}
if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
sizeBasedWeight = true;
}
if (drfUsed) {
yarnSiteConfig.set(
CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
DominantResourceCalculator.class.getCanonicalName());
}
if (enableAsyncScheduler) {
yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
}
}
|
@Test
public void testSiteDrfEnabledConversion() {
converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, true,
false, false, null);
assertEquals("Resource calculator type", DominantResourceCalculator.class,
yarnConvertedConfig.getClass(
CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS, null));
}
|
public int minValue()
{
final int missingValue = this.missingValue;
int min = 0 == size ? missingValue : Integer.MAX_VALUE;
final int[] entries = this.entries;
@DoNotSub final int length = entries.length;
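// values sit at the odd indices of the interleaved key/value entries array; slots equal to missingValue are unused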
for (@DoNotSub int valueIndex = 1; valueIndex < length; valueIndex += 2)
{
final int value = entries[valueIndex];
if (missingValue != value)
{
min = Math.min(min, value);
}
}
return min;
}
|
@Test
void shouldHaveNoMinValueForEmptyCollection()
{
assertEquals(MISSING_VALUE, map.minValue());
}
|
@Override
public Stream<Pair<String, CompactionOperation>> getPendingLogCompactionOperations() {
return execute(preferredView::getPendingLogCompactionOperations, () -> getSecondaryView().getPendingLogCompactionOperations());
}
|
@Test
public void testGetPendingLogCompactionOperations() {
Stream<Pair<String, CompactionOperation>> actual;
Stream<Pair<String, CompactionOperation>> expected = Collections.singleton(
(Pair<String, CompactionOperation>) new ImmutablePair<>("test", new CompactionOperation()))
.stream();
when(primary.getPendingLogCompactionOperations()).thenReturn(expected);
actual = fsView.getPendingLogCompactionOperations();
assertEquals(expected, actual);
verify(secondaryViewSupplier, never()).get();
resetMocks();
when(secondaryViewSupplier.get()).thenReturn(secondary);
when(primary.getPendingLogCompactionOperations()).thenThrow(new RuntimeException());
when(secondary.getPendingLogCompactionOperations()).thenReturn(expected);
actual = fsView.getPendingLogCompactionOperations();
assertEquals(expected, actual);
resetMocks();
when(secondary.getPendingLogCompactionOperations()).thenReturn(expected);
actual = fsView.getPendingLogCompactionOperations();
assertEquals(expected, actual);
resetMocks();
when(secondary.getPendingLogCompactionOperations()).thenThrow(new RuntimeException());
assertThrows(RuntimeException.class, () -> {
fsView.getPendingLogCompactionOperations();
});
}
|
@Override
public Response filter(Request request, RequestMeta meta, Class handlerClazz) throws NacosException {
try {
Method method = getHandleMethod(handlerClazz);
if (method.isAnnotationPresent(Secured.class) && authConfigs.isAuthEnabled()) {
if (Loggers.AUTH.isDebugEnabled()) {
Loggers.AUTH.debug("auth start, request: {}", request.getClass().getSimpleName());
}
Secured secured = method.getAnnotation(Secured.class);
if (!protocolAuthService.enableAuth(secured)) {
return null;
}
String clientIp = meta.getClientIp();
request.putHeader(Constants.Identity.X_REAL_IP, clientIp);
Resource resource = protocolAuthService.parseResource(request, secured);
IdentityContext identityContext = protocolAuthService.parseIdentity(request);
boolean result = protocolAuthService.validateIdentity(identityContext, resource);
RequestContext requestContext = RequestContextHolder.getContext();
requestContext.getAuthContext().setIdentityContext(identityContext);
requestContext.getAuthContext().setResource(resource);
if (null == requestContext.getAuthContext().getAuthResult()) {
requestContext.getAuthContext().setAuthResult(result);
}
if (!result) {
// TODO Get reason of failure
throw new AccessException("Validate Identity failed.");
}
String action = secured.action().toString();
result = protocolAuthService.validateAuthority(identityContext, new Permission(resource, action));
if (!result) {
// TODO Get reason of failure
throw new AccessException("Validate Authority failed.");
}
}
} catch (AccessException e) {
if (Loggers.AUTH.isDebugEnabled()) {
Loggers.AUTH.debug("access denied, request: {}, reason: {}", request.getClass().getSimpleName(),
e.getErrMsg());
}
Response defaultResponseInstance = getDefaultResponseInstance(handlerClazz);
defaultResponseInstance.setErrorInfo(NacosException.NO_RIGHT, e.getErrMsg());
return defaultResponseInstance;
} catch (Exception e) {
Response defaultResponseInstance = getDefaultResponseInstance(handlerClazz);
defaultResponseInstance.setErrorInfo(NacosException.SERVER_ERROR, ExceptionUtil.getAllExceptionMsg(e));
return defaultResponseInstance;
}
return null;
}
|
@Test
void testFilter() {
Mockito.when(authConfigs.isAuthEnabled()).thenReturn(true);
Request healthCheckRequest = new HealthCheckRequest();
try {
Response healthCheckResponse = remoteRequestAuthFilter.filter(healthCheckRequest, new RequestMeta(), MockRequestHandler.class);
assertNull(healthCheckResponse);
} catch (NacosException e) {
e.printStackTrace();
fail(e.getMessage());
}
}
|
public RuntimeOptionsBuilder parse(Map<String, String> properties) {
return parse(properties::get);
}
|
@Test
void should_have_publish_plugin_disabled_by_default() {
RuntimeOptions options = cucumberPropertiesParser
.parse(properties)
.enablePublishPlugin()
.build();
assertThat(options.plugins(), empty());
}
|
@Override
public PrimitiveTypeEncoding<UTF8Buffer> getEncoding(UTF8Buffer value) {
return value.getLength() <= 255 ? smallBufferEncoding : largeBufferEncoding;
}
|
@Test
public void testGetEncodingForSmallUTF8Buffer() {
PrimitiveTypeEncoding<UTF8Buffer> encoding = utf8BufferEncoding.getEncoding(smallBuffer);
assertTrue(encoding instanceof UTF8BufferType.SmallUTF8BufferEncoding);
assertEquals(1, encoding.getConstructorSize());
assertEquals(smallBuffer.getLength() + Byte.BYTES, encoding.getValueSize(smallBuffer));
assertEquals(EncodingCodes.STR8, encoding.getEncodingCode());
assertFalse(encoding.encodesJavaPrimitive());
assertEquals(utf8BufferEncoding, encoding.getType());
}
|
List<Condition> run(boolean useKRaft) {
List<Condition> warnings = new ArrayList<>();
checkKafkaReplicationConfig(warnings);
checkKafkaBrokersStorage(warnings);
if (useKRaft) {
// Additional checks done for KRaft clusters
checkKRaftControllerStorage(warnings);
checkKRaftControllerCount(warnings);
checkKafkaMetadataVersion(warnings);
checkInterBrokerProtocolVersionInKRaft(warnings);
checkLogMessageFormatVersionInKRaft(warnings);
} else {
// Additional checks done for ZooKeeper-based clusters
checkKafkaLogMessageFormatVersion(warnings);
checkKafkaInterBrokerProtocolVersion(warnings);
checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings);
}
return warnings;
}
|
@Test
public void testMetadataVersionIsOlderThanDefaultKafkaVersion() {
Kafka kafka = new KafkaBuilder(KAFKA)
.editSpec()
.editKafka()
.withMetadataVersion(KafkaVersionTestUtils.PREVIOUS_METADATA_VERSION)
.endKafka()
.endSpec()
.build();
KafkaSpecChecker checker = generateChecker(kafka, List.of(CONTROLLERS, POOL_A), new KafkaVersionChange(VERSIONS.defaultVersion(), VERSIONS.defaultVersion(), null, null, KafkaVersionTestUtils.PREVIOUS_METADATA_VERSION));
List<Condition> warnings = checker.run(true);
assertThat(warnings, hasSize(1));
assertThat(warnings.get(0).getReason(), is("KafkaMetadataVersion"));
assertThat(warnings.get(0).getMessage(), is("Metadata version is older than the Kafka version used by the cluster, which suggests that an upgrade is incomplete."));
}
|
public static URL toURL(java.nio.file.Path path) throws MalformedURLException {
final String scheme = path.toUri().getScheme();
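// keep the scheme reported by the path's URI, but preserve the original path string verbatim as the URL's file part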
return new URL(scheme, null, -1, path.toString());
}
|
@Test
void testAbsolutePathToURL() throws MalformedURLException {
final java.nio.file.Path absolutePath = temporaryFolder.getRoot().toAbsolutePath();
final URL absoluteURL = FileUtils.toURL(absolutePath);
final java.nio.file.Path transformedURL = Paths.get(absoluteURL.getPath());
assertThat(transformedURL).isEqualTo(absolutePath);
}
|
public static Find find(String regex) {
return find(regex, 0);
}
|
@Test
@Category(NeedsRunner.class)
public void testFindNone() {
PCollection<String> output = p.apply(Create.of("a", "b", "c", "d")).apply(Regex.find("[xyz]"));
PAssert.that(output).empty();
p.run();
}
|
@Override
public Class<? extends SumLabeledStorageBuilder> builder() {
return SumLabeledStorageBuilder.class;
}
|
@Test
public void testBuilder() throws IllegalAccessException, InstantiationException {
function.accept(
MeterEntity.newService("service-test", Layer.GENERAL),
HTTP_CODE_COUNT_1
);
function.calculate();
StorageBuilder<SumLabeledFunction> storageBuilder = function.builder().newInstance();
final HashMapConverter.ToStorage toStorage = new HashMapConverter.ToStorage();
storageBuilder.entity2Storage(function, toStorage);
final Map<String, Object> map = toStorage.obtain();
map.put(VALUE, ((DataTable) map.get(VALUE)).toStorageData());
SumLabeledFunction function2 = storageBuilder.storage2Entity(new HashMapConverter.ToEntity(map));
Assertions.assertEquals(function, function2);
Assertions.assertEquals(function.getValue(), function2.getValue());
}
|
public static NearCacheConfig copyWithInitializedDefaultMaxSizeForOnHeapMaps(NearCacheConfig nearCacheConfig) {
if (nearCacheConfig == null) {
return null;
}
EvictionConfig evictionConfig = nearCacheConfig.getEvictionConfig();
if (nearCacheConfig.getInMemoryFormat() == InMemoryFormat.NATIVE
|| evictionConfig.sizeConfigured) {
return nearCacheConfig;
}
// create copy of eviction config
EvictionConfig copyEvictionConfig = new EvictionConfig(evictionConfig)
.setSize(MapConfig.DEFAULT_MAX_SIZE);
// create copy of nearCache config and set eviction config
return new NearCacheConfig(nearCacheConfig)
.setEvictionConfig(copyEvictionConfig);
}
|
@Test
public void testCopyInitDefaultMaxSizeForOnHeapMaps_doesNotCopy_whenSizeIsConfigured() {
NearCacheConfig nearCacheConfig = new NearCacheConfig();
nearCacheConfig.setEvictionConfig(new EvictionConfig().setSize(10));
NearCacheConfig copy = NearCacheConfigAccessor.copyWithInitializedDefaultMaxSizeForOnHeapMaps(nearCacheConfig);
assertSame(nearCacheConfig, copy);
}
|
String getOwnerId(TbContext ctx) {
return "Tenant[" + ctx.getTenantId().getId() + "]RuleNode[" + ctx.getSelf().getId().getId() + "]";
}
|
@Test
public void verifyGetOwnerIdMethod() {
given(ctxMock.getTenantId()).willReturn(TENANT_ID);
given(ctxMock.getSelf()).willReturn(new RuleNode(RULE_NODE_ID));
String actualOwnerIdStr = mqttNode.getOwnerId(ctxMock);
String expectedOwnerIdStr = "Tenant[" + TENANT_ID.getId() + "]RuleNode[" + RULE_NODE_ID.getId() + "]";
assertThat(actualOwnerIdStr).isEqualTo(expectedOwnerIdStr);
}
|
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
var now = clock.instant();
var bearerToken = requestBearerToken(req).orElse(null);
if (bearerToken == null) {
log.fine("Missing bearer token");
return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
}
var permission = Permission.getRequiredPermission(req).orElse(null);
if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
var requestTokenHash = requestTokenHash(bearerToken);
var clientIds = new TreeSet<String>();
var permissions = EnumSet.noneOf(Permission.class);
var matchedTokens = new HashSet<TokenVersion>();
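// collect every configured client whose permissions cover the request and whose unexpired token hash matches the presented bearer token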
for (Client c : allowedClients) {
if (!c.permissions().contains(permission)) continue;
var matchedToken = c.tokens().get(requestTokenHash);
if (matchedToken == null) continue;
var expiration = matchedToken.expiration().orElse(null);
if (expiration != null && now.isAfter(expiration)) continue;
matchedTokens.add(matchedToken);
clientIds.add(c.id());
permissions.addAll(c.permissions());
}
if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
if (matchedTokens.size() > 1) {
log.warning("Multiple tokens matched for request %s"
.formatted(matchedTokens.stream().map(TokenVersion::id).toList()));
return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
}
var matchedToken = matchedTokens.stream().findAny().get();
addAccessLogEntry(req, "token.id", matchedToken.id());
addAccessLogEntry(req, "token.hash", matchedToken.fingerprint().toDelimitedHexString());
addAccessLogEntry(req, "token.exp", matchedToken.expiration().map(Instant::toString).orElse("<none>"));
ClientPrincipal.attachToRequest(req, clientIds, permissions);
return Optional.empty();
}
|
@Test
void fails_for_token_with_invalid_permission() {
var req = FilterTestUtils.newRequestBuilder()
.withMethod(Method.GET)
.withHeader("Authorization", "Bearer " + WRITE_TOKEN.secretTokenString())
.build();
var responseHandler = new MockResponseHandler();
newFilterWithClientsConfig().filter(req, responseHandler);
assertNotNull(responseHandler.getResponse());
assertEquals(FORBIDDEN, responseHandler.getResponse().getStatus());
}
|
@Override
public boolean put(PageId pageId, ByteBuffer page, CacheContext cacheContext) {
LOG.debug("put({},{} bytes) enters", pageId, page.remaining());
if (mState.get() != READ_WRITE) {
Metrics.PUT_NOT_READY_ERRORS.inc();
Metrics.PUT_ERRORS.inc();
return false;
}
int originPosition = page.position();
if (!mOptions.isAsyncWriteEnabled()) {
boolean ok = putInternal(pageId, page, cacheContext);
LOG.debug("put({},{} bytes) exits: {}", pageId, page.position() - originPosition, ok);
if (!ok) {
Metrics.PUT_ERRORS.inc();
}
return ok;
}
if (!mPendingRequests.add(pageId)) { // already queued
return false;
}
try {
mAsyncCacheExecutor.get().submit(() -> {
try {
boolean ok = putInternal(pageId, page, cacheContext);
if (!ok) {
Metrics.PUT_ERRORS.inc();
}
} finally {
mPendingRequests.remove(pageId);
}
});
} catch (RejectedExecutionException e) { // queue is full, skip
// RejectedExecutionException may be thrown in extreme cases under
// highly concurrent caching workloads. In these cases, return false.
mPendingRequests.remove(pageId);
Metrics.PUT_ASYNC_REJECTION_ERRORS.inc();
Metrics.PUT_ERRORS.inc();
LOG.debug("put({},{} bytes) fails due to full queue", pageId,
page.position() - originPosition);
return false;
}
LOG.debug("put({},{} bytes) exits with async write", pageId, page.position() - originPosition);
return true;
}
|
@Test
public void highStorageOverheadPut() throws Exception {
// a store so inefficient that it cannot hold any data
double highOverhead = CACHE_SIZE_BYTES / PAGE_SIZE_BYTES + 0.1;
mConf.set(PropertyKey.USER_CLIENT_CACHE_STORE_OVERHEAD, highOverhead);
mCacheManager = createLocalCacheManager();
assertFalse(mCacheManager.put(PAGE_ID1, PAGE1));
}
|
public static HttpHeaderDateFormat get() {
return dateFormatThreadLocal.get();
}
|
@Test
public void testFormat() {
HttpHeaderDateFormat format = HttpHeaderDateFormat.get();
final String formatted = format.format(DATE);
assertNotNull(formatted);
assertEquals("Sun, 06 Nov 1994 08:49:37 GMT", formatted);
}
|
public static List<Chunk> split(String s) {
int pos = s.indexOf(SLASH);
if (pos == -1) {
throw new RuntimeException("path did not start with or contain '/'");
}
List<Chunk> list = new ArrayList<>();
int startPos = 0;
int searchPos = 0;
boolean anyDepth = false;
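// an empty segment (doubled '/') marks the next chunk as an any-depth match; a backslash escapes a literal '/' inside a segment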
while (pos != -1) {
if (pos == 0) {
startPos = 1;
searchPos = 1;
} else if (s.charAt(pos - 1) == '\\') {
s = s.substring(0, pos - 1) + s.substring(pos);
searchPos = pos;
} else {
String temp = s.substring(startPos, pos);
if (temp.isEmpty()) {
anyDepth = true;
} else {
list.add(new Chunk(anyDepth, temp));
anyDepth = false; // reset
}
startPos = pos + 1;
searchPos = startPos;
}
pos = s.indexOf(SLASH, searchPos);
}
if (startPos != s.length()) {
String temp = s.substring(startPos);
if (!temp.isEmpty()) {
list.add(new Chunk(anyDepth, temp));
}
}
return list;
}
|
@Test
void testOnlyName2() {
List<PathSearch.Chunk> list = PathSearch.split("//listitem/{Taxpayer Information}");
logger.debug("list: {}", list);
PathSearch.Chunk first = list.get(0);
assertTrue(first.anyDepth);
assertEquals("listitem", first.controlType);
assertNull(first.className);
assertNull(first.name);
assertEquals(-1, first.index);
PathSearch.Chunk second = list.get(1);
assertFalse(second.anyDepth);
assertEquals(null, second.controlType);
assertEquals(null, second.className);
assertEquals("Taxpayer Information", second.name);
assertNotNull(second.nameCondition);
}
|
public void addBasicProperty(String name, String strValue) {
if (strValue == null) {
return;
}
name = StringUtil.capitalizeFirstLetter(name);
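// locate an addXxx(...) adder method for the capitalized property name; report an error if none exists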
Method adderMethod = aggregationAssessor.findAdderMethod(name);
if (adderMethod == null) {
addError("No adder for property [" + name + "].");
return;
}
Class<?>[] paramTypes = adderMethod.getParameterTypes();
isSanityCheckSuccessful(name, adderMethod, paramTypes, strValue);
Object arg;
try {
arg = StringToObjectConverter.convertArg(this, strValue, paramTypes[0]);
} catch (Throwable t) {
addError("Conversion to type [" + paramTypes[0] + "] failed. ", t);
return;
}
if (arg != null) {
invokeMethodWithSingleParameterOnThisObject(adderMethod, arg);
}
}
|
@Test
public void addValueOfTest() {
setter.addBasicProperty("fileSize", "1GB");
setter.addBasicProperty("fileSize", "10KB");
assertEquals(2, house.fileSizes.size());
assertEquals(FileSize.valueOf("1GB"), house.fileSizes.get(0));
assertEquals(FileSize.valueOf("10KB"), house.fileSizes.get(1));
}
|
public String defaultRemoteUrl() {
final String sanitizedUrl = sanitizeUrl();
try {
URI uri = new URI(sanitizedUrl);
if (uri.getUserInfo() != null) {
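// rebuild the URI with the password stripped from the user-info portion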
uri = new URI(uri.getScheme(), removePassword(uri.getUserInfo()), uri.getHost(), uri.getPort(), uri.getPath(), uri.getQuery(), uri.getFragment());
return uri.toString();
}
} catch (URISyntaxException e) {
return sanitizedUrl;
}
return sanitizedUrl;
}
|
@Test
void shouldReturnTheURLWhenNoCredentialsAreSpecified() {
assertThat(new HgUrlArgument("http://url##branch").defaultRemoteUrl(), is("http://url#branch"));
}
|
@Override
@Transactional(rollbackFor = Exception.class)
public void updateJob(JobSaveReqVO updateReqVO) throws SchedulerException {
validateCronExpression(updateReqVO.getCronExpression());
// Check that the job exists
JobDO job = validateJobExists(updateReqVO.getId());
// Only jobs in the enabled (NORMAL) status may be modified. Reason: if the job is paused, updating the Quartz job would cause it to start executing again.
if (!job.getStatus().equals(JobStatusEnum.NORMAL.getStatus())) {
throw exception(JOB_UPDATE_ONLY_NORMAL_STATUS);
}
        // update
JobDO updateObj = BeanUtils.toBean(updateReqVO, JobDO.class);
fillJobMonitorTimeoutEmpty(updateObj);
jobMapper.updateById(updateObj);
        // update the job in Quartz
schedulerManager.updateJob(job.getHandlerName(), updateReqVO.getHandlerParam(), updateReqVO.getCronExpression(),
updateReqVO.getRetryCount(), updateReqVO.getRetryInterval());
}
|
@Test
public void testUpdateJob_success() throws SchedulerException {
        // mock data
JobDO job = randomPojo(JobDO.class, o -> o.setStatus(JobStatusEnum.NORMAL.getStatus()));
jobMapper.insert(job);
        // prepare parameters
JobSaveReqVO updateReqVO = randomPojo(JobSaveReqVO.class, o -> {
o.setId(job.getId());
o.setCronExpression("0 0/1 * * * ? *");
});
        // invoke
jobService.updateJob(updateReqVO);
        // verify that the record's properties are correct
JobDO updateJob = jobMapper.selectById(updateReqVO.getId());
assertPojoEquals(updateReqVO, updateJob);
        // verify the invocation
verify(schedulerManager).updateJob(eq(job.getHandlerName()), eq(updateReqVO.getHandlerParam()),
eq(updateReqVO.getCronExpression()), eq(updateReqVO.getRetryCount()), eq(updateReqVO.getRetryInterval()));
}
|
@PublicAPI(usage = ACCESS)
public Md5sum getMd5sum() {
return md5sum;
}
|
@Test
public void compensates_error_on_md5_calculation() throws Exception {
Source source = newSource(new URI("bummer"));
assertThat(source.getMd5sum()).isEqualTo(Md5sum.UNDETERMINED);
}
|
@ScalarOperator(ADD)
@SqlType(StandardTypes.REAL)
public static long add(@SqlType(StandardTypes.REAL) long left, @SqlType(StandardTypes.REAL) long right)
{
return floatToRawIntBits(intBitsToFloat((int) left) + intBitsToFloat((int) right));
}
|
@Test
public void testAdd()
{
assertFunction("REAL'12.34' + REAL'56.78'", REAL, 12.34f + 56.78f);
assertFunction("REAL'-17.34' + REAL'-22.891'", REAL, -17.34f + -22.891f);
assertFunction("REAL'-89.123' + REAL'754.0'", REAL, -89.123f + 754.0f);
assertFunction("REAL'-0.0' + REAL'0.0'", REAL, -0.0f + 0.0f);
}
|
@Override
public PollResult poll(long currentTimeMs) {
return pollInternal(
prepareFetchRequests(),
this::handleFetchSuccess,
this::handleFetchFailure
);
}
|
@Test
public void testMultipleAbortMarkers() {
buildFetcher(OffsetResetStrategy.EARLIEST, new ByteArrayDeserializer(),
new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED);
ByteBuffer buffer = ByteBuffer.allocate(1024);
int currentOffset = 0;
currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
new SimpleRecord(time.milliseconds(), "abort1-1".getBytes(), "value".getBytes()),
new SimpleRecord(time.milliseconds(), "abort1-2".getBytes(), "value".getBytes()));
currentOffset += abortTransaction(buffer, 1L, currentOffset);
// Duplicate abort -- should be ignored.
currentOffset += abortTransaction(buffer, 1L, currentOffset);
// Now commit a transaction.
currentOffset += appendTransactionalRecords(buffer, 1L, currentOffset,
new SimpleRecord(time.milliseconds(), "commit1-1".getBytes(), "value".getBytes()),
new SimpleRecord(time.milliseconds(), "commit1-2".getBytes(), "value".getBytes()));
commitTransaction(buffer, 1L, currentOffset);
buffer.flip();
List<FetchResponseData.AbortedTransaction> abortedTransactions = Collections.singletonList(
new FetchResponseData.AbortedTransaction().setProducerId(1).setFirstOffset(0)
);
MemoryRecords records = MemoryRecords.readableRecords(buffer);
assignFromUser(singleton(tp0));
subscriptions.seek(tp0, 0);
// normal fetch
assertEquals(1, sendFetches());
assertFalse(fetcher.hasCompletedFetches());
client.prepareResponse(fullFetchResponseWithAbortedTransactions(records, abortedTransactions, Errors.NONE, 100L, 100L, 0));
networkClientDelegate.poll(time.timer(0));
assertTrue(fetcher.hasCompletedFetches());
Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords();
assertTrue(fetchedRecords.containsKey(tp0));
assertEquals(fetchedRecords.get(tp0).size(), 2);
List<ConsumerRecord<byte[], byte[]>> fetchedConsumerRecords = fetchedRecords.get(tp0);
Set<String> expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2"));
Set<String> actuallyCommittedKeys = new HashSet<>();
for (ConsumerRecord<byte[], byte[]> consumerRecord : fetchedConsumerRecords) {
actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8));
}
assertEquals(expectedCommittedKeys, actuallyCommittedKeys);
}
|
public static BigDecimal toNanos(Timestamp timestamp) {
final BigDecimal secondsAsNanos =
BigDecimal.valueOf(timestamp.getSeconds()).subtract(MIN_SECONDS).scaleByPowerOfTen(9);
final BigDecimal nanos = BigDecimal.valueOf(timestamp.getNanos());
return secondsAsNanos.add(nanos);
}
|
@Test
public void testToNanosConvertTimestampToNanos() {
assertEquals(
new BigDecimal("62135596810000000009"),
TimestampUtils.toNanos(Timestamp.ofTimeSecondsAndNanos(10L, 9)));
}
|
public void ensureActiveGroup() {
while (!ensureActiveGroup(time.timer(Long.MAX_VALUE))) {
log.warn("still waiting to ensure active group");
}
}
|
@Test
public void testWakeupAfterSyncGroupSentExternalCompletion() throws Exception {
setupCoordinator();
mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE));
mockClient.prepareResponse(new MockClient.RequestMatcher() {
private int invocations = 0;
@Override
public boolean matches(AbstractRequest body) {
invocations++;
boolean isSyncGroupRequest = body instanceof SyncGroupRequest;
if (isSyncGroupRequest && invocations == 1)
// wakeup after the request returns
consumerClient.wakeup();
return isSyncGroupRequest;
}
}, syncGroupResponse(Errors.NONE));
AtomicBoolean heartbeatReceived = prepareFirstHeartbeat();
assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()");
assertEquals(1, coordinator.onJoinPrepareInvokes);
assertEquals(0, coordinator.onJoinCompleteInvokes);
assertFalse(heartbeatReceived.get());
// the join group completes in this poll()
consumerClient.poll(mockTime.timer(0));
coordinator.ensureActiveGroup();
assertEquals(1, coordinator.onJoinPrepareInvokes);
assertEquals(1, coordinator.onJoinCompleteInvokes);
awaitFirstHeartbeat(heartbeatReceived);
}
|
Map<ProjectionProducer<PTransform<?, ?>>, Map<PCollection<?>, FieldAccessDescriptor>>
getPushdownOpportunities() {
return pushdownOpportunities.build();
}
|
@Test
public void testSimplePushdownProducer_returnsOnePushdown() {
Pipeline p = Pipeline.create();
PTransform<PBegin, PCollection<Row>> source = new SimpleSourceWithPushdown();
PCollection<Row> output = p.apply(source);
Map<PCollection<?>, FieldAccessDescriptor> pCollectionFieldAccess =
ImmutableMap.of(output, FieldAccessDescriptor.withFieldNames("field1", "field2"));
ProjectionProducerVisitor visitor = new ProjectionProducerVisitor(pCollectionFieldAccess);
p.traverseTopologically(visitor);
Map<ProjectionProducer<PTransform<?, ?>>, Map<PCollection<?>, FieldAccessDescriptor>>
pushdownOpportunities = visitor.getPushdownOpportunities();
Assert.assertEquals(1, pushdownOpportunities.size());
Map<PCollection<?>, FieldAccessDescriptor> opportunitiesForSource =
pushdownOpportunities.get(source);
Assert.assertNotNull(opportunitiesForSource);
Assert.assertEquals(1, opportunitiesForSource.size());
FieldAccessDescriptor fieldAccessDescriptor = opportunitiesForSource.get(output);
Assert.assertNotNull(fieldAccessDescriptor);
Assert.assertFalse(fieldAccessDescriptor.getAllFields());
assertThat(fieldAccessDescriptor.fieldNamesAccessed(), containsInAnyOrder("field1", "field2"));
}
|
@Override
public void asyncRequest(Request request, final RequestCallBack requestCallBack) throws NacosException {
Payload grpcRequest = GrpcUtils.convert(request);
ListenableFuture<Payload> requestFuture = grpcFutureServiceStub.request(grpcRequest);
        // set callback.
Futures.addCallback(requestFuture, new FutureCallback<Payload>() {
@Override
public void onSuccess(@Nullable Payload grpcResponse) {
Response response = (Response) GrpcUtils.parse(grpcResponse);
if (response != null) {
if (response instanceof ErrorResponse) {
requestCallBack.onException(new NacosException(response.getErrorCode(), response.getMessage()));
} else {
requestCallBack.onResponse(response);
}
} else {
requestCallBack.onException(new NacosException(ResponseCode.FAIL.getCode(), "response is null"));
}
}
@Override
public void onFailure(Throwable throwable) {
if (throwable instanceof CancellationException) {
requestCallBack.onException(
new TimeoutException("Timeout after " + requestCallBack.getTimeout() + " milliseconds."));
} else {
requestCallBack.onException(throwable);
}
}
}, requestCallBack.getExecutor() != null ? requestCallBack.getExecutor() : this.executor);
// set timeout future.
ListenableFuture<Payload> payloadListenableFuture = Futures.withTimeout(requestFuture,
requestCallBack.getTimeout(), TimeUnit.MILLISECONDS, RpcScheduledExecutor.TIMEOUT_SCHEDULER);
}
|
@Test
void testAsyncRequestError() throws NacosException, ExecutionException, InterruptedException {
when(future.get()).thenReturn(errorResponsePayload);
doAnswer(invocationOnMock -> {
((Runnable) invocationOnMock.getArgument(0)).run();
return null;
}).when(future).addListener(any(Runnable.class), eq(executor));
RequestCallBack requestCallBack = mock(RequestCallBack.class);
connection.asyncRequest(new HealthCheckRequest(), requestCallBack);
verify(requestCallBack).onException(any(NacosException.class));
}
|
@Override
protected ExecuteContext doAfter(ExecuteContext context) {
final Class<?> type = (Class<?>) context.getMemberFieldValue("type");
if (type == null) {
return context;
}
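        // compareAndSet ensures the retry cluster invoker classes are defined only once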
if (canInjectClusterInvoker(type.getName()) && isDefined.compareAndSet(false, true)) {
if (!(context.getResult() instanceof Map)) {
return context;
}
final Map<String, Class<?>> classes = (Map<String, Class<?>>) context.getResult();
final String retryClusterInvoker = flowControlConfig.getRetryClusterInvoker();
if (classes.get(retryClusterInvoker) != null) {
return context;
}
final Optional<Class<?>> retryInvokerClass;
final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
if (APACHE_DUBBO_CLUSTER_CLASS_NAME.equals(type.getName())) {
ClassUtils.defineClass(
"io.sermant.flowcontrol.retry.cluster.ApacheDubboClusterInvoker", contextClassLoader);
retryInvokerClass = ClassUtils.defineClass(
"io.sermant.flowcontrol.retry.cluster.ApacheDubboCluster", contextClassLoader);
} else if (ALIBABA_DUBBO_CLUSTER_CLASS_NAME.equals(type.getName())) {
ClassUtils.defineClass(
"io.sermant.flowcontrol.retry.cluster.AlibabaDubboClusterInvoker", contextClassLoader);
retryInvokerClass = ClassUtils.defineClass(
"io.sermant.flowcontrol.retry.cluster.AlibabaDubboCluster", contextClassLoader);
} else {
return context;
}
retryInvokerClass.ifPresent(invokerClass -> classes.put(retryClusterInvoker, invokerClass));
ClusterInvokerCreator.INSTANCE.getClusterInvokerMap().putAll(classes);
}
return context;
}
|
@Test
public void testApacheDubbo() throws NoSuchMethodException {
final ExtensionLoaderInterceptor interceptor = new ExtensionLoaderInterceptor();
final HashMap<String, Class<?>> result = new HashMap<>();
interceptor.doAfter(buildContext(true, result));
Assert.assertEquals(result.get(flowControlConfig.getRetryClusterInvoker()), ApacheDubboCluster.class);
}
|
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
if (value == null) {
return null;
}
return cast(value.longValue(), precision, scale);
}
|
@Test
public void shouldCastDecimalRoundingUpNegative() {
// When:
final BigDecimal decimal = DecimalUtil.cast(new BigDecimal("-1.12"), 2, 1);
// Then:
assertThat(decimal, is(new BigDecimal("-1.1")));
}
|
public static void clean(
Object func, ExecutionConfig.ClosureCleanerLevel level, boolean checkSerializable) {
clean(func, level, checkSerializable, Collections.newSetFromMap(new IdentityHashMap<>()));
}
|
@Test
void testWriteReplace() {
WithWriteReplace.SerializablePayload writeReplace =
new WithWriteReplace.SerializablePayload(new WithWriteReplace.Payload("text"));
assertThat(writeReplace.get().getRaw()).isEqualTo("text");
ClosureCleaner.clean(writeReplace, ExecutionConfig.ClosureCleanerLevel.TOP_LEVEL, true);
}
|
public static int getFieldCount(LogicalType logicalType) {
return logicalType.accept(FIELD_COUNT_EXTRACTOR);
}
|
@Test
void testFieldCountExtraction() {
DataType dataType = ROW(FIELD("f0", INT()), FIELD("f1", STRING()));
assertThat(LogicalTypeChecks.getFieldCount(dataType.getLogicalType())).isEqualTo(2);
}
|
@Override
public long arrayInsert(String path, long index, Object... values) {
return get(arrayInsertAsync(path, index, values));
}
|
@Test
public void testArrayInsert() {
RJsonBucket<TestType> al = redisson.getJsonBucket("test", new JacksonCodec<>(TestType.class));
TestType t = new TestType();
NestedType nt = new NestedType();
nt.setValues(Arrays.asList("t1", "t2", "t4"));
t.setType(nt);
al.set(t);
long s = al.arrayInsert("type.values", 2, "t3");
assertThat(s).isEqualTo(4);
List<Long> s1 = al.arrayInsertMulti("$.type.values", 3, "t5");
assertThat(s1).containsExactly(5L);
List<String> n = al.get(new JacksonCodec<>(new TypeReference<List<String>>() {}), "type.values");
assertThat(n).containsExactly("t1", "t2", "t3", "t5", "t4");
}
|
public List<String[]> select(int m) {
final List<String[]> result = new ArrayList<>((int) count(this.datas.length, m));
select(0, new String[m], 0, result);
return result;
}
|
@Test
public void selectTest() {
Combination combination = new Combination(new String[] { "1", "2", "3", "4", "5" });
List<String[]> list = combination.select(2);
assertEquals(Combination.count(5, 2), list.size());
assertArrayEquals(new String[] {"1", "2"}, list.get(0));
assertArrayEquals(new String[] {"1", "3"}, list.get(1));
assertArrayEquals(new String[] {"1", "4"}, list.get(2));
assertArrayEquals(new String[] {"1", "5"}, list.get(3));
assertArrayEquals(new String[] {"2", "3"}, list.get(4));
assertArrayEquals(new String[] {"2", "4"}, list.get(5));
assertArrayEquals(new String[] {"2", "5"}, list.get(6));
assertArrayEquals(new String[] {"3", "4"}, list.get(7));
assertArrayEquals(new String[] {"3", "5"}, list.get(8));
assertArrayEquals(new String[] {"4", "5"}, list.get(9));
List<String[]> selectAll = combination.selectAll();
assertEquals(Combination.countAll(5), selectAll.size());
List<String[]> list2 = combination.select(0);
assertEquals(1, list2.size());
}
|
public static List<TierFactory> initializeTierFactories(Configuration configuration) {
String externalTierFactoryClass =
configuration.get(
NettyShuffleEnvironmentOptions
.NETWORK_HYBRID_SHUFFLE_EXTERNAL_REMOTE_TIER_FACTORY_CLASS_NAME);
if (externalTierFactoryClass != null) {
return Collections.singletonList(
createExternalTierFactory(configuration, externalTierFactoryClass));
} else {
return getEphemeralTierFactories(configuration);
}
}
|
@Test
void testInitDurableExternalRemoteTierWithHigherPriority() {
Configuration configuration = new Configuration();
configuration.set(
NettyShuffleEnvironmentOptions.NETWORK_HYBRID_SHUFFLE_REMOTE_STORAGE_BASE_PATH,
tmpDir.toString());
configuration.set(
NettyShuffleEnvironmentOptions
.NETWORK_HYBRID_SHUFFLE_EXTERNAL_REMOTE_TIER_FACTORY_CLASS_NAME,
ExternalRemoteTierFactory.class.getName());
List<TierFactory> tierFactories =
TierFactoryInitializer.initializeTierFactories(configuration);
assertThat(tierFactories).hasSize(1);
assertThat(tierFactories.get(0)).isInstanceOf(ExternalRemoteTierFactory.class);
}
|
public static boolean canDrop(
FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) {
    Objects.requireNonNull(pred, "pred cannot be null");
    Objects.requireNonNull(columns, "columns cannot be null");
return pred.accept(new DictionaryFilter(columns, dictionaries));
}
|
@Test
public void testLtEqMissingColumn() throws Exception {
BinaryColumn b = binaryColumn("missing_column");
assertTrue(
"Should drop block for any non-null query",
canDrop(ltEq(b, Binary.fromString("any")), ccmd, dictionaries));
}
|
@Override
public <T> Serde<T> createSerde(
final Schema schema,
final KsqlConfig ksqlConfig,
final Supplier<SchemaRegistryClient> srFactory,
final Class<T> targetType,
final boolean isKey
) {
validateSchema(schema);
final Optional<Schema> physicalSchema;
if (useSchemaRegistryFormat) {
physicalSchema = properties.getSchemaId().isPresent() ? Optional.of(
SerdeUtils.getAndTranslateSchemaById(srFactory, properties.getSchemaId()
.get(), new JsonSchemaTranslator())) : Optional.empty();
} else {
physicalSchema = Optional.empty();
}
final Converter converter = useSchemaRegistryFormat
? getSchemaRegistryConverter(srFactory.get(), ksqlConfig, properties.getSchemaId(), isKey)
: getConverter();
    // The translators are used in the serializer & deserializer only for JSON_SR formats
final ConnectDataTranslator dataTranslator = physicalSchema.isPresent()
? new ConnectSRSchemaDataTranslator(physicalSchema.get())
: new ConnectDataTranslator(schema);
final Supplier<Serializer<T>> serializer = () -> createSerializer(
targetType,
dataTranslator,
converter
);
final Deserializer<T> deserializer = createDeserializer(
ksqlConfig,
schema,
targetType,
dataTranslator,
converter
);
// Sanity check:
serializer.get();
return Serdes.serdeFrom(
new ThreadLocalSerializer<>(serializer),
deserializer
);
}
|
@Test
public void shouldUseOldJsonDeserializerOnJsonSrWhenJsonSchemaConverterIsDisabled() {
// Given
final ConnectSchema connectSchema = (ConnectSchema) SchemaBuilder.string().build();
when(config.getBoolean(KsqlConfig.KSQL_JSON_SR_CONVERTER_DESERIALIZER_ENABLED))
.thenReturn(false);
// When
final Serde<String> serde =
jsonFactory.createSerde(connectSchema, config, srFactory, String.class, false);
// Then
assertThat(serde.deserializer(), is(instanceOf(KsqlJsonDeserializer.class)));
}
|
public static boolean isCompositeType(LogicalType logicalType) {
if (logicalType instanceof DistinctType) {
return isCompositeType(((DistinctType) logicalType).getSourceType());
}
LogicalTypeRoot typeRoot = logicalType.getTypeRoot();
return typeRoot == STRUCTURED_TYPE || typeRoot == ROW;
}
|
@Test
void testIsCompositeTypeLegacySimpleType() {
DataType dataType = TypeConversions.fromLegacyInfoToDataType(Types.STRING);
assertThat(LogicalTypeChecks.isCompositeType(dataType.getLogicalType())).isFalse();
}
|
public void log(QueryLogParams params) {
_logger.debug("Broker Response: {}", params._response);
if (!(_logRateLimiter.tryAcquire() || shouldForceLog(params))) {
_numDroppedLogs.incrementAndGet();
return;
}
final StringBuilder queryLogBuilder = new StringBuilder();
for (QueryLogEntry value : QUERY_LOG_ENTRY_VALUES) {
value.format(queryLogBuilder, this, params);
queryLogBuilder.append(',');
}
// always log the query last - don't add this to the QueryLogEntry enum
queryLogBuilder.append("query=")
.append(StringUtils.substring(params._requestContext.getQuery(), 0, _maxQueryLengthToLog));
_logger.info(queryLogBuilder.toString());
if (_droppedLogRateLimiter.tryAcquire()) {
// use getAndSet to 0 so that there will be no race condition between
// loggers that increment this counter and this thread
long numDroppedLogsSinceLastLog = _numDroppedLogs.getAndSet(0);
if (numDroppedLogsSinceLastLog > 0) {
_logger.warn("{} logs were dropped. (log max rate per second: {})", numDroppedLogsSinceLastLog,
_logRateLimiter.getRate());
}
}
}
|
@Test
public void shouldOmitClientId() {
// Given:
Mockito.when(_logRateLimiter.tryAcquire()).thenReturn(true);
QueryLogger.QueryLogParams params = generateParams(false, 0, 456);
QueryLogger queryLogger = new QueryLogger(_logRateLimiter, 100, false, _logger, _droppedRateLimiter);
// When:
queryLogger.log(params);
// Then:
Assert.assertEquals(_infoLog.size(), 1);
Assert.assertFalse(_infoLog.get(0).contains("clientId"),
"did not expect to see clientId Logs. Got: " + _infoLog.get(0));
}
|
public boolean finishAndReleaseAll() {
return finish(true);
}
|
@Test
public void testFinishAndReleaseAll() {
ByteBuf in = Unpooled.buffer();
ByteBuf out = Unpooled.buffer();
try {
EmbeddedChannel channel = new EmbeddedChannel();
assertTrue(channel.writeInbound(in));
assertEquals(1, in.refCnt());
assertTrue(channel.writeOutbound(out));
assertEquals(1, out.refCnt());
assertTrue(channel.finishAndReleaseAll());
assertEquals(0, in.refCnt());
assertEquals(0, out.refCnt());
assertNull(channel.readInbound());
assertNull(channel.readOutbound());
} finally {
release(in, out);
}
}
|
public DirectExecutionContext getExecutionContext(
AppliedPTransform<?, ?, ?> application, StructuralKey<?> key) {
StepAndKey stepAndKey = StepAndKey.of(application, key);
return new DirectExecutionContext(
clock,
key,
(CopyOnAccessInMemoryStateInternals) applicationStateInternals.get(stepAndKey),
watermarkManager.getWatermarks(application));
}
|
@Test
public void getExecutionContextDifferentStepsIndependentState() {
StructuralKey<?> myKey = StructuralKey.of("foo", StringUtf8Coder.of());
DirectExecutionContext fooContext = context.getExecutionContext(impulseProducer, myKey);
StateTag<BagState<Integer>> intBag = StateTags.bag("myBag", VarIntCoder.of());
fooContext.getStepContext("s1").stateInternals().state(StateNamespaces.global(), intBag).add(1);
DirectExecutionContext barContext = context.getExecutionContext(downstreamProducer, myKey);
assertThat(
barContext
.getStepContext("s1")
.stateInternals()
.state(StateNamespaces.global(), intBag)
.read(),
emptyIterable());
}
|
@Override
public void execute(@Nonnull Runnable command) {
throwRejectedExecutionExceptionIfShutdown();
command.run();
}
|
@Test
void testExecute() {
final CompletableFuture<Thread> future = new CompletableFuture<>();
testTaskSubmissionBeforeShutdown(
testInstance ->
testInstance.execute(() -> future.complete(Thread.currentThread())));
assertThat(future).isCompletedWithValue(Thread.currentThread());
}
|
@Udf
public <T> Boolean contains(
@UdfParameter final String jsonArray,
@UdfParameter final T val
) {
try (JsonParser parser = PARSER_FACTORY.createParser(jsonArray)) {
if (parser.nextToken() != START_ARRAY) {
return false;
}
while (parser.nextToken() != null) {
final JsonToken token = parser.currentToken();
if (token == null) {
return val == null;
} else if (token == END_ARRAY) {
return false;
}
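      // skipChildren() steps over nested arrays and objects so only top-level elements are compared against val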
parser.skipChildren();
if (TOKEN_COMPAT.getOrDefault(token, foo -> false).test(val)) {
if (token == VALUE_NULL
|| (val != null && Objects.equals(parser.readValueAs(val.getClass()), val))) {
return true;
}
}
}
return false;
} catch (final IOException e) {
return false;
}
}
|
@Test
public void shouldFindLongsInJsonArray() {
assertEquals(true, jsonUdf.contains("[1]", 1L));
assertEquals(true, jsonUdf.contains("[1111111111111111]", 1111111111111111L));
assertEquals(true, jsonUdf.contains("[[222222222222222], 33333]", 33333L));
assertEquals(true, jsonUdf.contains("[{}, \"abc\", null, 1]", 1L));
assertEquals(false, jsonUdf.contains("[[222222222222222], 33333]", 222222222222222L));
assertEquals(false, jsonUdf.contains("[{}, \"abc\", null, [1]]", 1L));
assertEquals(false, jsonUdf.contains("[{}, \"abc\", null, {\"1\":1}]", 1L));
assertEquals(false, jsonUdf.contains("[1]", 1.0));
}
|
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
if ( list == null || list.isEmpty() ) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
} else {
try {
return FEELFnResult.ofResult(Collections.min(list, new InterceptNotComparableComparator()));
} catch (ClassCastException e) {
return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
}
}
}
|
@Test
void invokeNullList() {
FunctionTestUtil.assertResultError(minFunction.invoke((List) null), InvalidParametersEvent.class);
}
|
public static List<ValueMetaInterface> getValueMetaPluginClasses() throws KettlePluginException {
List<ValueMetaInterface> list = new ArrayList<ValueMetaInterface>();
List<PluginInterface> plugins = pluginRegistry.getPlugins( ValueMetaPluginType.class );
for ( PluginInterface plugin : plugins ) {
ValueMetaInterface valueMetaInterface = (ValueMetaInterface) pluginRegistry.loadClass( plugin );
list.add( valueMetaInterface );
}
return list;
}
|
@Test
public void testGetValueMetaPluginClasses() throws KettlePluginException {
List<ValueMetaInterface> dataTypes = ValueMetaFactory.getValueMetaPluginClasses();
boolean numberExists = false;
boolean stringExists = false;
boolean dateExists = false;
boolean booleanExists = false;
boolean integerExists = false;
boolean bignumberExists = false;
boolean serializableExists = false;
boolean binaryExists = false;
boolean timestampExists = false;
boolean inetExists = false;
for ( ValueMetaInterface obj : dataTypes ) {
if ( obj instanceof ValueMetaNumber ) {
numberExists = true;
}
if ( obj.getClass().equals( ValueMetaString.class ) ) {
stringExists = true;
}
if ( obj.getClass().equals( ValueMetaDate.class ) ) {
dateExists = true;
}
if ( obj.getClass().equals( ValueMetaBoolean.class ) ) {
booleanExists = true;
}
if ( obj.getClass().equals( ValueMetaInteger.class ) ) {
integerExists = true;
}
if ( obj.getClass().equals( ValueMetaBigNumber.class ) ) {
bignumberExists = true;
}
if ( obj.getClass().equals( ValueMetaSerializable.class ) ) {
serializableExists = true;
}
if ( obj.getClass().equals( ValueMetaBinary.class ) ) {
binaryExists = true;
}
if ( obj.getClass().equals( ValueMetaTimestamp.class ) ) {
timestampExists = true;
}
if ( obj.getClass().equals( ValueMetaInternetAddress.class ) ) {
inetExists = true;
}
}
assertTrue( numberExists );
assertTrue( stringExists );
assertTrue( dateExists );
assertTrue( booleanExists );
assertTrue( integerExists );
assertTrue( bignumberExists );
assertTrue( serializableExists );
assertTrue( binaryExists );
assertTrue( timestampExists );
assertTrue( inetExists );
}
|
public static boolean isBasicType(Object object) {
if (null == object) {
return false;
}
return ClassUtil.isBasicType(object.getClass());
}
|
@Test
public void isBasicTypeTest() {
int a = 1;
final boolean basicType = ObjectUtil.isBasicType(a);
assertTrue(basicType);
}
|
@Override
@Nullable
public int[] readIntArray(@Nonnull String fieldName) throws IOException {
return readIncompatibleField(fieldName, INT_ARRAY, super::readIntArray);
}
|
@Test
public void testReadIntArray() throws Exception {
assertNull(reader.readIntArray("NO SUCH FIELD"));
}
|
@Override
protected void addWordToStorage(String word, int frequency) {
throw new UnsupportedOperationException();
}
|
@Test(expected = UnsupportedOperationException.class)
public void testCanNotAddToStorage() {
mUnderTest.addWordToStorage("wording", 3);
}
|
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return delegate.invokeAll(tasks);
}
|
@Test
public void invokeAll1() throws InterruptedException {
underTest.invokeAll(callables, timeout, SECONDS);
verify(executorService).invokeAll(callables, timeout, SECONDS);
}
|
@Override
protected void validateDataImpl(TenantId tenantId, Customer customer) {
validateString("Customer title", customer.getTitle());
if (customer.getTitle().equals(CustomerServiceImpl.PUBLIC_CUSTOMER_TITLE)) {
throw new DataValidationException("'Public' title for customer is system reserved!");
}
if (!StringUtils.isEmpty(customer.getEmail())) {
validateEmail(customer.getEmail());
}
if (customer.getTenantId() == null) {
throw new DataValidationException("Customer should be assigned to tenant!");
} else {
if (!tenantService.tenantExists(customer.getTenantId())) {
throw new DataValidationException("Customer is referencing to non-existent tenant!");
}
}
}
|
@Test
void testValidateNameInvocation() {
Customer customer = new Customer();
customer.setTitle("Customer A");
customer.setTenantId(tenantId);
validator.validateDataImpl(tenantId, customer);
verify(validator).validateString("Customer title", customer.getTitle());
}
|
@Override
public void suspend() {
switch (state()) {
case CREATED:
transitToSuspend();
break;
case RESTORING:
transitToSuspend();
break;
case RUNNING:
try {
// use try-catch to ensure state transition to SUSPENDED even if user code throws in `Processor#close()`
closeTopology();
// we must clear the buffered records when suspending because upon resuming the consumer would
// re-fetch those records starting from the committed position
partitionGroup.clear();
} finally {
transitToSuspend();
}
break;
case SUSPENDED:
log.info("Skip suspending since state is {}", state());
break;
case CLOSED:
throw new IllegalStateException("Illegal state " + state() + " while suspending active task " + id);
default:
throw new IllegalStateException("Unknown state " + state() + " while suspending active task " + id);
}
}
|
@Test
public void shouldAlwaysSuspendCreatedTasks() {
when(stateManager.taskId()).thenReturn(taskId);
when(stateManager.taskType()).thenReturn(TaskType.ACTIVE);
task = createStatefulTask(createConfig("100"), true);
assertThat(task.state(), equalTo(CREATED));
task.suspend();
assertThat(task.state(), equalTo(SUSPENDED));
}
|
@Override
public void put(SchedulerQueryContext query)
throws OutOfCapacityException {
Preconditions.checkNotNull(query);
_queueLock.lock();
String groupName = _groupSelector.getSchedulerGroupName(query);
try {
SchedulerGroup groupContext = getOrCreateGroupContext(groupName);
checkGroupHasCapacity(groupContext);
query.setSchedulerGroupContext(groupContext);
groupContext.addLast(query);
_queryReaderCondition.signal();
} finally {
_queueLock.unlock();
}
}
|
@Test
public void testTakeWithLimits()
throws OutOfCapacityException, BrokenBarrierException, InterruptedException {
// Test that take() will not return query if that group is already using hardLimit resources
Map<String, Object> properties = new HashMap<>();
properties.put(ResourceManager.QUERY_WORKER_CONFIG_KEY, 40);
properties.put(ResourceManager.QUERY_RUNNER_CONFIG_KEY, 10);
properties.put(ResourceLimitPolicy.TABLE_THREADS_SOFT_LIMIT, 20);
properties.put(ResourceLimitPolicy.TABLE_THREADS_HARD_LIMIT, 80);
PinotConfiguration configuration = new PinotConfiguration(properties);
PolicyBasedResourceManager rm = new PolicyBasedResourceManager(configuration);
MultiLevelPriorityQueue queue = createQueue(configuration, rm);
queue.put(createQueryRequest(GROUP_ONE, METRICS));
queue.put(createQueryRequest(GROUP_ONE, METRICS));
queue.put(createQueryRequest(GROUP_TWO, METRICS));
// group one has higher priority but it's above soft thread limit
TestSchedulerGroup testGroupOne = GROUP_FACTORY._groupMap.get(GROUP_ONE);
TestSchedulerGroup testGroupTwo = GROUP_FACTORY._groupMap.get(GROUP_TWO);
testGroupOne.addReservedThreads(rm.getTableThreadsSoftLimit() + 1);
QueueReader reader = new QueueReader(queue);
reader.startAndWaitForRead();
assertEquals(reader._readQueries.size(), 1);
assertEquals(reader._readQueries.poll().getSchedulerGroup().name(), GROUP_TWO);
// add one more group two
queue.put(createQueryRequest(GROUP_TWO, METRICS));
reader = new QueueReader(queue);
reader.startAndWaitForRead();
assertEquals(reader._readQueries.size(), 1);
assertEquals(reader._readQueries.poll().getSchedulerGroup().name(), GROUP_TWO);
// add one more groupTwo and set groupTwo threads to higher than groupOne
queue.put(createQueryRequest(GROUP_TWO, METRICS));
testGroupTwo.addReservedThreads(testGroupOne.totalReservedThreads() + 1);
reader = new QueueReader(queue);
reader.startAndWaitForRead();
assertEquals(reader._readQueries.size(), 1);
assertEquals(reader._readQueries.poll().getSchedulerGroup().name(), GROUP_ONE);
// set groupOne above hard limit
testGroupOne.addReservedThreads(rm.getTableThreadsHardLimit());
reader = new QueueReader(queue);
reader.startAndWaitForRead();
assertEquals(reader._readQueries.size(), 1);
assertEquals(reader._readQueries.poll().getSchedulerGroup().name(), GROUP_TWO);
// all groups above hard limit
queue.put(createQueryRequest(GROUP_TWO, METRICS));
queue.put(createQueryRequest(GROUP_TWO, METRICS));
queue.put(createQueryRequest(GROUP_ONE, METRICS));
testGroupTwo.addReservedThreads(rm.getTableThreadsHardLimit());
reader = new QueueReader(queue);
reader.startAndWaitForQueueWakeup();
assertEquals(reader._readQueries.size(), 0);
// try again
sleepForQueueWakeup(queue);
assertEquals(reader._readQueries.size(), 0);
// now set thread limit lower for a group (aka. query finished)
testGroupTwo.releasedReservedThreads(testGroupTwo.totalReservedThreads());
sleepForQueueWakeup(queue);
assertEquals(reader._readQueries.size(), 1);
}
|
public Labels strimziSelectorLabels() {
Map<String, String> newLabels = new HashMap<>(3);
List<String> strimziSelectorLabels = new ArrayList<>(3);
strimziSelectorLabels.add(STRIMZI_CLUSTER_LABEL);
strimziSelectorLabels.add(STRIMZI_NAME_LABEL);
strimziSelectorLabels.add(STRIMZI_KIND_LABEL);
strimziSelectorLabels.add(STRIMZI_POOL_NAME_LABEL);
strimziSelectorLabels.forEach(key -> {
if (labels.containsKey(key)) newLabels.put(key, labels.get(key));
});
return new Labels(newLabels);
}
|
@Test
public void testStrimziSelectorLabels() {
Map<String, String> sourceMap = new HashMap<>(5);
sourceMap.put(Labels.STRIMZI_CLUSTER_LABEL, "my-cluster");
sourceMap.put("key1", "value1");
sourceMap.put(Labels.STRIMZI_KIND_LABEL, "Kafka");
sourceMap.put("key2", "value2");
sourceMap.put(Labels.STRIMZI_NAME_LABEL, "my-cluster-kafka");
sourceMap.put(Labels.STRIMZI_DISCOVERY_LABEL, "true");
Labels labels = Labels.fromMap(sourceMap);
Map<String, String> expected = new HashMap<>(2);
expected.put(Labels.STRIMZI_CLUSTER_LABEL, "my-cluster");
expected.put(Labels.STRIMZI_KIND_LABEL, "Kafka");
expected.put(Labels.STRIMZI_NAME_LABEL, "my-cluster-kafka");
assertThat(labels.strimziSelectorLabels().toMap(), is(expected));
}
|
public static InetAddress findConnectingAddress(
InetSocketAddress targetAddress, long maxWaitMillis, long startLoggingAfter)
throws IOException {
if (targetAddress == null) {
throw new NullPointerException("targetAddress must not be null");
}
if (maxWaitMillis <= 0) {
throw new IllegalArgumentException("Max wait time must be positive");
}
final long startTimeNanos = System.nanoTime();
long currentSleepTime = MIN_SLEEP_TIME;
long elapsedTimeMillis = 0;
final List<AddressDetectionState> strategies =
Collections.unmodifiableList(
Arrays.asList(
AddressDetectionState.LOCAL_HOST,
AddressDetectionState.ADDRESS,
AddressDetectionState.FAST_CONNECT,
AddressDetectionState.SLOW_CONNECT));
// loop while there is time left
while (elapsedTimeMillis < maxWaitMillis) {
boolean logging = elapsedTimeMillis >= startLoggingAfter;
if (logging) {
LOG.info("Trying to connect to " + targetAddress);
}
// Try each strategy in order
for (AddressDetectionState strategy : strategies) {
InetAddress address = findAddressUsingStrategy(strategy, targetAddress, logging);
if (address != null) {
return address;
}
}
// we have made a pass with all strategies over all interfaces
// sleep for a while before we make the next pass
elapsedTimeMillis = (System.nanoTime() - startTimeNanos) / 1_000_000;
long toWait = Math.min(maxWaitMillis - elapsedTimeMillis, currentSleepTime);
if (toWait > 0) {
if (logging) {
LOG.info("Could not connect. Waiting for {} msecs before next attempt", toWait);
} else {
LOG.debug(
"Could not connect. Waiting for {} msecs before next attempt", toWait);
}
try {
Thread.sleep(toWait);
} catch (InterruptedException e) {
throw new IOException("Connection attempts have been interrupted.");
}
}
// increase the exponential backoff timer
currentSleepTime = Math.min(2 * currentSleepTime, MAX_SLEEP_TIME);
}
// our attempts timed out. use the heuristic fallback
LOG.warn(
"Could not connect to {}. Selecting a local address using heuristics.",
targetAddress);
InetAddress heuristic =
findAddressUsingStrategy(AddressDetectionState.HEURISTIC, targetAddress, true);
if (heuristic != null) {
return heuristic;
} else {
LOG.warn(
"Could not find any IPv4 address that is not loopback or link-local. Using localhost address.");
return InetAddress.getLocalHost();
}
}
|
@Test
void testFindConnectingAddressWhenGetLocalHostThrows() throws Exception {
        try (MockedStatic<InetAddress> mocked = mockStatic(InetAddress.class)) {
mocked.when(InetAddress::getLocalHost)
.thenThrow(new UnknownHostException())
.thenCallRealMethod();
}
final InetAddress loopbackAddress = Inet4Address.getByName("127.0.0.1");
Thread socketServerThread;
try (ServerSocket socket = new ServerSocket(0, 1, loopbackAddress)) {
// Make sure that the thread will eventually die even if something else goes wrong
socket.setSoTimeout(0);
socketServerThread =
new Thread(
new Runnable() {
@Override
public void run() {
try {
NetUtils.acceptWithoutTimeout(socket);
} catch (IOException e) {
// ignore
}
}
});
socketServerThread.start();
final InetSocketAddress socketAddress =
new InetSocketAddress(loopbackAddress, socket.getLocalPort());
final InetAddress address =
ConnectionUtils.findConnectingAddress(socketAddress, 2000, 400);
// Make sure we got an address via alternative means
assertThat(address).isNotNull();
}
}
|
public static Date parse(String date, ParsePosition pos) throws ParseException {
Exception fail = null;
try {
int offset = pos.getIndex();
// extract year
int year = parseInt(date, offset, offset += 4);
if (checkOffset(date, offset, '-')) {
offset += 1;
}
// extract month
int month = parseInt(date, offset, offset += 2);
if (checkOffset(date, offset, '-')) {
offset += 1;
}
// extract day
int day = parseInt(date, offset, offset += 2);
// default time value
int hour = 0;
int minutes = 0;
int seconds = 0;
      // always use 0; otherwise the returned date would include the millis of the current time
int milliseconds = 0;
// if the value has no time component (and no time zone), we are done
boolean hasT = checkOffset(date, offset, 'T');
if (!hasT && (date.length() <= offset)) {
Calendar calendar = new GregorianCalendar(year, month - 1, day);
calendar.setLenient(false);
pos.setIndex(offset);
return calendar.getTime();
}
if (hasT) {
// extract hours, minutes, seconds and milliseconds
hour = parseInt(date, offset += 1, offset += 2);
if (checkOffset(date, offset, ':')) {
offset += 1;
}
minutes = parseInt(date, offset, offset += 2);
if (checkOffset(date, offset, ':')) {
offset += 1;
}
// second and milliseconds can be optional
if (date.length() > offset) {
char c = date.charAt(offset);
if (c != 'Z' && c != '+' && c != '-') {
seconds = parseInt(date, offset, offset += 2);
if (seconds > 59 && seconds < 63) {
seconds = 59; // truncate up to 3 leap seconds
}
// milliseconds can be optional in the format
if (checkOffset(date, offset, '.')) {
offset += 1;
int endOffset = indexOfNonDigit(date, offset + 1); // assume at least one digit
int parseEndOffset = Math.min(endOffset, offset + 3); // parse up to 3 digits
int fraction = parseInt(date, offset, parseEndOffset);
// compensate for "missing" digits
switch (parseEndOffset - offset) { // number of digits parsed
case 2:
milliseconds = fraction * 10;
break;
case 1:
milliseconds = fraction * 100;
break;
default:
milliseconds = fraction;
}
offset = endOffset;
}
}
}
}
// extract timezone
if (date.length() <= offset) {
throw new IllegalArgumentException("No time zone indicator");
}
TimeZone timezone = null;
char timezoneIndicator = date.charAt(offset);
if (timezoneIndicator == 'Z') {
timezone = TIMEZONE_UTC;
offset += 1;
} else if (timezoneIndicator == '+' || timezoneIndicator == '-') {
String timezoneOffset = date.substring(offset);
// When timezone has no minutes, we should append it, valid timezones are, for example:
// +00:00, +0000 and +00
timezoneOffset = timezoneOffset.length() >= 5 ? timezoneOffset : timezoneOffset + "00";
offset += timezoneOffset.length();
// 18-Jun-2015, tatu: Minor simplification, skip offset of "+0000"/"+00:00"
if (timezoneOffset.equals("+0000") || timezoneOffset.equals("+00:00")) {
timezone = TIMEZONE_UTC;
} else {
// 18-Jun-2015, tatu: Looks like offsets only work from GMT, not UTC...
// not sure why, but that's the way it looks. Further, Javadocs for
// `java.util.TimeZone` specifically instruct use of GMT as base for
// custom timezones... odd.
String timezoneId = "GMT" + timezoneOffset;
// String timezoneId = "UTC" + timezoneOffset;
timezone = TimeZone.getTimeZone(timezoneId);
String act = timezone.getID();
if (!act.equals(timezoneId)) {
/* 22-Jan-2015, tatu: Looks like canonical version has colons, but we may be given
* one without. If so, don't sweat.
* Yes, very inefficient. Hopefully not hit often.
* If it becomes a perf problem, add 'loose' comparison instead.
*/
String cleaned = act.replace(":", "");
if (!cleaned.equals(timezoneId)) {
throw new IndexOutOfBoundsException(
"Mismatching time zone indicator: "
+ timezoneId
+ " given, resolves to "
+ timezone.getID());
}
}
}
} else {
throw new IndexOutOfBoundsException(
"Invalid time zone indicator '" + timezoneIndicator + "'");
}
Calendar calendar = new GregorianCalendar(timezone);
calendar.setLenient(false);
calendar.set(Calendar.YEAR, year);
calendar.set(Calendar.MONTH, month - 1);
calendar.set(Calendar.DAY_OF_MONTH, day);
calendar.set(Calendar.HOUR_OF_DAY, hour);
calendar.set(Calendar.MINUTE, minutes);
calendar.set(Calendar.SECOND, seconds);
calendar.set(Calendar.MILLISECOND, milliseconds);
pos.setIndex(offset);
return calendar.getTime();
// If we get a ParseException it'll already have the right message/offset.
// Other exception types can convert here.
} catch (IndexOutOfBoundsException | IllegalArgumentException e) {
fail = e;
}
String input = (date == null) ? null : ('"' + date + '"');
String msg = fail.getMessage();
if (msg == null || msg.isEmpty()) {
msg = "(" + fail.getClass().getName() + ")";
}
ParseException ex =
new ParseException("Failed to parse date [" + input + "]: " + msg, pos.getIndex());
ex.initCause(fail);
throw ex;
}
|
@Test
@SuppressWarnings("UndefinedEquals")
public void testDateParseWithDefaultTimezone() throws ParseException {
String dateStr = "2018-06-25";
Date date = ISO8601Utils.parse(dateStr, new ParsePosition(0));
Date expectedDate = new GregorianCalendar(2018, Calendar.JUNE, 25).getTime();
assertThat(date).isEqualTo(expectedDate);
}
|
@Override public String service() {
// MethodDescriptor.getServiceName() is not in our floor version: gRPC 1.2
return GrpcParser.service(call.getMethodDescriptor().getFullMethodName());
}
|
@Test void service() {
when(call.getMethodDescriptor()).thenReturn(methodDescriptor);
assertThat(request.service()).isEqualTo("helloworld.Greeter");
}
|
public QueueConnection queueConnection(QueueConnection connection) {
// It is common to implement both interfaces
if (connection instanceof XAQueueConnection) {
return xaQueueConnection((XAQueueConnection) connection);
}
return TracingConnection.create(connection, this);
}
|
@Test void queueConnection_doesntDoubleWrap() {
QueueConnection wrapped = jmsTracing.queueConnection(mock(QueueConnection.class));
assertThat(jmsTracing.queueConnection(wrapped))
.isSameAs(wrapped);
}
|
@Override
public URI toURI() {
throw new UnsupportedOperationException("Not implemented");
}
|
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testToURI() {
fs.getFile("nonsuch.txt").toURI();
}
|
public static <T> Write<T> delete() {
return Write.<T>builder(MutationType.DELETE).build();
}
|
@Test
public void testDelete() throws Exception {
List<Row> results = getRows(CASSANDRA_TABLE);
assertEquals(NUM_ROWS, results.size());
Scientist einstein = new Scientist();
einstein.id = 0;
einstein.department = "phys";
einstein.name = "Einstein";
pipeline
.apply(Create.of(einstein))
.apply(
CassandraIO.<Scientist>delete()
.withHosts(Collections.singletonList(CASSANDRA_HOST))
.withPort(cassandraPort)
.withKeyspace(CASSANDRA_KEYSPACE)
.withEntity(Scientist.class));
pipeline.run();
results = getRows(CASSANDRA_TABLE);
assertEquals(NUM_ROWS - 1, results.size());
    // re-insert the deleted row so the test remains self-contained
session.execute(
String.format(
"INSERT INTO %s.%s(person_department, person_id, person_name) values("
+ "'phys', "
+ einstein.id
+ ", '"
+ einstein.name
+ "');",
CASSANDRA_KEYSPACE,
CASSANDRA_TABLE));
}
|
public String getGreeting() {
return "Hello World!";
}
|
@Test void appHasAGreeting() {
App classUnderTest = new App();
assertNotNull(classUnderTest.getGreeting(), "app should have a greeting");
}
|
@VisibleForTesting
static Optional<Catalog> loadCatalog(Configuration conf, String catalogName) {
String catalogType = getCatalogType(conf, catalogName);
if (NO_CATALOG_TYPE.equalsIgnoreCase(catalogType)) {
return Optional.empty();
} else {
String name = catalogName == null ? ICEBERG_DEFAULT_CATALOG_NAME : catalogName;
return Optional.of(
CatalogUtil.buildIcebergCatalog(
name, getCatalogProperties(conf, name, catalogType), conf));
}
}
|
@Test
public void testLoadCatalogLocation() {
assertThat(Catalogs.loadCatalog(conf, Catalogs.ICEBERG_HADOOP_TABLE_NAME)).isNotPresent();
}
|
public IssuesChangesNotification newIssuesChangesNotification(Set<DefaultIssue> issues, Map<String, UserDto> assigneesByUuid) {
AnalysisChange change = new AnalysisChange(analysisMetadataHolder.getAnalysisDate());
Set<ChangedIssue> changedIssues = issues.stream()
.map(issue -> new ChangedIssue.Builder(issue.key())
.setAssignee(getAssignee(issue.assignee(), assigneesByUuid))
.setNewStatus(issue.status())
.setNewIssueStatus(issue.status() != null ? IssueStatus.of(issue.status(), issue.resolution()) : null)
.setRule(getRuleByRuleKey(issue.ruleKey()))
.setProject(getProject())
.build())
.collect(Collectors.toSet());
return issuesChangesSerializer.serialize(new IssuesChangesNotificationBuilder(changedIssues, change));
}
|
@Test
public void newIssuesChangesNotification_fails_with_ISE_if_branch_has_not_been_set() {
RuleKey ruleKey = RuleKey.of("foo", "bar");
DefaultIssue issue = new DefaultIssue()
.setRuleKey(ruleKey);
Map<String, UserDto> assigneesByUuid = nonEmptyAssigneesByUuid();
ruleRepository.add(ruleKey);
analysisMetadata.setAnalysisDate(new Random().nextLong());
treeRootHolder.setRoot(ReportComponent.builder(PROJECT, 1).build());
assertThatThrownBy(() -> underTest.newIssuesChangesNotification(ImmutableSet.of(issue), assigneesByUuid))
.isInstanceOf(IllegalStateException.class)
.hasMessage("Branch has not been set");
}
|
@Override
public ApiResult<TopicPartition, ListOffsetsResultInfo> handleResponse(
Node broker,
Set<TopicPartition> keys,
AbstractResponse abstractResponse
) {
ListOffsetsResponse response = (ListOffsetsResponse) abstractResponse;
Map<TopicPartition, ListOffsetsResultInfo> completed = new HashMap<>();
Map<TopicPartition, Throwable> failed = new HashMap<>();
List<TopicPartition> unmapped = new ArrayList<>();
Set<TopicPartition> retriable = new HashSet<>();
for (ListOffsetsTopicResponse topic : response.topics()) {
for (ListOffsetsPartitionResponse partition : topic.partitions()) {
TopicPartition topicPartition = new TopicPartition(topic.name(), partition.partitionIndex());
Errors error = Errors.forCode(partition.errorCode());
if (!offsetTimestampsByPartition.containsKey(topicPartition)) {
log.warn("ListOffsets response includes unknown topic partition {}", topicPartition);
} else if (error == Errors.NONE) {
Optional<Integer> leaderEpoch = (partition.leaderEpoch() == ListOffsetsResponse.UNKNOWN_EPOCH)
? Optional.empty()
: Optional.of(partition.leaderEpoch());
completed.put(
topicPartition,
new ListOffsetsResultInfo(partition.offset(), partition.timestamp(), leaderEpoch));
} else {
handlePartitionError(topicPartition, error, failed, unmapped, retriable);
}
}
}
// Sanity-check if the current leader for these partitions returned results for all of them
for (TopicPartition topicPartition : keys) {
if (unmapped.isEmpty()
&& !completed.containsKey(topicPartition)
&& !failed.containsKey(topicPartition)
&& !retriable.contains(topicPartition)
) {
ApiException sanityCheckException = new ApiException(
"The response from broker " + broker.id() +
" did not contain a result for topic partition " + topicPartition);
log.error(
"ListOffsets request for topic partition {} failed sanity check",
topicPartition,
sanityCheckException);
failed.put(topicPartition, sanityCheckException);
}
}
return new ApiResult<>(completed, failed, unmapped);
}
|
@Test
public void testHandleSuccessfulResponse() {
ApiResult<TopicPartition, ListOffsetsResultInfo> result =
handleResponse(createResponse(emptyMap()));
assertResult(result, offsetTimestampsByPartition.keySet(), emptyMap(), emptyList(), emptySet());
}
|
@GetMapping("/{memberId}/products")
public ResponseEntity<List<ProductByMemberResponse>> findProductHistories(
@PathVariable("memberId") final Long memberId,
@AuthMember final Long authId
) {
return ResponseEntity.ok(memberService.findProductHistories(memberId, authId));
}
|
@Test
void 상품_판매_내역을_조회한다() throws Exception {
// given
when(memberService.findProductHistories(anyLong(), anyLong()))
.thenReturn(List.of(new ProductByMemberResponse(1L, 1L, "상품명", 10000, Location.BUILDING_CENTER, ProductStatus.WAITING, LocalDateTime.now())));
// when & then
mockMvc.perform(get("/api/members/{memberId}/products", 1L)
.header(HttpHeaders.AUTHORIZATION, "Bearer tokenInfo~")
).andExpect(status().isOk())
.andDo(customDocument("find_member_products",
requestHeaders(
headerWithName(AUTHORIZATION).description("유저 토큰 정보")
),
responseFields(
fieldWithPath("[0].productId").description("상품 id"),
fieldWithPath("[0].sellerId").description("판매자 id"),
fieldWithPath("[0].title").description("상품 제목"),
fieldWithPath("[0].price").description("상품 가격"),
fieldWithPath("[0].location").description("상품 거래 장소"),
fieldWithPath("[0].productStatus").description("상품 거래 상태"),
fieldWithPath("[0].createTime").description("상품 업로드 날짜")
)
));
}
|
public static void validatePolymorhpicInfo(PolymorphicInfo info) {
if (info.getPcaVersion() != 1) {
logger.error("Unsupported PCA version {}", info.getPcaVersion());
throw new ClientException("Polymorphic info is not correct");
}
int polymorphicFlags = info.getFlags().intValue();
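        // flag bit 0x20 marks a randomized PIP and bit 0x04 a compressed encoding (matching the checks below)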
boolean randomizedPip = (polymorphicFlags & 32) != 0;
boolean compressedEncoding = (polymorphicFlags & 4) != 0;
if (!randomizedPip || !compressedEncoding) {
logger.error("Polymorphic flags incorrect randomizedPip: {} compressedEncoding: {}",
randomizedPip, compressedEncoding);
throw new ClientException("Polymorphic info is not correct");
}
}
|
@Test
public void invalidPolymorphicRandomizedPip() {
final PolymorphicInfo info = mapper.read(
Hex.decode("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"),
PolymorphicInfo.class);
ClientException thrown = assertThrows(ClientException.class, () -> CardValidations.validatePolymorhpicInfo(info));
assertEquals("Polymorphic info is not correct", thrown.getMessage());
}
|
private <T> T accept(Expression<T> expr) {
return expr.accept(this);
}
|
@Test
public void testAnd() throws Exception {
final Expr.Greater trueExpr = Expr.Greater.create(Expr.NumberValue.create(2), Expr.NumberValue.create(1));
final Expr.Greater falseExpr = Expr.Greater.create(Expr.NumberValue.create(1), Expr.NumberValue.create(2));
assertThat(Expr.And.create(trueExpr, trueExpr).accept(new BooleanNumberConditionsVisitor()))
.isTrue();
assertThat(Expr.And.create(trueExpr, falseExpr).accept(new BooleanNumberConditionsVisitor()))
.isFalse();
assertThat(Expr.And.create(falseExpr, trueExpr).accept(new BooleanNumberConditionsVisitor()))
.isFalse();
assertThat(Expr.And.create(falseExpr, falseExpr).accept(new BooleanNumberConditionsVisitor()))
.isFalse();
assertThat(loadCondition("condition-and.json").accept(new BooleanNumberConditionsVisitor()))
.isTrue();
}
|
public static boolean webSocketHostPathMatches(String hostPath, String targetPath) {
boolean exactPathMatch = true;
if (ObjectHelper.isEmpty(hostPath) || ObjectHelper.isEmpty(targetPath)) {
// This scenario should not really be possible as the input args come from the vertx-websocket consumer / producer URI
return false;
}
// Paths ending with '*' are Vert.x wildcard routes so match on the path prefix
if (hostPath.endsWith("*")) {
exactPathMatch = false;
hostPath = hostPath.substring(0, hostPath.lastIndexOf('*'));
}
String normalizedHostPath = HttpUtils.normalizePath(hostPath + "/");
String normalizedTargetPath = HttpUtils.normalizePath(targetPath + "/");
String[] hostPathElements = normalizedHostPath.split("/");
String[] targetPathElements = normalizedTargetPath.split("/");
if (exactPathMatch && hostPathElements.length != targetPathElements.length) {
return false;
}
if (exactPathMatch) {
return normalizedHostPath.equals(normalizedTargetPath);
} else {
return normalizedTargetPath.startsWith(normalizedHostPath);
}
}
|
@Test
void webSocketHostNullPathNotMatches() {
String hostPath = null;
String targetPath = null;
assertFalse(VertxWebsocketHelper.webSocketHostPathMatches(hostPath, targetPath));
}
|
public static AWSCredentialsProvider deserialize(String awsCredentialsProviderSerialized) {
ObjectMapper om = new ObjectMapper();
om.registerModule(new AwsModule());
try {
return om.readValue(awsCredentialsProviderSerialized, AWSCredentialsProvider.class);
} catch (IOException e) {
throw new IllegalArgumentException(
"AwsCredentialsProvider can not be deserialized from Json", e);
}
}
|
@Test(expected = IllegalArgumentException.class)
public void testFailOnAWSCredentialsProviderDeserialization() {
deserialize("invalid string");
}
|
public static <N> ImmutableGraph<N> singletonGraph(Graph<N> graph, N node) {
final MutableGraph<N> mutableGraph = GraphBuilder.from(graph).build();
mutableGraph.addNode(node);
return ImmutableGraph.copyOf(mutableGraph);
}
|
@Test
public void singletonGraphWithTemplate() {
final MutableGraph<String> templateGraph = GraphBuilder
.directed()
.allowsSelfLoops(true)
.build();
final ImmutableGraph<String> singletonGraph = Graphs.singletonGraph(templateGraph, "Test");
assertThat(singletonGraph.isDirected()).isTrue();
assertThat(singletonGraph.allowsSelfLoops()).isTrue();
assertThat(singletonGraph.nodes()).containsExactly("Test");
assertThat(singletonGraph.edges()).isEmpty();
}
|
@Override
public BufferWithSubpartition getNextBuffer(@Nullable MemorySegment transitBuffer) {
checkState(isFinished, "Sort buffer is not ready to be read.");
checkState(!isReleased, "Sort buffer is already released.");
if (!hasRemaining()) {
freeSegments.add(transitBuffer);
return null;
}
int numBytesRead = 0;
Buffer.DataType bufferDataType = Buffer.DataType.DATA_BUFFER;
int currentReadingSubpartitionId = subpartitionReadOrder[readOrderIndex];
do {
// Get the buffer index and offset from the index entry
int toReadBufferIndex = getSegmentIndexFromPointer(readIndexEntryAddress);
int toReadOffsetInBuffer = getSegmentOffsetFromPointer(readIndexEntryAddress);
            // Get the lengthAndDataType buffer according to the buffer index
MemorySegment toReadBuffer = segments.get(toReadBufferIndex);
// From the lengthAndDataType buffer, read and get the length and the data type
long lengthAndDataType = toReadBuffer.getLong(toReadOffsetInBuffer);
int recordLength = getSegmentIndexFromPointer(lengthAndDataType);
Buffer.DataType dataType =
Buffer.DataType.values()[getSegmentOffsetFromPointer(lengthAndDataType)];
// If the buffer is an event and some data has been read, return it directly to ensure
// that the event will occupy one buffer independently
if (dataType.isEvent() && numBytesRead > 0) {
break;
}
bufferDataType = dataType;
// Get the next index entry address and move the read position forward
long nextReadIndexEntryAddress = toReadBuffer.getLong(toReadOffsetInBuffer + 8);
toReadOffsetInBuffer += INDEX_ENTRY_SIZE;
// Allocate a temp buffer for the event, recycle the original buffer
if (bufferDataType.isEvent()) {
freeSegments.add(transitBuffer);
transitBuffer = MemorySegmentFactory.allocateUnpooledSegment(recordLength);
}
if (!isPartialRecordAllowed
&& !isLastBufferPartialRecord
&& numBytesRead > 0
&& numBytesRead + recordLength > transitBuffer.size()) {
break;
}
// Start reading data from the data buffer
numBytesRead +=
copyRecordOrEvent(
transitBuffer,
numBytesRead,
toReadBufferIndex,
toReadOffsetInBuffer,
recordLength);
if (recordRemainingBytes == 0) {
// move to next subpartition if the current subpartition has been finished
if (readIndexEntryAddress
== lastIndexEntryAddresses[currentReadingSubpartitionId]) {
isLastBufferPartialRecord = false;
updateReadSubpartitionAndIndexEntryAddress();
break;
}
readIndexEntryAddress = nextReadIndexEntryAddress;
if (isLastBufferPartialRecord) {
isLastBufferPartialRecord = false;
break;
}
} else {
isLastBufferPartialRecord = true;
}
} while (numBytesRead < transitBuffer.size() && bufferDataType.isBuffer());
if (!isPartialRecordAllowed
&& !isLastBufferPartialRecord
&& bufferDataType == Buffer.DataType.DATA_BUFFER) {
bufferDataType = Buffer.DataType.DATA_BUFFER_WITH_CLEAR_END;
}
numTotalBytesRead += numBytesRead;
return new BufferWithSubpartition(
new NetworkBuffer(
transitBuffer,
bufferDataType.isBuffer() ? bufferRecycler : FreeingBufferRecycler.INSTANCE,
bufferDataType,
numBytesRead),
currentReadingSubpartitionId);
}
|
@Test
void testBufferIsRecycledWhenSortBufferIsEmpty() throws Exception {
int numSubpartitions = 10;
int bufferPoolSize = 512;
int numBuffersForSort = 20;
NetworkBufferPool globalPool = new NetworkBufferPool(bufferPoolSize, BUFFER_SIZE_BYTES);
BufferPool bufferPool = globalPool.createBufferPool(bufferPoolSize, bufferPoolSize);
LinkedList<MemorySegment> segments = new LinkedList<>();
for (int i = 0; i < numBuffersForSort; ++i) {
segments.add(bufferPool.requestMemorySegmentBlocking());
}
TieredStorageSortBuffer sortBuffer =
new TieredStorageSortBuffer(
segments,
bufferPool,
numSubpartitions,
BUFFER_SIZE_BYTES,
numBuffersForSort,
true);
MemorySegment memorySegment = segments.poll();
sortBuffer.finish();
assertThat(sortBuffer.getNextBuffer(memorySegment)).isNull();
assertThat(bufferPool.bestEffortGetNumOfUsedBuffers()).isEqualTo(numBuffersForSort);
}
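A minimal drain loop for the sort buffer above — a hedged sketch only: the helper name drainSortBuffer and the consumption step are illustrative, and it assumes BufferWithSubpartition#getBuffer and Buffer#recycleBuffer behave as in the snippets; it is not part of the original dataset.
private static void drainSortBuffer(TieredStorageSortBuffer sortBuffer, BufferPool bufferPool)
        throws Exception {
    // After finish(), getNextBuffer is fed a fresh transit segment on each call until it returns
    // null; when nothing remains, the sort buffer keeps the unused transit segment internally.
    while (true) {
        MemorySegment transitBuffer = bufferPool.requestMemorySegmentBlocking();
        BufferWithSubpartition next = sortBuffer.getNextBuffer(transitBuffer);
        if (next == null) {
            break;
        }
        // Hand the buffer to the downstream consumer here, then recycle it once it is no longer needed.
        next.getBuffer().recycleBuffer();
    }
}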
|
@VisibleForTesting
public static void validateIngestionConfig(TableConfig tableConfig, @Nullable Schema schema) {
IngestionConfig ingestionConfig = tableConfig.getIngestionConfig();
if (ingestionConfig != null) {
String tableNameWithType = tableConfig.getTableName();
// Batch
if (ingestionConfig.getBatchIngestionConfig() != null) {
BatchIngestionConfig cfg = ingestionConfig.getBatchIngestionConfig();
List<Map<String, String>> batchConfigMaps = cfg.getBatchConfigMaps();
try {
if (CollectionUtils.isNotEmpty(batchConfigMaps)) {
// Validate that BatchConfig can be created
batchConfigMaps.forEach(b -> new BatchConfig(tableNameWithType, b));
}
} catch (Exception e) {
throw new IllegalStateException("Could not create BatchConfig using the batchConfig map", e);
}
if (tableConfig.isDimTable()) {
Preconditions.checkState(cfg.getSegmentIngestionType().equalsIgnoreCase("REFRESH"),
"Dimension tables must have segment ingestion type REFRESH");
}
}
if (tableConfig.isDimTable()) {
Preconditions.checkState(ingestionConfig.getBatchIngestionConfig() != null,
"Dimension tables must have batch ingestion configuration");
}
// Stream
// stream config map can either be in ingestion config or indexing config. cannot be in both places
if (ingestionConfig.getStreamIngestionConfig() != null) {
IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
Preconditions.checkState(indexingConfig == null || MapUtils.isEmpty(indexingConfig.getStreamConfigs()),
"Should not use indexingConfig#getStreamConfigs if ingestionConfig#StreamIngestionConfig is provided");
List<Map<String, String>> streamConfigMaps = ingestionConfig.getStreamIngestionConfig().getStreamConfigMaps();
Preconditions.checkState(streamConfigMaps.size() == 1, "Only 1 stream is supported in REALTIME table");
}
// Filter config
FilterConfig filterConfig = ingestionConfig.getFilterConfig();
if (filterConfig != null) {
String filterFunction = filterConfig.getFilterFunction();
if (filterFunction != null) {
if (_disableGroovy && FunctionEvaluatorFactory.isGroovyExpression(filterFunction)) {
throw new IllegalStateException(
"Groovy filter functions are disabled for table config. Found '" + filterFunction + "'");
}
try {
FunctionEvaluatorFactory.getExpressionEvaluator(filterFunction);
} catch (Exception e) {
throw new IllegalStateException("Invalid filter function " + filterFunction, e);
}
}
}
// Aggregation configs
List<AggregationConfig> aggregationConfigs = ingestionConfig.getAggregationConfigs();
Set<String> aggregationSourceColumns = new HashSet<>();
if (!CollectionUtils.isEmpty(aggregationConfigs)) {
Preconditions.checkState(!tableConfig.getIndexingConfig().isAggregateMetrics(),
"aggregateMetrics cannot be set with AggregationConfig");
Set<String> aggregationColumns = new HashSet<>();
for (AggregationConfig aggregationConfig : aggregationConfigs) {
String columnName = aggregationConfig.getColumnName();
String aggregationFunction = aggregationConfig.getAggregationFunction();
if (columnName == null || aggregationFunction == null) {
throw new IllegalStateException(
"columnName/aggregationFunction cannot be null in AggregationConfig " + aggregationConfig);
}
FieldSpec fieldSpec = null;
if (schema != null) {
fieldSpec = schema.getFieldSpecFor(columnName);
Preconditions.checkState(fieldSpec != null, "The destination column '" + columnName
+ "' of the aggregation function must be present in the schema");
Preconditions.checkState(fieldSpec.getFieldType() == FieldSpec.FieldType.METRIC,
"The destination column '" + columnName + "' of the aggregation function must be a metric column");
}
if (!aggregationColumns.add(columnName)) {
throw new IllegalStateException("Duplicate aggregation config found for column '" + columnName + "'");
}
ExpressionContext expressionContext;
try {
expressionContext = RequestContextUtils.getExpression(aggregationConfig.getAggregationFunction());
} catch (Exception e) {
throw new IllegalStateException(
"Invalid aggregation function '" + aggregationFunction + "' for column '" + columnName + "'", e);
}
Preconditions.checkState(expressionContext.getType() == ExpressionContext.Type.FUNCTION,
"aggregation function must be a function for: %s", aggregationConfig);
FunctionContext functionContext = expressionContext.getFunction();
AggregationFunctionType functionType =
AggregationFunctionType.getAggregationFunctionType(functionContext.getFunctionName());
validateIngestionAggregation(functionType);
List<ExpressionContext> arguments = functionContext.getArguments();
int numArguments = arguments.size();
if (functionType == DISTINCTCOUNTHLL) {
Preconditions.checkState(numArguments >= 1 && numArguments <= 2,
"DISTINCT_COUNT_HLL can have at most two arguments: %s", aggregationConfig);
if (numArguments == 2) {
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of DISTINCT_COUNT_HLL must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of DISTINCT_COUNT_HLL must be a number: %s", aggregationConfig);
}
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL must be BYTES: %s", aggregationConfig);
}
} else if (functionType == DISTINCTCOUNTHLLPLUS) {
Preconditions.checkState(numArguments >= 1 && numArguments <= 3,
"DISTINCT_COUNT_HLL_PLUS can have at most three arguments: %s", aggregationConfig);
if (numArguments == 2) {
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of DISTINCT_COUNT_HLL_PLUS must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of DISTINCT_COUNT_HLL_PLUS must be a number: %s", aggregationConfig);
}
if (numArguments == 3) {
ExpressionContext thirdArgument = arguments.get(2);
Preconditions.checkState(thirdArgument.getType() == ExpressionContext.Type.LITERAL,
"Third argument of DISTINCT_COUNT_HLL_PLUS must be literal: %s", aggregationConfig);
String literal = thirdArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Third argument of DISTINCT_COUNT_HLL_PLUS must be a number: %s", aggregationConfig);
}
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL_PLUS must be BYTES: %s", aggregationConfig);
}
} else if (functionType == SUMPRECISION) {
Preconditions.checkState(numArguments >= 2 && numArguments <= 3,
"SUM_PRECISION must specify precision (required), scale (optional): %s", aggregationConfig);
ExpressionContext secondArgument = arguments.get(1);
Preconditions.checkState(secondArgument.getType() == ExpressionContext.Type.LITERAL,
"Second argument of SUM_PRECISION must be literal: %s", aggregationConfig);
String literal = secondArgument.getLiteral().getStringValue();
Preconditions.checkState(StringUtils.isNumeric(literal),
"Second argument of SUM_PRECISION must be a number: %s", aggregationConfig);
if (fieldSpec != null) {
DataType dataType = fieldSpec.getDataType();
Preconditions.checkState(dataType == DataType.BIG_DECIMAL || dataType == DataType.BYTES,
"Result type for DISTINCT_COUNT_HLL must be BIG_DECIMAL or BYTES: %s", aggregationConfig);
}
} else {
Preconditions.checkState(numArguments == 1, "%s can only have one argument: %s", functionType,
aggregationConfig);
}
ExpressionContext firstArgument = arguments.get(0);
Preconditions.checkState(firstArgument.getType() == ExpressionContext.Type.IDENTIFIER,
"First argument of aggregation function: %s must be identifier, got: %s", functionType,
firstArgument.getType());
aggregationSourceColumns.add(firstArgument.getIdentifier());
}
if (schema != null) {
Preconditions.checkState(new HashSet<>(schema.getMetricNames()).equals(aggregationColumns),
"all metric columns must be aggregated");
}
        // This is required by MutableSegmentImpl.enableMetricsAggregationIfPossible().
        // That code disables ingestion aggregation unless every metric column is a no-dictionary column.
        // If that only happens after the table has already been created, all future aggregated
        // values will just be the default value.
Map<String, DictionaryIndexConfig> configPerCol = StandardIndexes.dictionary().getConfig(tableConfig, schema);
aggregationColumns.forEach(column -> {
DictionaryIndexConfig dictConfig = configPerCol.get(column);
Preconditions.checkState(dictConfig != null && dictConfig.isDisabled(),
"Aggregated column: %s must be a no-dictionary column", column);
});
}
// Enrichment configs
List<EnrichmentConfig> enrichmentConfigs = ingestionConfig.getEnrichmentConfigs();
if (enrichmentConfigs != null) {
for (EnrichmentConfig enrichmentConfig : enrichmentConfigs) {
RecordEnricherRegistry.validateEnrichmentConfig(enrichmentConfig,
new RecordEnricherValidationConfig(_disableGroovy));
}
}
// Transform configs
List<TransformConfig> transformConfigs = ingestionConfig.getTransformConfigs();
if (transformConfigs != null) {
Set<String> transformColumns = new HashSet<>();
for (TransformConfig transformConfig : transformConfigs) {
String columnName = transformConfig.getColumnName();
String transformFunction = transformConfig.getTransformFunction();
if (columnName == null || transformFunction == null) {
throw new IllegalStateException(
"columnName/transformFunction cannot be null in TransformConfig " + transformConfig);
}
if (!transformColumns.add(columnName)) {
throw new IllegalStateException("Duplicate transform config found for column '" + columnName + "'");
}
if (schema != null) {
Preconditions.checkState(
schema.getFieldSpecFor(columnName) != null || aggregationSourceColumns.contains(columnName),
"The destination column '" + columnName
+ "' of the transform function must be present in the schema or as a source column for "
+ "aggregations");
}
FunctionEvaluator expressionEvaluator;
if (_disableGroovy && FunctionEvaluatorFactory.isGroovyExpression(transformFunction)) {
throw new IllegalStateException(
"Groovy transform functions are disabled for table config. Found '" + transformFunction
+ "' for column '" + columnName + "'");
}
try {
expressionEvaluator = FunctionEvaluatorFactory.getExpressionEvaluator(transformFunction);
} catch (Exception e) {
throw new IllegalStateException(
"Invalid transform function '" + transformFunction + "' for column '" + columnName + "'", e);
}
List<String> arguments = expressionEvaluator.getArguments();
if (arguments.contains(columnName)) {
throw new IllegalStateException(
"Arguments of a transform function '" + arguments + "' cannot contain the destination column '"
+ columnName + "'");
}
}
}
// Complex configs
ComplexTypeConfig complexTypeConfig = ingestionConfig.getComplexTypeConfig();
if (complexTypeConfig != null && schema != null) {
Map<String, String> prefixesToRename = complexTypeConfig.getPrefixesToRename();
if (MapUtils.isNotEmpty(prefixesToRename)) {
Set<String> fieldNames = schema.getColumnNames();
for (String prefix : prefixesToRename.keySet()) {
for (String field : fieldNames) {
Preconditions.checkState(!field.startsWith(prefix),
"Fields in the schema may not begin with any prefix specified in the prefixesToRename"
+ " config. Name conflict with field: " + field + " and prefix: " + prefix);
}
}
}
}
SchemaConformingTransformerConfig schemaConformingTransformerConfig =
ingestionConfig.getSchemaConformingTransformerConfig();
if (null != schemaConformingTransformerConfig && null != schema) {
SchemaConformingTransformer.validateSchema(schema, schemaConformingTransformerConfig);
}
SchemaConformingTransformerV2Config schemaConformingTransformerV2Config =
ingestionConfig.getSchemaConformingTransformerV2Config();
if (null != schemaConformingTransformerV2Config && null != schema) {
SchemaConformingTransformerV2.validateSchema(schema, schemaConformingTransformerV2Config);
}
}
}
|
@Test
public void ingestionBatchConfigsTest() {
Map<String, String> batchConfigMap = new HashMap<>();
batchConfigMap.put(BatchConfigProperties.INPUT_DIR_URI, "s3://foo");
batchConfigMap.put(BatchConfigProperties.OUTPUT_DIR_URI, "gs://bar");
batchConfigMap.put(BatchConfigProperties.INPUT_FS_CLASS, "org.foo.S3FS");
batchConfigMap.put(BatchConfigProperties.OUTPUT_FS_CLASS, "org.foo.GcsFS");
batchConfigMap.put(BatchConfigProperties.INPUT_FORMAT, "avro");
batchConfigMap.put(BatchConfigProperties.RECORD_READER_CLASS, "org.foo.Reader");
IngestionConfig ingestionConfig = new IngestionConfig();
// TODO: Check if we should allow duplicate config maps
ingestionConfig.setBatchIngestionConfig(
new BatchIngestionConfig(Arrays.asList(batchConfigMap, batchConfigMap), null, null));
TableConfig tableConfig =
new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIngestionConfig(ingestionConfig).build();
TableConfigUtils.validateIngestionConfig(tableConfig, null);
}
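A hedged companion sketch for the dimension-table branch of validateIngestionConfig: the test name is illustrative, and it assumes TableConfigBuilder#setIsDimTable and a TestNG/JUnit Assert are available; it is not taken from the original test suite.
@Test
public void ingestionDimTableRequiresRefreshTest() {
  // Dimension tables must use REFRESH segment ingestion; APPEND should be rejected.
  IngestionConfig ingestionConfig = new IngestionConfig();
  ingestionConfig.setBatchIngestionConfig(new BatchIngestionConfig(null, "APPEND", "DAILY"));
  TableConfig tableConfig =
      new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIsDimTable(true)
          .setIngestionConfig(ingestionConfig).build();
  try {
    TableConfigUtils.validateIngestionConfig(tableConfig, null);
    Assert.fail("Dimension table with segment ingestion type APPEND should be rejected");
  } catch (IllegalStateException e) {
    // expected: dimension tables must have segment ingestion type REFRESH
  }
}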
|
@Override
public SettableFuture schedule(SourceScheduler scheduler)
{
// Return a new future even if newDriverGroupReady has not finished.
// Returning the same SettableFuture instance could lead to ListenableFuture retaining too many listener objects.
// TODO: After initial scheduling, tasks would only be available after they finished at least one bucket.
// This is not necessarily the case if the initial scheduling covered all buckets and
// the available slots is not fully utilized (concurrentLifespansPerTask is large or infinite).
// In this case if a task failed, the recovered driver groups have to wait for tasks to be available again after finishing at least one bucket,
// even though by definition of concurrentLifespansPerTask they are already available.
checkState(initialScheduled, "schedule should only be called after initial scheduling finished");
checkState(failedTasks.size() < nodeByTaskId.size(), "All tasks have failed");
synchronized (this) {
newDriverGroupReady = SettableFuture.create();
while (!availableTasks.isEmpty() && (!noPreferenceDriverGroups.isEmpty() || !nodeToPreferredDriverGroups.isEmpty())) {
int taskId = availableTasks.dequeueInt();
if (failedTasks.contains(taskId)) {
continue;
}
OptionalInt nextDriverGroupId = getNextDriverGroup(nodeByTaskId.get(taskId));
if (!nextDriverGroupId.isPresent()) {
continue;
}
scheduler.startLifespan(Lifespan.driverGroup(nextDriverGroupId.getAsInt()), partitionHandles.get(nextDriverGroupId.getAsInt()));
taskByDriverGroup[nextDriverGroupId.getAsInt()] = taskId;
runningDriverGroupIdsByTask[taskId].add(nextDriverGroupId.getAsInt());
}
}
return newDriverGroupReady;
}
|
@Test
public void testSchedule()
{
LifespanScheduler lifespanScheduler = getLifespanScheduler();
TestingSourceScheduler sourceScheduler = new TestingSourceScheduler();
lifespanScheduler.scheduleInitial(sourceScheduler);
lifespanScheduler.onLifespanExecutionFinished(sourceScheduler.getLastStartedLifespans());
assertEquals(sourceScheduler.getLastStartedLifespans().size(), 2);
sourceScheduler.getLastStartedLifespans().clear();
while (!lifespanScheduler.allLifespanExecutionFinished()) {
lifespanScheduler.schedule(sourceScheduler);
lifespanScheduler.onLifespanExecutionFinished(sourceScheduler.getLastStartedLifespans());
assertEquals(sourceScheduler.getLastStartedLifespans().size(), 2);
sourceScheduler.getLastStartedLifespans().clear();
}
}
|