focal_method
stringlengths 13
60.9k
| test_case
stringlengths 25
109k
|
---|---|
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
    // Admin client used to query the trace topic; the instance name is randomized
    // so concurrent mqadmin processes on one host do not clash.
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
    try {
        // -i (message id) is mandatory; -t/-n/-b/-e/-c are optional overrides.
        final String msgId = commandLine.getOptionValue('i').trim();
        String traceTopic = TopicValidator.RMQ_SYS_TRACE_TOPIC;
        if (commandLine.hasOption('t')) {
            traceTopic = commandLine.getOptionValue('t').trim();
        }
        if (commandLine.hasOption('n')) {
            defaultMQAdminExt.setNamesrvAddr(commandLine.getOptionValue('n').trim());
        }
        // Defaults: unbounded time window, at most 64 trace records.
        long beginTimestamp = 0;
        long endTimestamp = Long.MAX_VALUE;
        int maxNum = 64;
        if (commandLine.hasOption("b")) {
            beginTimestamp = Long.parseLong(commandLine.getOptionValue("b").trim());
        }
        if (commandLine.hasOption("e")) {
            endTimestamp = Long.parseLong(commandLine.getOptionValue("e").trim());
        }
        if (commandLine.hasOption("c")) {
            maxNum = Integer.parseInt(commandLine.getOptionValue("c").trim());
        }
        this.queryTraceByMsgId(defaultMQAdminExt, traceTopic, msgId, maxNum, beginTimestamp, endTimestamp);
    } catch (Exception e) {
        // FIX: added the missing space so the message reads
        // "QueryMsgTraceByIdSubCommand command failed" instead of
        // "QueryMsgTraceByIdSubCommandcommand failed".
        throw new SubCommandException(this.getClass().getSimpleName() + " command failed", e);
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
|
@Test
public void testExecute() throws SubCommandException {
    // Build the command line exactly as mqadmin would, pointing -n at the
    // mocked name server, then run the sub-command end to end.
    final QueryMsgTraceByIdSubCommand command = new QueryMsgTraceByIdSubCommand();
    final Options options = ServerUtil.buildCommandlineOptions(new Options());
    final String[] args = {
        String.format("-i %s", MSG_ID),
        String.format("-n localhost:%d", nameServerMocker.listenPort())
    };
    final CommandLine commandLine = ServerUtil.parseCmdLine(
        "mqadmin " + command.commandName(), args,
        command.buildCommandlineOptions(options), new DefaultParser());
    command.execute(commandLine, options, null);
}
|
public void measureAccountEnvelopeUuidMismatches(final Account account,
    final MessageProtos.Envelope envelope) {
  // An envelope without a destination UUID carries nothing to compare against.
  if (!envelope.hasDestinationUuid()) {
    return;
  }
  try {
    final ServiceIdentifier destination = ServiceIdentifier.valueOf(envelope.getDestinationUuid());
    measureAccountDestinationUuidMismatches(account, destination);
  } catch (final IllegalArgumentException ignored) {
    // Malformed identifier string: log it rather than failing metric collection.
    logger.warn("Envelope had invalid destination UUID: {}", envelope.getDestinationUuid());
  }
}
|
@Test
void measureAccountEnvelopeUuidMismatches() {
  // An envelope addressed to the account's own ACI is not a mismatch.
  messageMetrics.measureAccountEnvelopeUuidMismatches(account, createEnvelope(new AciServiceIdentifier(aci)));
  Optional<Counter> mismatchCounter = findCounter(simpleMeterRegistry);
  assertTrue(mismatchCounter.isEmpty());
  // Nor is one addressed to the account's own PNI.
  messageMetrics.measureAccountEnvelopeUuidMismatches(account, createEnvelope(new PniServiceIdentifier(pni)));
  mismatchCounter = findCounter(simpleMeterRegistry);
  assertTrue(mismatchCounter.isEmpty());
  // A foreign UUID is a mismatch and increments the counter once.
  messageMetrics.measureAccountEnvelopeUuidMismatches(account, createEnvelope(new AciServiceIdentifier(otherUuid)));
  mismatchCounter = findCounter(simpleMeterRegistry);
  assertEquals(1.0, mismatchCounter.map(Counter::count).orElse(0.0));
  // A missing destination UUID is ignored entirely; the count stays at one.
  messageMetrics.measureAccountEnvelopeUuidMismatches(account, createEnvelope(null));
  mismatchCounter = findCounter(simpleMeterRegistry);
  assertEquals(1.0, mismatchCounter.map(Counter::count).orElse(0.0));
}
|
/**
 * Restores the currently selected trash items via {@code trashService.undelete},
 * refreshes the affected directory views, and re-registers extension-point data
 * for any restored transformations (directly selected or contained in a restored
 * directory). Throws a bare {@link RuntimeException} when nothing is selected.
 */
public void undelete() {
  // make a copy because the selected trash items changes as soon as trashService.undelete is called
  List<UIDeletedObject> selectedTrashFileItemsSnapshot = new ArrayList<UIDeletedObject>( selectedTrashFileItems );
  // NOTE(review): the snapshot was just constructed, so the null check can never
  // fail; only the size check is effective here.
  if ( selectedTrashFileItemsSnapshot != null && selectedTrashFileItemsSnapshot.size() > 0 ) {
    // Collect the object ids of everything being restored.
    List<ObjectId> ids = new ArrayList<ObjectId>();
    for ( UIDeletedObject uiObj : selectedTrashFileItemsSnapshot ) {
      ids.add( uiObj.getId() );
    }
    try {
      trashService.undelete( ids );
      setTrash( trashService.getTrash() );
      for ( UIDeletedObject uiObj : selectedTrashFileItemsSnapshot ) {
        // find the closest UIRepositoryDirectory that is in the dirMap
        RepositoryDirectoryInterface dir = repository.findDirectory( uiObj.getOriginalParentPath() );
        while ( dir != null && dirMap.get( dir.getObjectId() ) == null ) {
          dir = dir.getParent();
        }
        // now refresh that UIRepositoryDirectory so that the file/folders deck instantly refreshes on undelete
        if ( dir != null ) {
          dirMap.get( dir.getObjectId() ).refresh();
        }
        // if transformation or directory with transformations call extension to restore data services references.
        if ( RepositoryObjectType.TRANSFORMATION.name().equals( uiObj.getType() ) ) {
          // A restored transformation: reload it and replay the "after open"
          // extension point, then clear the changed flag set by loading.
          TransMeta transMeta = repository.loadTransformation( uiObj.getId(), null );
          ExtensionPointHandler
            .callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.TransAfterOpen.id, transMeta );
          transMeta.clearChanged();
        } else if ( !RepositoryObjectType.JOB.name().equals( uiObj.getType() ) ) {
          // if not a transformation and not a job then is a Directory
          RepositoryDirectoryInterface
              actualDir =
              repository.findDirectory(
                  uiObj.getOriginalParentPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + uiObj.getName() );
          if ( actualDir != null ) {
            // Replay the extension point for every transformation inside the
            // restored directory tree.
            List<RepositoryElementMetaInterface> transformations = new ArrayList<>();
            getAllTransformations( actualDir, transformations );
            for ( RepositoryElementMetaInterface repositoryElementMetaInterface : transformations ) {
              TransMeta transMeta = repository.loadTransformation( repositoryElementMetaInterface.getObjectId(), null );
              ExtensionPointHandler
                .callExtensionPoint( LogChannel.GENERAL, KettleExtensionPoint.TransAfterOpen.id, transMeta );
              transMeta.clearChanged();
            }
          } else {
            // The directory was expected after undelete but could not be found.
            displayExceptionMessage( BaseMessages.getString( PKG, "TrashBrowseController.UnableToRestoreDirectory",
                uiObj.getOriginalParentPath() + RepositoryDirectory.DIRECTORY_SEPARATOR + uiObj.getName() ) );
          }
        }
        // Jobs need no extension-point replay; they fall through both branches.
      }
      // Switch the deck back to the browse view after a successful restore.
      deck.setSelectedIndex( 1 );
    } catch ( Throwable th ) {
      // A lost-repository condition is handled by the main controller; anything
      // else is surfaced to the user as an error dialog.
      if ( mainController == null || !mainController.handleLostRepository( th ) ) {
        displayExceptionMessage( BaseMessages.getString( PKG,
            "TrashBrowseController.UnableToRestoreFile", th.getLocalizedMessage() ) ); //$NON-NLS-1$
      }
    }
  } else {
    // ui probably allowed the button to be enabled when it shouldn't have been enabled
    throw new RuntimeException();
  }
}
|
@Test
public void testExceptionNotHandle() throws Exception {
  // When the main controller reports the repository as lost, the exception from
  // undelete must be swallowed: no error dialog and no deck switch.
  final RuntimeException undeleteFailure = new RuntimeException( "Exception handle" );
  when( selectedTrashFileItemsMock.toArray() )
      .thenReturn( new TrashBrowseController.UIDeletedObject[] { uiDirectoryObjectMock } );
  doThrow( undeleteFailure ).when( trashServiceMock ).undelete( anyList() );
  doReturn( true ).when( mainControllerMock ).handleLostRepository( any( Throwable.class ) );

  trBrController.undelete();

  // The message box is never configured or opened...
  verify( messageBoxMock, never() ).setTitle( "Error" );
  verify( messageBoxMock, never() ).setAcceptLabel( "OK" );
  verify( messageBoxMock, never() ).setMessage( contains( "Exception handle" ) );
  verify( messageBoxMock, never() ).open();
  // ...and the deck is not switched because the restore did not complete.
  verify( deckMock, never() ).setSelectedIndex( 1 );
}
|
@Override
public void confirmLeadership(UUID leaderSessionID, String leaderAddress) {
    // Pure delegation: forward to the parent service, adding this election's
    // component id so the parent can route the confirmation to the right contender.
    parentService.confirmLeadership(componentId, leaderSessionID, leaderAddress);
}
|
@Test
void testLeaderConfirmation() throws Exception {
    // Capture everything the parent service receives on confirmation.
    final AtomicReference<String> observedComponentId = new AtomicReference<>();
    final AtomicReference<UUID> observedSessionId = new AtomicReference<>();
    final AtomicReference<String> observedAddress = new AtomicReference<>();
    final DefaultLeaderElection.ParentService parentService =
            TestingAbstractLeaderElectionService.newBuilder()
                    .setConfirmLeadershipConsumer(
                            (componentId, sessionId, address) -> {
                                observedComponentId.set(componentId);
                                observedSessionId.set(sessionId);
                                observedAddress.set(address);
                            })
                    .build();
    final UUID confirmedSessionId = UUID.randomUUID();
    final String confirmedAddress = "random-address";
    try (final DefaultLeaderElection testInstance =
            new DefaultLeaderElection(parentService, DEFAULT_TEST_COMPONENT_ID)) {
        testInstance.confirmLeadership(confirmedSessionId, confirmedAddress);
        // The election forwards its own component id plus the caller's arguments.
        assertThat(observedComponentId).hasValue(DEFAULT_TEST_COMPONENT_ID);
        assertThat(observedSessionId).hasValue(confirmedSessionId);
        assertThat(observedAddress).hasValue(confirmedAddress);
    }
}
|
/**
 * Parses server chat messages to keep tracked item charges (jewellery, bracelets,
 * chronicle, blood essence, ...) in sync, and fires the configured notifications
 * when an item shatters/crumbles.
 */
@Subscribe
public void onChatMessage(ChatMessage event)
{
	// Charge messages only ever arrive as game or spam server messages.
	if (event.getType() != ChatMessageType.GAMEMESSAGE && event.getType() != ChatMessageType.SPAM)
	{
		return;
	}

	String message = Text.removeTags(event.getMessage());
	Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message);
	Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message);
	Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message);
	Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message);
	Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message);
	Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message);
	Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message);
	Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message);
	Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message);
	Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message);
	Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message);
	Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message);
	Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message);
	Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message);
	Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message);
	Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message);
	Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message);
	Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message);
	Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message);
	Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message);
	if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE))
	{
		notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered");
	}
	else if (dodgyBreakMatcher.find())
	{
		notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust.");
		// A break means a fresh necklace is implicitly equipped next; reset to max.
		updateDodgyNecklaceCharges(MAX_DODGY_CHARGES);
	}
	else if (dodgyCheckMatcher.find())
	{
		updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1)));
	}
	else if (dodgyProtectMatcher.find())
	{
		updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1)));
	}
	else if (amuletOfChemistryCheckMatcher.find())
	{
		updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1)));
	}
	else if (amuletOfChemistryUsedMatcher.find())
	{
		// The message spells out "one" for a single remaining charge.
		final String match = amuletOfChemistryUsedMatcher.group(1);
		int charges = 1;
		if (!match.equals("one"))
		{
			charges = Integer.parseInt(match);
		}
		updateAmuletOfChemistryCharges(charges);
	}
	else if (amuletOfChemistryBreakMatcher.find())
	{
		notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust.");
		updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES);
	}
	else if (amuletOfBountyCheckMatcher.find())
	{
		updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1)));
	}
	else if (amuletOfBountyUsedMatcher.find())
	{
		updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1)));
	}
	else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT))
	{
		updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES);
	}
	else if (message.contains(BINDING_BREAK_TEXT))
	{
		notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT);
		// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
		updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1);
	}
	else if (bindingNecklaceUsedMatcher.find())
	{
		final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
		// FIX: guard against a null equipment container before dereferencing it,
		// consistent with the ring-of-forging and bracelet-of-clay branches below.
		if (equipment != null && equipment.contains(ItemID.BINDING_NECKLACE))
		{
			updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1);
		}
	}
	else if (bindingNecklaceCheckMatcher.find())
	{
		final String match = bindingNecklaceCheckMatcher.group(1);
		int charges = 1;
		if (!match.equals("one"))
		{
			charges = Integer.parseInt(match);
		}
		updateBindingNecklaceCharges(charges);
	}
	else if (ringOfForgingCheckMatcher.find())
	{
		final String match = ringOfForgingCheckMatcher.group(1);
		int charges = 1;
		if (!match.equals("one"))
		{
			charges = Integer.parseInt(match);
		}
		updateRingOfForgingCharges(charges);
	}
	else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY))
	{
		final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
		final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
		// Determine if the player smelted with a Ring of Forging equipped.
		if (equipment == null)
		{
			return;
		}
		// FIX: also null-check the inventory container, which is only consulted on
		// the Varrock-platebody path (the short-circuit skipped it before).
		if (equipment.contains(ItemID.RING_OF_FORGING)
			&& (message.equals(RING_OF_FORGING_USED_TEXT) || (inventory != null && inventory.count(ItemID.IRON_ORE) > 1)))
		{
			int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES);
			updateRingOfForgingCharges(charges);
		}
	}
	else if (message.equals(RING_OF_FORGING_BREAK_TEXT))
	{
		notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted.");
		// This chat message triggers before the used message so add 1 to the max charges to ensure proper sync
		updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1);
	}
	else if (chronicleAddMatcher.find())
	{
		final String match = chronicleAddMatcher.group(1);
		if (match.equals("one"))
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
		}
		else
		{
			setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match));
		}
	}
	else if (chronicleUseAndCheckMatcher.find())
	{
		setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1)));
	}
	else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT))
	{
		setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1);
	}
	else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT))
	{
		setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0);
	}
	else if (message.equals(CHRONICLE_FULL_TEXT))
	{
		setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000);
	}
	else if (slaughterActivateMatcher.find())
	{
		// Group 1 is absent when the bracelet broke on activation.
		final String found = slaughterActivateMatcher.group(1);
		if (found == null)
		{
			updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES);
			notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT);
		}
		else
		{
			updateBraceletOfSlaughterCharges(Integer.parseInt(found));
		}
	}
	else if (slaughterCheckMatcher.find())
	{
		updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1)));
	}
	else if (expeditiousActivateMatcher.find())
	{
		// Group 1 is absent when the bracelet broke on activation.
		final String found = expeditiousActivateMatcher.group(1);
		if (found == null)
		{
			updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES);
			notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT);
		}
		else
		{
			updateExpeditiousBraceletCharges(Integer.parseInt(found));
		}
	}
	else if (expeditiousCheckMatcher.find())
	{
		updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1)));
	}
	else if (bloodEssenceCheckMatcher.find())
	{
		updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1)));
	}
	else if (bloodEssenceExtractMatcher.find())
	{
		updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1)));
	}
	else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT))
	{
		updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES);
	}
	else if (braceletOfClayCheckMatcher.find())
	{
		updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1)));
	}
	else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN))
	{
		final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT);
		// Determine if the player mined with a Bracelet of Clay equipped.
		if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY))
		{
			final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY);
			// Charge is not used if only 1 inventory slot is available when mining in Prifddinas
			boolean ignore = inventory != null
				&& inventory.count() == 27
				&& message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN);
			if (!ignore)
			{
				int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES);
				updateBraceletOfClayCharges(charges);
			}
		}
	}
	else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT))
	{
		notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust");
		updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES);
	}
}
|
@Test
public void testChemistryUsed3()
{
	// Feed the "used amulet of chemistry (3 doses)" message through the plugin
	// and check the remaining-charge count persisted to the RS profile.
	final ChatMessage event =
		new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", USED_AMULET_OF_CHEMISTRY_3_DOSES, "", 0);
	itemChargePlugin.onChatMessage(event);
	verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_AMULET_OF_CHEMISTRY, 2);
}
|
/**
 * Parses a mixed list of discrete version strings and version-range strings into
 * a {@code VersionSet}. The list must be non-null and non-empty; any entry that is
 * neither form raises {@link IllegalArgumentException}.
 */
public static VersionSet parse(ImmutableList<String> versionAndRangesList) {
  checkNotNull(versionAndRangesList);
  checkArgument(!versionAndRangesList.isEmpty(), "Versions and ranges list cannot be empty.");
  VersionSet.Builder builder = VersionSet.builder();
  for (String entry : versionAndRangesList) {
    if (isDiscreteVersion(entry)) {
      builder.addVersion(Version.fromString(entry));
      continue;
    }
    if (!VersionRange.isValidVersionRange(entry)) {
      throw new IllegalArgumentException(
          String.format(
              "String '%s' is neither a discrete string nor a version range.", entry));
    }
    builder.addVersionRange(VersionRange.parse(entry));
  }
  return builder.build();
}
|
@Test
public void parse_withEmptyInputList_throwsIllegalArgumentException() {
  // An empty list has nothing to parse, so the precondition must fire with its
  // exact message.
  IllegalArgumentException thrown =
      assertThrows(IllegalArgumentException.class, () -> VersionSet.parse(ImmutableList.of()));
  assertThat(thrown).hasMessageThat().isEqualTo("Versions and ranges list cannot be empty.");
}
|
/**
 * Ensures a write client id is configured for multi-writer pipelines. An id set
 * explicitly by the user is left untouched; otherwise the next available id is
 * acquired from {@code ClientIds} and written into the configuration.
 */
public static void setupClientId(Configuration conf) {
  // Single-writer pipelines do not need a client id at all.
  if (!OptionsResolver.isMultiWriter(conf)) {
    return;
  }
  // explicit client id always has higher priority
  if (conf.contains(FlinkOptions.WRITE_CLIENT_ID)) {
    return;
  }
  try (ClientIds clientIds = ClientIds.builder().conf(conf).build()) {
    conf.setString(FlinkOptions.WRITE_CLIENT_ID, clientIds.nextId(conf));
  }
}
|
@Test
void testSetupClientId() throws Exception {
    // An explicitly configured client id must survive setupClientId untouched.
    Configuration conf = getConf();
    conf.setString(FlinkOptions.WRITE_CLIENT_ID, "2");
    OptionsInference.setupClientId(conf);
    assertThat("Explicit client id has higher priority",
        conf.getString(FlinkOptions.WRITE_CLIENT_ID), is("2"));
    // Without an explicit id, each new client auto-increments: the first gets the
    // initial id, subsequent ones get "1", "2", ...
    for (int i = 0; i < 3; i++) {
        conf = getConf();
        try (ClientIds clientIds = ClientIds.builder().conf(conf).build()) {
            OptionsInference.setupClientId(conf);
            String expectedId = i == 0 ? ClientIds.INIT_CLIENT_ID : i + "";
            assertThat("The client id should auto inc to " + expectedId,
                conf.getString(FlinkOptions.WRITE_CLIENT_ID), is(expectedId));
        }
    }
    // sleep 1 second to simulate a zombie heartbeat
    Thread.sleep(1000);
    conf = getConf();
    try (ClientIds clientIds = ClientIds.builder()
        .conf(conf)
        .heartbeatIntervalInMs(10) // max 10 milliseconds tolerable heartbeat timeout
        .numTolerableHeartbeatMisses(1). build()) {
        String nextId = clientIds.nextId(conf);
        // NOTE(review): "" appears to be the initial/lowest id slot being reclaimed
        // once its heartbeat expired — confirm against ClientIds.INIT_CLIENT_ID.
        assertThat("The inactive client id should be reused",
            nextId, is(""));
    }
}
|
/**
 * Resolves a queryable store handle for the given parameters. Global stores take
 * precedence; otherwise the lookup spans all local store providers.
 */
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
    final String name = storeQueryParameters.storeName();
    final QueryableStoreType<T> type = storeQueryParameters.queryableStoreType();
    // If the name resolves to a global store, serve it from the global provider.
    final List<T> globalMatches = globalStoreProvider.stores(name, type);
    if (!globalMatches.isEmpty()) {
        return type.create(globalStoreProvider, name);
    }
    // Otherwise wrap every local provider so queries can span all tasks' stores.
    final WrappingStoreProvider wrapped =
        new WrappingStoreProvider(storeProviders.values(), storeQueryParameters);
    return type.create(wrapped, name);
}
|
@Test
public void shouldReturnWindowStoreWhenItExists() {
    // Looking up an existing window store by name and type yields a usable handle.
    assertNotNull(storeProvider.getStore(StoreQueryParameters.fromNameAndType(windowStore, QueryableStoreTypes.windowStore())));
}
|
@Override
public int getOrder() {
    // This plugin's position in the chain is the URI plugin's enum code.
    return PluginEnum.URI.getCode();
}
|
@Test
public void testGetOrder() {
    // FIX: JUnit's assertEquals takes (expected, actual); the arguments were
    // reversed, which would produce a misleading failure message.
    assertEquals(PluginEnum.URI.getCode(), uriPlugin.getOrder());
}
|
/**
 * Loads the ApplicationMaster's Flink configuration and overlays YARN-specific
 * settings: the JM/REST addresses (from the NodeManager host), random REST/web
 * ports, deprecated YARN env-prefix migration, Kerberos keytab settings, and
 * YARN-provided local tmp directories.
 *
 * @param workingDirectory  directory to load the base configuration from
 * @param dynamicParameters dynamic overrides merged into the base configuration
 * @param env               process environment (YARN / NodeManager variables)
 * @return the fully prepared configuration
 */
public static Configuration loadConfiguration(
        String workingDirectory, Configuration dynamicParameters, Map<String, String> env) {
    final Configuration configuration =
            GlobalConfiguration.loadConfiguration(workingDirectory, dynamicParameters);
    final String keytabPrincipal = env.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
    // The NodeManager host is where this AM runs; it must be present.
    final String hostname = env.get(ApplicationConstants.Environment.NM_HOST.key());
    Preconditions.checkState(
            hostname != null,
            "ApplicationMaster hostname variable %s not set",
            ApplicationConstants.Environment.NM_HOST.key());
    // Expose JobManager and REST endpoints on the container's host.
    configuration.set(JobManagerOptions.ADDRESS, hostname);
    configuration.set(RestOptions.ADDRESS, hostname);
    configuration.set(RestOptions.BIND_ADDRESS, hostname);
    // if a web monitor shall be started, set the port to random binding
    // (any non-negative configured port is forced to 0 = ephemeral).
    if (configuration.get(WebOptions.PORT, 0) >= 0) {
        configuration.set(WebOptions.PORT, 0);
    }
    if (!configuration.contains(RestOptions.BIND_PORT)) {
        // set the REST port to 0 to select it randomly
        configuration.set(RestOptions.BIND_PORT, "0");
    }
    // if the user has set the deprecated YARN-specific config keys, we add the
    // corresponding generic config keys instead. that way, later code needs not
    // deal with deprecated config keys
    BootstrapTools.substituteDeprecatedConfigPrefix(
            configuration,
            ConfigConstants.YARN_APPLICATION_MASTER_ENV_PREFIX,
            ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX);
    BootstrapTools.substituteDeprecatedConfigPrefix(
            configuration,
            ConfigConstants.YARN_TASK_MANAGER_ENV_PREFIX,
            ResourceManagerOptions.CONTAINERIZED_TASK_MANAGER_ENV_PREFIX);
    // Kerberos is only configured when both the localized keytab and the
    // principal are available.
    final String keytabPath =
            Utils.resolveKeytabPath(
                    workingDirectory, env.get(YarnConfigKeys.LOCAL_KEYTAB_PATH));
    if (keytabPath != null && keytabPrincipal != null) {
        configuration.set(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
        configuration.set(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, keytabPrincipal);
    }
    // Use the YARN-provided local directories for temporary files.
    final String localDirs = env.get(ApplicationConstants.Environment.LOCAL_DIRS.key());
    BootstrapTools.updateTmpDirectoriesInConfiguration(configuration, localDirs);
    return configuration;
}
|
@Test
void testRestPortOptionsUnspecified() throws IOException {
    // With no REST port options configured at all...
    final Configuration initialConfiguration = new Configuration();
    final Configuration configuration = loadConfiguration(initialConfiguration);
    // having not specified the ports should set the rest bind port to 0
    assertThat(configuration.get(RestOptions.BIND_PORT)).isEqualTo("0");
}
|
@Override
public boolean rename(final Path src, final Path dst) throws IOException {
    // note fullPath will check that paths are relative to this FileSystem.
    // Hence both are in same file system and a rename is valid
    return super.rename(fullPath(src), fullPath(dst));
}
|
@Test
public void testRename() throws IOException {
    // Rename a file
    fileSystemTestHelper.createFile(fSys, "/newDir/foo");
    fSys.rename(new Path("/newDir/foo"), new Path("/newDir/fooBar"));
    // Old name gone in both the chrooted view and the underlying target FS...
    Assert.assertFalse(fSys.exists(new Path("/newDir/foo")));
    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/foo")));
    // ...new name present in both.
    Assert.assertTrue(fSys.isFile(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/fooBar")));
    Assert.assertTrue(fSysTarget.isFile(new Path(chrootedTo,"newDir/fooBar")));

    // Rename a dir
    fSys.mkdirs(new Path("/newDir/dirFoo"));
    fSys.rename(new Path("/newDir/dirFoo"), new Path("/newDir/dirFooBar"));
    // Same checks for the directory rename.
    Assert.assertFalse(fSys.exists(new Path("/newDir/dirFoo")));
    Assert.assertFalse(fSysTarget.exists(new Path(chrootedTo,"newDir/dirFoo")));
    Assert.assertTrue(fSys.isDirectory(fileSystemTestHelper.getTestRootPath(fSys,"/newDir/dirFooBar")));
    Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
}
|
/**
 * Returns the probability/confidence map for the given score distributions, or an
 * empty map when there are none (or the list is null).
 */
protected static LinkedHashMap<String, KiePMMLProbabilityConfidence> getProbabilityConfidenceMap(final List<KiePMMLScoreDistribution> kiePMMLScoreDistributions,
                                                                                                 final double missingValuePenalty) {
    // Nothing to evaluate: hand back an empty (mutable) map.
    if (kiePMMLScoreDistributions == null || kiePMMLScoreDistributions.isEmpty()) {
        return new LinkedHashMap<>();
    }
    return evaluateProbabilityConfidenceMap(kiePMMLScoreDistributions, missingValuePenalty);
}
|
@Test
void getProbabilityConfidenceMap() {
    // A null distribution list yields an empty (but non-null) map.
    LinkedHashMap<String, KiePMMLProbabilityConfidence> result = KiePMMLNode.getProbabilityConfidenceMap(null, 1.0);
    assertThat(result).isNotNull();
    assertThat(result).isEmpty();
    // So does an empty list.
    result = KiePMMLNode.getProbabilityConfidenceMap(Collections.emptyList(), 1.0);
    assertThat(result).isNotNull();
    assertThat(result).isEmpty();
    // A populated list produces one entry per score distribution.
    final List<KiePMMLScoreDistribution> scoreDistributions = getRandomKiePMMLScoreDistributions(false);
    result = KiePMMLNode.getProbabilityConfidenceMap(scoreDistributions, 1.0);
    assertThat(result).isNotNull();
    assertThat(result).hasSameSizeAs(scoreDistributions);
}
|
/**
 * Entry point for all raw touch events on the keyboard view. Tracks multi-finger
 * state, honors the disabled-touches latch, feeds MOVE events to every active
 * pointer tracker, and routes other actions to the tracker for the acting pointer.
 * Always consumes the event (returns true) unless no keyboard is set.
 */
@Override
public boolean onTouchEvent(@NonNull MotionEvent nativeMotionEvent) {
    if (mKeyboard == null) {
        // I mean, if there isn't any keyboard I'm handling, what's the point?
        return false;
    }
    final int action = nativeMotionEvent.getActionMasked();
    final int pointerCount = nativeMotionEvent.getPointerCount();
    if (pointerCount > 1) {
        mLastTimeHadTwoFingers =
                SystemClock.elapsedRealtime(); // marking the time. Read isAtTwoFingersState()
    }
    if (mTouchesAreDisabledTillLastFingerIsUp) {
        if (!areTouchesDisabled(nativeMotionEvent) /*this means it was just reset*/) {
            mTouchesAreDisabledTillLastFingerIsUp = false;
            // continue with onTouchEvent flow.
            if (action != MotionEvent.ACTION_DOWN) {
                // swallowing the event.
                // in case this is a DOWN event, we do want to pass it
                return true;
            }
        } else {
            // swallowing touch event until we reset mTouchesAreDisabledTillLastFingerIsUp
            return true;
        }
    }
    final long eventTime = nativeMotionEvent.getEventTime();
    final int index = nativeMotionEvent.getActionIndex();
    final int id = nativeMotionEvent.getPointerId(index);
    final int x = (int) nativeMotionEvent.getX(index);
    final int y = (int) nativeMotionEvent.getY(index);
    if (mKeyPressTimingHandler.isInKeyRepeat()) {
        // It will keep being in the key repeating mode while the key is
        // being pressed.
        if (action == MotionEvent.ACTION_MOVE) {
            return true;
        }
        final PointerTracker tracker = getPointerTracker(id);
        // Key repeating timer will be canceled if 2 or more keys are in
        // action, and current
        // event (UP or DOWN) is non-modifier key.
        if (pointerCount > 1 && !tracker.isModifier()) {
            mKeyPressTimingHandler.cancelKeyRepeatTimer();
        }
        // Up event will pass through.
    }
    if (action == MotionEvent.ACTION_MOVE) {
        // MOVE events carry coordinates for every pointer; dispatch each one to
        // its own tracker.
        for (int i = 0; i < pointerCount; i++) {
            PointerTracker tracker = getPointerTracker(nativeMotionEvent.getPointerId(i));
            tracker.onMoveEvent(
                    (int) nativeMotionEvent.getX(i), (int) nativeMotionEvent.getY(i), eventTime);
        }
    } else {
        // Non-MOVE actions concern only the acting pointer.
        PointerTracker tracker = getPointerTracker(id);
        sendOnXEvent(action, eventTime, x, y, tracker);
    }
    return true;
}
|
@Test
public void testWithLongPressOutputLongPressKeyPressState() {
    // Give the 'f' key a long-press output so a long press produces a key event.
    final AnyKeyboard.AnyKey key = findKey('f');
    key.longPressCode = 'z';
    KeyDrawableStateProvider provider =
            new KeyDrawableStateProvider(
                    R.attr.key_type_function,
                    R.attr.key_type_action,
                    R.attr.action_done,
                    R.attr.action_search,
                    R.attr.action_go);
    // Initially the key is in its normal drawable state.
    Assert.assertArrayEquals(provider.KEY_STATE_NORMAL, key.getCurrentDrawableState(provider));
    Point keyPoint = ViewTestUtils.getKeyCenterPoint(key);
    // A short touch (80ms) leaves the key pressed...
    ViewTestUtils.navigateFromTo(mUnderTest, keyPoint, keyPoint, 80, true, false);
    Assert.assertArrayEquals(provider.KEY_STATE_PRESSED, key.getCurrentDrawableState(provider));
    // ...holding for 300ms (past the long-press threshold) returns it to normal
    // once the scheduler advances.
    ViewTestUtils.navigateFromTo(mUnderTest, keyPoint, keyPoint, 300, false, false);
    TestRxSchedulers.foregroundAdvanceBy(1);
    Assert.assertArrayEquals(provider.KEY_STATE_NORMAL, key.getCurrentDrawableState(provider));
    // Releasing the finger keeps the key in the normal state.
    mUnderTest.onTouchEvent(
            MotionEvent.obtain(
                    SystemClock.uptimeMillis(),
                    SystemClock.uptimeMillis(),
                    MotionEvent.ACTION_UP,
                    keyPoint.x,
                    keyPoint.y,
                    0));
    Assert.assertArrayEquals(provider.KEY_STATE_NORMAL, key.getCurrentDrawableState(provider));
}
|
/**
 * Creates an unbounded JMS read with sensible defaults: no record limit, a
 * {@code SerializableCoder} for {@link JmsRecord}, the default close timeout, no
 * deduping, and a message mapper that converts each {@code TextMessage} (header
 * fields, all properties, and the text body) into a {@link JmsRecord}.
 */
public static Read<JmsRecord> read() {
    return new AutoValue_JmsIO_Read.Builder<JmsRecord>()
        .setMaxNumRecords(Long.MAX_VALUE)
        .setCoder(SerializableCoder.of(JmsRecord.class))
        .setCloseTimeout(DEFAULT_CLOSE_TIMEOUT)
        .setRequiresDeduping(false)
        .setMessageMapper(
            new MessageMapper<JmsRecord>() {
              @Override
              public JmsRecord mapMessage(Message message) throws Exception {
                // NOTE(review): assumes every incoming message is a TextMessage;
                // any other JMS message type would throw ClassCastException here.
                TextMessage textMessage = (TextMessage) message;
                // Copy all message properties into a plain map.
                Map<String, Object> properties = new HashMap<>();
                @SuppressWarnings("rawtypes")
                Enumeration propertyNames = textMessage.getPropertyNames();
                while (propertyNames.hasMoreElements()) {
                  String propertyName = (String) propertyNames.nextElement();
                  properties.put(propertyName, textMessage.getObjectProperty(propertyName));
                }
                // Mirror the standard JMS header fields plus the text payload.
                return new JmsRecord(
                    textMessage.getJMSMessageID(),
                    textMessage.getJMSTimestamp(),
                    textMessage.getJMSCorrelationID(),
                    textMessage.getJMSReplyTo(),
                    textMessage.getJMSDestination(),
                    textMessage.getJMSDeliveryMode(),
                    textMessage.getJMSRedelivered(),
                    textMessage.getJMSType(),
                    textMessage.getJMSExpiration(),
                    textMessage.getJMSPriority(),
                    properties,
                    textMessage.getText());
              }
            })
        .build();
}
|
@Test
public void testAuthenticationRequired() {
  // Reading without credentials must fail; the two supported brokers report the
  // failure with different wording.
  pipeline.apply(JmsIO.read().withConnectionFactory(connectionFactory).withQueue(QUEUE));
  final String expectedError;
  if (this.connectionFactoryClass == ActiveMQConnectionFactory.class) {
    expectedError = "User name [null] or password is invalid.";
  } else {
    expectedError = "Client failed to authenticate using SASL: ANONYMOUS";
  }
  runPipelineExpectingJmsConnectException(expectedError);
}
|
@Override
public EncryptRule build(final EncryptRuleConfiguration ruleConfig, final String databaseName, final DatabaseType protocolType,
                         final ResourceMetaData resourceMetaData, final Collection<ShardingSphereRule> builtRules, final ComputeNodeInstanceContext computeNodeInstanceContext) {
    // Only the database name and rule configuration are needed to construct the
    // rule; the remaining parameters belong to the builder SPI signature and are
    // intentionally unused here.
    return new EncryptRule(databaseName, ruleConfig);
}
|
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void assertBuild() {
    EncryptRuleConfiguration ruleConfig = mock(EncryptRuleConfiguration.class);
    // Resolve the builder via SPI from the rule configuration type...
    DatabaseRuleBuilder builder = OrderedSPILoader.getServices(DatabaseRuleBuilder.class, Collections.singleton(ruleConfig)).get(ruleConfig);
    // ...and verify it produces an EncryptRule instance.
    assertThat(builder.build(ruleConfig, "", new MockedDatabaseType(), mock(ResourceMetaData.class), Collections.emptyList(), mock(ComputeNodeInstanceContext.class)),
        instanceOf(EncryptRule.class));
}
|
/**
 * Reverses the metric-name escaping: "%2F" back to '/', "%2E" back to '.', and
 * finally "%25" back to '%'.
 */
public static String unescape(String uri) {
  // "%25" must be handled last: decoding it earlier could create fresh "%2F" or
  // "%2E" sequences that the later passes would then wrongly decode.
  String result = uri.replace("%2F", "/");
  result = result.replace("%2E", ".");
  return result.replace("%25", "%");
}
|
@Test
public void testUnescape() {
  // unescape must invert escape for a variety of URI shapes, including paths that
  // already contain percent-encoded characters.
  final String[] paths = {
      "/foo/alluxio/underFSStorage",
      "/.alluxio.wololo/alluxio/underFSStorage",
      "/%25alluxio%20user%2Ffoo%2Ebar/alluxio/underFSStorage",
      "s3a://test/Tasks+Export+%282017–11–05+06%3A10+PM%2Ecsv",
  };
  for (String path : paths) {
    AlluxioURI uri = new AlluxioURI(path);
    String roundTripped = MetricsSystem.unescape(MetricsSystem.escape(uri));
    assertEquals(uri.toString(), roundTripped);
  }
}
|
/**
 * Builds the full Kubernetes specification (Deployment plus accompanying resources)
 * for the JobManager by threading the pod template through an ordered chain of
 * step decorators.
 *
 * <p>NOTE: the decorator order is significant — later decorators observe the pod
 * as mutated by earlier ones — so do not reorder the list casually.
 *
 * @param podTemplate base pod definition; copied, never mutated
 * @param kubernetesJobManagerParameters cluster/JobManager configuration
 * @return the deployment and its accompanying Kubernetes resources
 * @throws IOException if a decorator fails to build its resources
 */
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
        FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
        throws IOException {
    FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
    List<HasMetadata> accompanyingResources = new ArrayList<>();
    final List<KubernetesStepDecorator> stepDecorators =
            new ArrayList<>(
                    Arrays.asList(
                            new InitJobManagerDecorator(kubernetesJobManagerParameters),
                            new EnvSecretsDecorator(kubernetesJobManagerParameters),
                            new MountSecretsDecorator(kubernetesJobManagerParameters),
                            new CmdJobManagerDecorator(kubernetesJobManagerParameters),
                            new InternalServiceDecorator(kubernetesJobManagerParameters),
                            new ExternalServiceDecorator(kubernetesJobManagerParameters)));
    Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
    // Hadoop/Kerberos mounts are optional and gated by configuration flags.
    if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
    }
    if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
    }
    stepDecorators.addAll(
            Arrays.asList(
                    new FlinkConfMountDecorator(kubernetesJobManagerParameters),
                    new PodTemplateMountDecorator(kubernetesJobManagerParameters)));
    // Each decorator may both mutate the pod and contribute extra resources.
    for (KubernetesStepDecorator stepDecorator : stepDecorators) {
        flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
        accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
    }
    final Deployment deployment =
            createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);
    return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
}
|
@Test
void testDeploymentMetadata() throws IOException {
    // Build the full JobManager spec and check that the resulting Deployment's
    // metadata (name, labels, annotations, owner references) is populated correctly.
    kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    flinkPod, kubernetesJobManagerParameters);
    final Deployment resultDeployment = this.kubernetesJobManagerSpecification.getDeployment();
    assertThat(resultDeployment.getApiVersion()).isEqualTo(Constants.APPS_API_VERSION);
    assertThat(resultDeployment.getMetadata().getName())
            .isEqualTo(KubernetesUtils.getDeploymentName(CLUSTER_ID));
    // Labels = common labels + jobmanager component label + user-supplied labels.
    final Map<String, String> expectedLabels = getCommonLabels();
    expectedLabels.put(Constants.LABEL_COMPONENT_KEY, Constants.LABEL_COMPONENT_JOB_MANAGER);
    expectedLabels.putAll(userLabels);
    assertThat(resultDeployment.getMetadata().getLabels()).isEqualTo(expectedLabels);
    assertThat(resultDeployment.getMetadata().getAnnotations()).isEqualTo(userAnnotations);
    assertThat(resultDeployment.getMetadata().getOwnerReferences())
            .contains(OWNER_REFERENCES.toArray(new OwnerReference[0]));
}
|
// NOTE(review): the description says "base 10", but the sibling test expects
// log(13) ~= 2.5649, which is ln(13) — confirm which logarithm the delegated
// Double overload actually computes and align the description with it.
@Udf(description = "Returns the base 10 logarithm of an INT value.")
public Double log(
    @UdfParameter(
        value = "value",
        description = "the value get the base 10 logarithm of."
    ) final Integer value
) {
  // Null propagates: a null input is forwarded as a null Double, yielding null.
  return log(value == null ? null : value.doubleValue());
}
|
@Test
public void shouldHandlePositiveValueAndBase() {
    // One-argument overloads across Integer/Long/Double.
    // (The previous version repeated the three log(1*) assertions twice; the
    // duplicates were removed — they added no coverage.)
    assertThat(udf.log(1), closeTo(0.0, 0.000000000000001));
    assertThat(udf.log(1L), closeTo(0.0, 0.000000000000001));
    assertThat(udf.log(1.0), closeTo(0.0, 0.000000000000001));
    assertThat(udf.log(13), closeTo(2.5649493574615367, 0.000000000000001));
    assertThat(udf.log(13L), closeTo(2.5649493574615367, 0.000000000000001));
    assertThat(udf.log(13.0), closeTo(2.5649493574615367, 0.000000000000001));
    // Two-argument (base, value) overloads, including extreme double bases.
    assertThat(udf.log(15, 13), closeTo(0.9471572411831843, 0.000000000000001));
    assertThat(udf.log(15L, 13L), closeTo(0.9471572411831843, 0.000000000000001));
    assertThat(udf.log(15.0, 13.0), closeTo(0.9471572411831843, 0.000000000000001));
    assertThat(udf.log(Double.MIN_VALUE, 13.0), closeTo(-0.003445474597896734, 0.000000000000001));
    assertThat(udf.log(Double.MAX_VALUE, 13.0), closeTo(0.0036137106622471603, 0.000000000000001));
}
|
/**
 * Maps bolt-specific serialization names to their wire codes.
 *
 * <p>Hessian and Hessian2 share one code; anything unrecognized is resolved by
 * the parent implementation.
 */
@Override
protected Byte parseSerializeType(String serialization) {
    if (SERIALIZE_HESSIAN.equals(serialization)
        || SERIALIZE_HESSIAN2.equals(serialization)) {
        return RemotingConstants.SERIALIZE_CODE_HESSIAN;
    }
    if (SERIALIZE_PROTOBUF.equals(serialization)) {
        return RemotingConstants.SERIALIZE_CODE_PROTOBUF;
    }
    if (SERIALIZE_JAVA.equals(serialization)) {
        return RemotingConstants.SERIALIZE_CODE_JAVA;
    }
    return super.parseSerializeType(serialization);
}
|
@Test
public void testParseSerializeType() throws Exception {
    // Hessian2 must resolve to the shared Hessian wire code.
    ConsumerConfig consumerConfig = new ConsumerConfig().setProtocol("bolt");
    ConsumerBootstrap bootstrap = Bootstraps.from(consumerConfig);
    BoltClientProxyInvoker invoker = new BoltClientProxyInvoker(bootstrap);
    byte actual = invoker.parseSerializeType(RpcConstants.SERIALIZE_HESSIAN2);
    assertEquals(RemotingConstants.SERIALIZE_CODE_HESSIAN, actual);
}
|
/**
 * Generates the secure APDUs for an eID session over the REST service.
 *
 * <p>Flow: initialize/validate the session, verify the authenticity of
 * EF.CardSecurity, derive the session keys, validate the terminal token, build
 * the secure APDUs, and persist the session.
 *
 * @param request  client request carrying EF.CardSecurity, rPICC, tPICC and the PCA application id
 * @param clientIp caller IP, used for session initialization
 * @return response with the generated APDUs, or an error response when the session cannot be initialized
 */
public SecApduResponse generateSecureAPDUsRestService(SecApduRequest request, String clientIp) {
    SecApduResponse response = new SecApduResponse();
    EidSession session = initSession(request, clientIp, response);
    // initSession populates the error status on the response when it fails.
    if (session == null) return response;
    // 1a. check that the content of ef.cardsecurity is authentic with the PA
    // (passive authentication) is correct...
    // NOTE(review): the OID "0.4.0.127.0.7.3.2.1" presumably identifies the expected
    // CMS content type for the card security object — confirm against the spec.
    final PcaSecurityInfos efCardSecurity = mapper.read(cmsVerifier.verifyMessage(
        ContentInfo.getInstance(request.getEfCardSecurity()), "0.4.0.127.0.7.3.2.1"), PcaSecurityInfos.class
    );
    // Cross-check EF.CardSecurity against the session's EF.CardAccess parameters.
    CardValidations.validateCardSecurityVsCardAccess(efCardSecurity, session.getKeyReference(),
        session.getPaceVersion(), session.getTaVersion());
    // 2. generate 2 key pairs (known as Kenc(message encryption) and Kmac (message
    // authentication) based on the public key created in step1 (on session)
    // the nonce(rpicc) we received and the ca public key we got received
    // calculate the secret key
    byte[] rpicc = request.getRpicc();
    KeyUtils.generateSecretKeys(session, efCardSecurity.getEcPublicKey().getPublicParameters(), rpicc);
    // do the tpicc check with the tpicc from the request and the terminal token
    CardValidations.validateTerminalTokenVsTpicc(session, request.getTpicc());
    // PMA=polymorphic authenticate, GA= general authenticate
    // we need 3 apdus
    response.setApdus(new ApduService(session).createSecureApdusForRDW(
        session.getUserConsentType().getTag80(), request.getPcaApplicationId())
    );
    // Result OK
    sessionRepo.save(session);
    return response;
}
|
@Test
public void generateSecureAPDUsRestServiceTest() {
    // Build a fully-populated session (the fixture values below are anonymized
    // placeholder data) and verify the happy path returns OK with 3 APDUs.
    EidSession session = new EidSession();
    session.setAtReference("SSSSSSSSSSSSSSSS");
    session.setEphemeralKey(ephemeralKey);
    session.setKeyReference(2);
    session.setTaVersion(2);
    session.setPaceVersion(2);
    session.setUserConsentType(PolymorphType.PIP);
    session.setIdpicc(ByteArray.fromBase64("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"));
    SecApduRequest request = new SecApduRequest();
    request.setHeader(createRequestHeader());
    request.setEfCardSecurity(Base64.decode("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"));
    request.setPcaApplicationId(Base64.decode("SSSSSSSSSSSS"));
    request.setRpicc(Base64.decode("SSSSSSSSSSSS"));
    request.setTpicc(Base64.decode("SSSSSSSSSSSS"));
    byte[] signature = "signature".getBytes();
    // Bypass real session initialization and signing.
    doReturn(session).when(rdwService).initSession(any(AppRequest.class), eq(localhost), any(AppResponse.class));
    when(signatureService.sign(any(byte[].class), eq("SSSSSSSSSSSSSSSS"), eq(false))).thenReturn(signature);
    SecApduResponse result = rdwService.generateSecureAPDUsRestService(request, localhost);
    assertEquals("OK", result.getStatus());
    // The RDW flow always produces exactly 3 secure APDUs.
    assertEquals(3, result.getApdus().size());
}
|
/**
 * Creates an {@link Unmarshaller} for the given class, applying the configured
 * validation schema and, when one was supplied, the custom event handler.
 *
 * @throws JAXBException if the underlying context cannot create an unmarshaller
 */
public Unmarshaller createUnmarshaller(Class<?> clazz) throws JAXBException {
    Unmarshaller result = getContext(clazz).createUnmarshaller();
    result.setSchema(unmashallerSchema);
    if (unmarshallerEventHandler != null) {
        result.setEventHandler(unmarshallerEventHandler);
    }
    return result;
}
|
@Test
void buildsUnmarshallerWithCustomEventHandler() throws Exception {
    // A handler configured on the factory builder must end up on the unmarshaller.
    ValidationEventHandler handler = event -> false;
    JAXBContextFactory factory =
        new JAXBContextFactory.Builder().withUnmarshallerEventHandler(handler).build();
    Unmarshaller unmarshaller = factory.createUnmarshaller(Object.class);
    assertThat(unmarshaller.getEventHandler()).isSameAs(handler);
}
|
/**
 * Resolves the dependency graph of a pipeline entity: an edge to every pipeline
 * rule referenced from its source and to every stream it is connected to.
 *
 * <p>If the pipeline cannot be loaded, the graph containing only the entity
 * itself is returned.
 */
@Override
public Graph<EntityDescriptor> resolveNativeEntity(EntityDescriptor entityDescriptor) {
    final MutableGraph<EntityDescriptor> graph = GraphBuilder.directed().build();
    graph.addNode(entityDescriptor);
    final ModelId modelId = entityDescriptor.id();
    try {
        final PipelineDao pipelineDao = pipelineService.load(modelId.id());
        // One edge per rule referenced from the pipeline source.
        for (String ruleName : referencedRules(pipelineDao.source())) {
            final EntityDescriptor ruleDescriptor =
                    EntityDescriptor.create(ModelId.of(ruleName), ModelTypes.PIPELINE_RULE_V1);
            graph.putEdge(entityDescriptor, ruleDescriptor);
        }
        // One edge per stream the pipeline is connected to.
        for (PipelineConnections connections : connectionsService.loadByPipelineId(pipelineDao.id())) {
            final EntityDescriptor streamDescriptor =
                    EntityDescriptor.create(ModelId.of(connections.streamId()), ModelTypes.STREAM_REF_V1);
            graph.putEdge(entityDescriptor, streamDescriptor);
        }
    } catch (NotFoundException e) {
        LOG.debug("Couldn't find pipeline {}", entityDescriptor, e);
    }
    return ImmutableGraph.copyOf(graph);
}
|
@Test
@MongoDBFixtures("PipelineFacadeTest/pipelines.json")
public void resolve() {
    // Pipeline with one stage referencing two rules; the facade must resolve the
    // pipeline entity to the two rule entities plus the connected stream entity.
    final Stage stage = Stage.builder()
            .stage(0)
            .match(Stage.Match.EITHER)
            .ruleReferences(ImmutableList.of("debug", "no-op"))
            .build();
    RuleDao ruleDao1 = RuleDao.builder()
            .id("2342353045938450345")
            .title("debug")
            .source("rule \\\"debug\\\"\\nrule \\\"no-op\\\"\\nend\"")
            .build();
    org.graylog.plugins.pipelineprocessor.ast.Rule rule1 = org.graylog.plugins.pipelineprocessor.ast.Rule.builder()
            .id("1")
            .name("debug")
            .when(mock(LogicalExpression.class))
            .then(Collections.emptyList())
            .build();
    RuleDao ruleDao2 = RuleDao.builder()
            .id("2342353045938450346")
            .title("no-op")
            .source("rule \\\"debug\\\"\\nrule \\\"no-op\\\"\\nend\"")
            .build();
    org.graylog.plugins.pipelineprocessor.ast.Rule rule2 = org.graylog.plugins.pipelineprocessor.ast.Rule.builder()
            .id("2")
            .name("no-op")
            .when(mock(LogicalExpression.class))
            .then(Collections.emptyList())
            .build();
    stage.setRules(ImmutableList.of(rule1, rule2));
    final Pipeline pipeline = Pipeline.builder()
            .id("5a85c4854b900afd5d662be3")
            .name("Test")
            .stages(ImmutableSortedSet.of(stage))
            .build();
    when(pipelineRuleParser.parsePipeline(eq("dummy"), anyString())).thenReturn(pipeline);
    // Fix: stub each rule name with the DAO that actually carries that title.
    // Previously "no-op" was stubbed to return the "debug" DAO and vice versa.
    when(ruleService.findByName("debug")).thenReturn(Optional.of(ruleDao1));
    when(ruleService.findByName("no-op")).thenReturn(Optional.of(ruleDao2));
    final EntityDescriptor pipelineEntity = EntityDescriptor.create("5a85c4854b900afd5d662be3", ModelTypes.PIPELINE_V1);
    final Graph<EntityDescriptor> graph = facade.resolveNativeEntity(pipelineEntity);
    final EntityDescriptor streamEntity = EntityDescriptor.create("5adf23894b900a0fdb4e517d", ModelTypes.STREAM_REF_V1);
    final EntityDescriptor ruleEntity1 = EntityDescriptor.create("2342353045938450345", ModelTypes.PIPELINE_RULE_V1);
    final EntityDescriptor ruleEntity2 = EntityDescriptor.create("2342353045938450346", ModelTypes.PIPELINE_RULE_V1);
    assertThat(graph.nodes())
            .containsOnly(pipelineEntity, streamEntity, ruleEntity1, ruleEntity2);
}
|
/**
 * Merges disk ranges that are within {@code maxMergeDistance} of each other, as long
 * as the merged span does not exceed {@code maxReadSize}.
 *
 * @param diskRanges ranges to merge; must not be empty
 * @return merged ranges ordered by start offset
 * @throws IllegalArgumentException if {@code diskRanges} is empty
 */
public static List<DiskRange> mergeAdjacentDiskRanges(Collection<DiskRange> diskRanges, DataSize maxMergeDistance, DataSize maxReadSize)
{
    // Fail fast with a clear message instead of an IndexOutOfBoundsException below.
    if (diskRanges.isEmpty()) {
        throw new IllegalArgumentException("diskRanges is empty");
    }
    // sort ranges by start offset
    List<DiskRange> ranges = new ArrayList<>(diskRanges);
    ranges.sort(Comparator.comparingLong(DiskRange::getOffset));
    // merge overlapping ranges
    long maxReadSizeBytes = maxReadSize.toBytes();
    long maxMergeDistanceBytes = maxMergeDistance.toBytes();
    ImmutableList.Builder<DiskRange> result = ImmutableList.builder();
    DiskRange last = ranges.get(0);
    for (int i = 1; i < ranges.size(); i++) {
        DiskRange current = ranges.get(i);
        DiskRange merged = last.span(current);
        // Merge only when the combined span stays readable in one shot and the
        // gap between the ranges is within the merge distance.
        if (merged.getLength() <= maxReadSizeBytes && last.getEnd() + maxMergeDistanceBytes >= current.getOffset()) {
            last = merged;
        }
        else {
            result.add(last);
            last = current;
        }
    }
    result.add(last);
    return result.build();
}
|
@Test
public void testMergeGap()
{
    // Ranges separated by a consistent 10-byte gap merge only once the merge
    // distance reaches the gap size.
    List<DiskRange> consistent10ByteGap = ImmutableList.of(new DiskRange(100, 90), new DiskRange(200, 90), new DiskRange(300, 90));
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(0, BYTE), new DataSize(1, GIGABYTE)), consistent10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(9, BYTE), new DataSize(1, GIGABYTE)), consistent10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(10, BYTE), new DataSize(1, GIGABYTE)), ImmutableList.of(new DiskRange(100, 290)));
    assertEquals(mergeAdjacentDiskRanges(consistent10ByteGap, new DataSize(100, BYTE), new DataSize(1, GIGABYTE)), ImmutableList.of(new DiskRange(100, 290)));
    // Mixed gaps (20/10/20 bytes): a 10-byte merge distance merges only the middle pair.
    List<DiskRange> middle10ByteGap = ImmutableList.of(new DiskRange(100, 80), new DiskRange(200, 90), new DiskRange(300, 80), new DiskRange(400, 90));
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(0, BYTE), new DataSize(1, GIGABYTE)), middle10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(9, BYTE), new DataSize(1, GIGABYTE)), middle10ByteGap);
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(10, BYTE), new DataSize(1, GIGABYTE)),
            ImmutableList.of(new DiskRange(100, 80), new DiskRange(200, 180), new DiskRange(400, 90)));
    assertEquals(mergeAdjacentDiskRanges(middle10ByteGap, new DataSize(100, BYTE), new DataSize(1, GIGABYTE)), ImmutableList.of(new DiskRange(100, 390)));
}
|
/**
 * Static factory for an {@link OrderedEventProcessor} wrapping the given
 * ordered-processing handler (instantiates the AutoValue-generated implementation).
 */
public static <
        EventTypeT,
        EventKeyTypeT,
        ResultTypeT,
        StateTypeT extends MutableState<EventTypeT, ResultTypeT>>
    OrderedEventProcessor<EventTypeT, EventKeyTypeT, ResultTypeT, StateTypeT> create(
        OrderedProcessingHandler<EventTypeT, EventKeyTypeT, StateTypeT, ResultTypeT> handler) {
  return new AutoValue_OrderedEventProcessor<>(handler);
}
|
@Test
public void testPerfectOrderingProcessing() throws CannotProvideCoderException {
    // Events for two keys arrive already in sequence order; every event should be
    // processed immediately with no buffering and no missing sequence ranges.
    Event[] events = {
        Event.create(0, "id-1", "a"),
        Event.create(1, "id-1", "b"),
        Event.create(2, "id-1", "c"),
        Event.create(3, "id-1", "d"),
        Event.create(0, "id-2", "a"),
        Event.create(1, "id-2", "b")
    };
    // Expected final status per key: last sequence processed, nothing buffered.
    Collection<KV<String, OrderedProcessingStatus>> expectedStatuses = new ArrayList<>();
    expectedStatuses.add(
        KV.of(
            "id-1",
            OrderedProcessingStatus.create(
                3L,
                0,
                null,
                null,
                4,
                Arrays.stream(events).filter(e -> e.getKey().equals("id-1")).count(),
                0,
                false)));
    expectedStatuses.add(
        KV.of(
            "id-2",
            OrderedProcessingStatus.create(
                1L,
                0,
                null,
                null,
                2,
                Arrays.stream(events).filter(e -> e.getKey().equals("id-2")).count(),
                0,
                false)));
    // Output accumulates the concatenation of processed values per key.
    Collection<KV<String, String>> expectedOutput = new ArrayList<>();
    expectedOutput.add(KV.of("id-1", "a"));
    expectedOutput.add(KV.of("id-1", "ab"));
    expectedOutput.add(KV.of("id-1", "abc"));
    expectedOutput.add(KV.of("id-1", "abcd"));
    expectedOutput.add(KV.of("id-2", "a"));
    expectedOutput.add(KV.of("id-2", "ab"));
    testProcessing(
        events,
        expectedStatuses,
        expectedOutput,
        EMISSION_FREQUENCY_ON_EVERY_ELEMENT,
        INITIAL_SEQUENCE_OF_0,
        LARGE_MAX_RESULTS_PER_OUTPUT,
        DONT_PRODUCE_STATUS_ON_EVERY_EVENT);
}
|
/**
 * Convenience hook for items arriving on ordinal 2; forwards to
 * {@code tryProcess(2, item)} and returns its result.
 */
protected boolean tryProcess2(@Nonnull Object item) throws Exception {
    return tryProcess(2, item);
}
|
@Test
public void when_tryProcess2_then_delegatesToTryProcess() throws Exception {
    // When: the ordinal-2 convenience method is invoked
    boolean done = p.tryProcess2(MOCK_ITEM);
    // Then: it reports success and the item was received on ordinal 2
    assertTrue(done);
    p.validateReceptionOfItem(ORDINAL_2, MOCK_ITEM);
}
|
/**
 * Builds a Docker V2.2 manifest list covering all configured images, one
 * manifest descriptor (media type, size, digest, platform) per image.
 *
 * @param manifestTemplateClass must be {@link V22ManifestTemplate}; OCI image
 *     indexes are not supported yet
 * @return the populated {@link V22ManifestListTemplate}
 * @throws IOException if digest computation fails
 */
public <T extends BuildableManifestTemplate> ManifestTemplate getManifestListTemplate(
    Class<T> manifestTemplateClass) throws IOException {
    Preconditions.checkArgument(
        manifestTemplateClass == V22ManifestTemplate.class,
        "Build an OCI image index is not yet supported");
    Preconditions.checkState(!images.isEmpty(), "no images given");
    V22ManifestListTemplate listTemplate = new V22ManifestListTemplate();
    for (Image image : images) {
        ImageToJsonTranslator translator = new ImageToJsonTranslator(image);
        // Digest of the container configuration, referenced by the image manifest.
        BlobDescriptor configDescriptor =
            Digests.computeDigest(translator.getContainerConfiguration());
        BuildableManifestTemplate imageManifest =
            translator.getManifestTemplate(manifestTemplateClass, configDescriptor);
        // Digest of the image manifest itself, referenced by the manifest list.
        BlobDescriptor manifestDigest = Digests.computeDigest(imageManifest);
        ManifestDescriptorTemplate descriptor = new ManifestDescriptorTemplate();
        descriptor.setMediaType(imageManifest.getManifestMediaType());
        descriptor.setSize(manifestDigest.getSize());
        descriptor.setDigest(manifestDigest.getDigest().toString());
        descriptor.setPlatform(image.getArchitecture(), image.getOs());
        listTemplate.addManifest(descriptor);
    }
    return listTemplate;
}
|
@Test
public void testGetManifestListTemplate() throws IOException {
    // Expected Manifest List JSON
    // {
    //  "schemaVersion":2,
    //  "mediaType":"application/vnd.docker.distribution.manifest.list.v2+json",
    //  "manifests":[
    //    {
    //      "mediaType":"application/vnd.docker.distribution.manifest.v2+json",
    //      "digest":"sha256:1f25787aab4669d252bdae09a72b9c345d2a7b8c64c8dbfba4c82af4834dbccc",
    //      "size":264,
    //      "platform":{
    //        "architecture":"amd64",
    //        "os":"linux"
    //      }
    //    },
    //    {
    //      "mediaType":"application/vnd.docker.distribution.manifest.v2+json",
    //      "digest":"sha256:51038a7a91c0e8f747e05dd84c3b0393a7016ec312ce384fc945356778497ae3",
    //      "size":264,
    //      "platform":{
    //        "architecture":"arm64",
    //        "os":"windows"
    //      }
    //    }
    //  ]
    // }
    // Each configured image must appear under its own platform with a stable digest.
    ManifestTemplate manifestTemplate =
        manifestListGenerator.getManifestListTemplate(V22ManifestTemplate.class);
    Assert.assertTrue(manifestTemplate instanceof V22ManifestListTemplate);
    V22ManifestListTemplate manifestList = (V22ManifestListTemplate) manifestTemplate;
    Assert.assertEquals(2, manifestList.getSchemaVersion());
    Assert.assertEquals(
        Arrays.asList("sha256:1f25787aab4669d252bdae09a72b9c345d2a7b8c64c8dbfba4c82af4834dbccc"),
        manifestList.getDigestsForPlatform("amd64", "linux"));
    Assert.assertEquals(
        Arrays.asList("sha256:51038a7a91c0e8f747e05dd84c3b0393a7016ec312ce384fc945356778497ae3"),
        manifestList.getDigestsForPlatform("arm64", "windows"));
}
|
/**
 * Completes the given send request by selecting inputs, calculating fees,
 * adding a change output if needed, optionally shuffling outputs and signing.
 *
 * <p>The statement order below is significant (input connection before value
 * calculation, selection before fee adjustment, shuffle before signing is NOT
 * allowed — signing happens after shuffling so signatures cover final order).
 *
 * @param req the request to complete; marked {@code completed} on success
 * @throws InsufficientMoneyException if the wallet cannot cover outputs plus fee
 * @throws CompletionException for dusty sends, multiple OP_RETURNs, failed
 *         downward adjustment, or an oversized transaction
 */
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
    lock.lock();
    try {
        checkArgument(!req.completed, () ->
                "given SendRequest has already been completed");
        log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(),
                req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());
        // Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
        // with the actual outputs that'll be used to gather the required amount of value. In this way, users
        // can customize coin selection policies. The call below will ignore immature coinbases and outputs
        // we don't have the keys for.
        List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);
        // Connect (add a value amount) unconnected inputs
        List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
        req.tx.clearInputs();
        inputs.forEach(req.tx::addInput);
        // Warn if there are remaining unconnected inputs whose value we do not know
        // TODO: Consider throwing if there are inputs that we don't have a value for
        if (req.tx.getInputs().stream()
                .map(TransactionInput::getValue)
                .anyMatch(Objects::isNull))
            log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");
        // If any inputs have already been added, we don't need to get their value from wallet
        Coin totalInput = req.tx.getInputSum();
        // Calculate the amount of value we need to import.
        Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);
        // Enforce the OP_RETURN limit
        if (req.tx.getOutputs().stream()
                .filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
                .count() > 1) // Only 1 OP_RETURN per transaction allowed.
            throw new MultipleOpReturnRequested();
        // Check for dusty sends
        if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
            if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
                throw new DustySendRequested();
        }
        // Filter out candidates that are already included in the transaction inputs
        List<TransactionOutput> candidates = prelimCandidates.stream()
                .filter(output -> alreadyIncluded(req.tx.getInputs(), output))
                .collect(StreamUtils.toUnmodifiableList());
        CoinSelection bestCoinSelection;
        TransactionOutput bestChangeOutput = null;
        List<Coin> updatedOutputValues = null;
        if (!req.emptyWallet) {
            // This can throw InsufficientMoneyException.
            FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
            bestCoinSelection = feeCalculation.bestCoinSelection;
            bestChangeOutput = feeCalculation.bestChangeOutput;
            updatedOutputValues = feeCalculation.updatedOutputValues;
        } else {
            // We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
            // of the total value we can currently spend as determined by the selector, and then subtracting the fee.
            checkState(req.tx.getOutputs().size() == 1, () ->
                    "empty wallet TX must have a single output only");
            CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
            bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
            candidates = null;  // Selector took ownership and might have changed candidates. Don't access again.
            req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
            log.info("  emptying {}", bestCoinSelection.totalValue().toFriendlyString());
        }
        bestCoinSelection.outputs()
                .forEach(req.tx::addInput);
        if (req.emptyWallet) {
            if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
                throw new CouldNotAdjustDownwards();
        }
        if (updatedOutputValues != null) {
            for (int i = 0; i < updatedOutputValues.size(); i++) {
                req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
            }
        }
        if (bestChangeOutput != null) {
            req.tx.addOutput(bestChangeOutput);
            log.info("  with {} change", bestChangeOutput.getValue().toFriendlyString());
        }
        // Now shuffle the outputs to obfuscate which is the change.
        if (req.shuffleOutputs)
            req.tx.shuffleOutputs();
        // Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
        if (req.signInputs)
            signTransaction(req);
        // Check size.
        final int size = req.tx.messageSize();
        if (size > Transaction.MAX_STANDARD_TX_SIZE)
            throw new ExceededMaxTransactionSize();
        // Label the transaction as being self created. We can use this later to spend its change output even before
        // the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
        // point - the user isn't interested in a confidence transition they made themselves.
        getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
        // Label the transaction as being a user requested payment. This can be used to render GUI wallet
        // transaction lists more appropriately, especially when the wallet starts to generate transactions itself
        // for internal purposes.
        req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
        // Record the exchange rate that was valid when the transaction was completed.
        req.tx.setExchangeRate(req.exchangeRate);
        req.tx.setMemo(req.memo);
        req.completed = true;
        log.info("  completed: {}", req.tx);
    } finally {
        lock.unlock();
    }
}
|
@Test(expected = Wallet.DustySendRequested.class)
public void sendDustAndMessageWithValueTest() throws Exception {
    // Tests sending dust and OP_RETURN with value, should throw DustySendRequested
    receiveATransaction(wallet, myAddress);
    Transaction tx = new Transaction();
    tx.addOutput(Coin.CENT, ScriptBuilder.createOpReturnScript("hello world!".getBytes()));
    // One satoshi below the dust threshold makes the second output dusty.
    Coin dustThreshold = new TransactionOutput(null, Coin.COIN, OTHER_ADDRESS).getMinNonDustValue();
    tx.addOutput(dustThreshold.subtract(SATOSHI), OTHER_ADDRESS);
    SendRequest request = SendRequest.forTx(tx);
    request.ensureMinRequiredFee = true;
    wallet.completeTx(request);
}
|
/**
 * Verifies a block: first the header, then the transactions against the given
 * network rules, block height and verification flags.
 *
 * @throws VerificationException if either the header or a transaction is invalid
 */
public static void verify(NetworkParameters params, Block block, int height, EnumSet<VerifyFlag> flags) throws VerificationException {
    verifyHeader(block);
    verifyTransactions(params, block, height, flags);
}
|
@Test
public void testBlockVerification() {
    // A known-good testnet block must verify without throwing (no extra flags).
    Block.verify(TESTNET, block700000, Block.BLOCK_HEIGHT_GENESIS, EnumSet.noneOf(Block.VerifyFlag.class));
}
|
/**
 * Reacts to configuration changes of the endpoint grouping rules.
 *
 * <p>On DELETE the stored rule text is cleared and grouping falls back to the
 * quick URI heuristic; any other event re-parses the new rule set.
 */
@Override
public void notify(final ConfigChangeEvent value) {
    if (value.getEventType().equals(EventType.DELETE)) {
        ruleSetting = null;
        grouping.setEndpointGroupingRule(new QuickUriGroupingRule());
        return;
    }
    ruleSetting = value.getNewValue();
    grouping.setEndpointGroupingRule(new EndpointGroupingRuleReader(new StringReader(ruleSetting)).read());
}
|
@Test
public void testWatcher() throws FileNotFoundException {
    // Minimal no-op ModuleProvider needed only to construct the watcher.
    EndpointNameGrouping endpointNameGrouping = new EndpointNameGrouping();
    EndpointNameGroupingRuleWatcher watcher = new EndpointNameGroupingRuleWatcher(
        new ModuleProvider() {
            @Override
            public String name() {
                return "test";
            }
            @Override
            public Class<? extends ModuleDefine> module() {
                return CoreModule.class;
            }
            @Override
            public ConfigCreator newConfigCreator() {
                return null;
            }
            @Override
            public void prepare() throws ServiceNotProvidedException, ModuleStartException {
            }
            @Override
            public void start() throws ServiceNotProvidedException, ModuleStartException {
            }
            @Override
            public void notifyAfterCompleted() throws ServiceNotProvidedException, ModuleStartException {
            }
            @Override
            public String[] requiredModules() {
                return new String[0];
            }
        }, endpointNameGrouping);
    // Before any config event, the default (quick URI) grouping applies.
    Assertions.assertEquals("/prod/{var}", endpointNameGrouping.format("serviceA", "/prod/123")._1());
    // MODIFY installs the explicit rules, which then win over the heuristic.
    watcher.notify(new ConfigChangeWatcher.ConfigChangeEvent(
        "grouping:\n" +
            "  # Endpoint of the service would follow the following rules\n" +
            "  - service-name: serviceA\n" +
            "    rules:\n" +
            "      - /prod/{var}\n" +
            "      - /prod/{var}/info\n"
        , ConfigChangeWatcher.EventType.MODIFY
    ));
    Assertions.assertEquals("/prod/{var}/info", endpointNameGrouping.format("serviceA", "/prod/123/info")._1());
    // DELETE clears the rules; quick URI grouping is re-installed, and a fresh
    // quick-grouping instance has not yet learned this URI, so it passes through.
    watcher.notify(new ConfigChangeWatcher.ConfigChangeEvent("", ConfigChangeWatcher.EventType.DELETE));
    Assertions.assertEquals("/prod/123", endpointNameGrouping.format("serviceA", "/prod/123")._1());
}
|
/**
 * Unsupported: this buffer is read-only, so every mutation attempt fails.
 *
 * @throws ReadOnlyBufferException always
 */
@Override
public ByteBuf setMedium(int index, int value) {
    throw new ReadOnlyBufferException();
}
|
@Test
public void shouldRejectSetMedium() {
    // setMedium on an unmodifiable buffer throws ReadOnlyBufferException. Assert
    // the precise type rather than its UnsupportedOperationException supertype
    // (ReadOnlyBufferException extends UnsupportedOperationException, so the old
    // assertion passed but was weaker than intended). Fully qualified to avoid
    // requiring a new import.
    assertThrows(java.nio.ReadOnlyBufferException.class, new Executable() {
        @Override
        public void execute() {
            unmodifiableBuffer(EMPTY_BUFFER).setMedium(0, 0);
        }
    });
}
|
/**
 * Resolves a WeChat Mini App user's phone number from the one-time phone code.
 *
 * <p>A failing WeChat API call is logged and translated into the service
 * exception {@code SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR}.
 *
 * @param userType  selects which configured Mini App client to use
 * @param phoneCode one-time code obtained from the Mini App front end
 * @return the phone number information returned by WeChat
 */
@Override
public WxMaPhoneNumberInfo getWxMaPhoneNumberInfo(Integer userType, String phoneCode) {
    WxMaService service = getWxMaService(userType);
    try {
        return service.getUserService().getPhoneNoInfo(phoneCode);
    } catch (WxErrorException e) {
        log.error("[getPhoneNoInfo][userType({}) phoneCode({}) 获得手机号失败]", userType, phoneCode, e);
        throw exception(SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR);
    }
}
|
@Test
public void testGetWxMaPhoneNumberInfo_exception() throws WxErrorException {
    // Prepare arguments
    Integer userType = randomPojo(UserTypeEnum.class).getValue();
    String phoneCode = randomString();
    // Mock collaborators: the WeChat user service fails with a WxErrorException
    WxMaUserService userService = mock(WxMaUserService.class);
    when(wxMaService.getUserService()).thenReturn(userService);
    WxErrorException wxErrorException = randomPojo(WxErrorException.class);
    when(userService.getPhoneNoInfo(eq(phoneCode))).thenThrow(wxErrorException);
    // Invoke and assert the translated service exception
    assertServiceException(() -> socialClientService.getWxMaPhoneNumberInfo(userType, phoneCode),
            SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR);
}
|
/**
 * Extends the lock most recently acquired on the current thread.
 *
 * @param lockAtMostFor  new upper bound on how long the lock is held
 * @param lockAtLeastFor new lower bound on how long the lock is held
 * @throws NoActiveLockException          if no lock is held on this thread
 * @throws LockCanNotBeExtendedException  if the provider refuses the extension
 */
public static void extendActiveLock(Duration lockAtMostFor, Duration lockAtLeastFor) {
    SimpleLock active = locks().peekLast();
    if (active == null) throw new NoActiveLockException();
    SimpleLock extended =
            active.extend(lockAtMostFor, lockAtLeastFor).orElseThrow(LockCanNotBeExtendedException::new);
    // The deque is thread-local, so this remove-then-add swap cannot race with
    // other threads.
    locks().removeLast();
    locks().addLast(extended);
}
|
@Test
void shouldFailIfNoActiveLock() {
    // With no lock held on the current thread, extending must fail fast.
    assertThatThrownBy(() -> LockExtender.extendActiveLock(ofSeconds(1), ofSeconds(0)))
        .isInstanceOf(NoActiveLockException.class);
}
|
/**
 * Encodes this OAUTHBEARER client initial response as UTF-8 bytes:
 * GS2 header ("n," + optional "a=&lt;authzid&gt;" + ","), the bearer token
 * key/value pair, any extensions, and the two trailing separators.
 */
public byte[] toBytes() {
    String authzid = authorizationId.isEmpty() ? "" : "a=" + authorizationId;
    String extensions = extensionsMessage();
    // Extensions, when present, are preceded by a separator.
    String extensionsPart = extensions.isEmpty() ? "" : SEPARATOR + extensions;
    String message = "n," + authzid + "," + SEPARATOR + "auth=Bearer " + tokenValue
            + extensionsPart + SEPARATOR + SEPARATOR;
    return message.getBytes(StandardCharsets.UTF_8);
}
|
@Test
public void testBuildClientResponseToBytes() throws Exception {
    // \u0001 is the SASL message field separator used by the OAUTHBEARER mechanism.
    String expectedMessage = "n,,\u0001auth=Bearer 123.345.567\u0001nineteen=42\u0001\u0001";
    Map<String, String> extensions = new HashMap<>();
    extensions.put("nineteen", "42");
    OAuthBearerClientInitialResponse response = new OAuthBearerClientInitialResponse("123.345.567", new SaslExtensions(extensions));
    String message = new String(response.toBytes(), StandardCharsets.UTF_8);
    assertEquals(expectedMessage, message);
}
|
/** Returns the builder class used to convert this function to and from storage entities. */
@Override
public Class<? extends MinLabeledStorageBuilder> builder() {
    return MinLabeledStorageBuilder.class;
}
|
@Test
public void testBuilder() throws IllegalAccessException, InstantiationException {
    // Accumulate one sample, persist via the declared builder, then re-hydrate
    // and verify the round trip preserves the labeled value table.
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1);
    function.calculate();
    StorageBuilder<MinLabeledFunction> storageBuilder = function.builder().newInstance();
    final HashMapConverter.ToStorage toStorage = new HashMapConverter.ToStorage();
    storageBuilder.entity2Storage(function, toStorage);
    final Map<String, Object> map = toStorage.obtain();
    // Simulate the storage layer, which holds the value in its serialized form.
    map.put(MinLabeledFunction.VALUE, ((DataTable) map.get(MinLabeledFunction.VALUE)).toStorageData());
    MinLabeledFunction function2 = storageBuilder.storage2Entity(new HashMapConverter.ToEntity(map));
    assertThat(function2.getValue()).isEqualTo(function.getValue());
}
|
/**
 * Picks a suggestion, delegating to the three-argument overload with the
 * keyboard's current auto-space setting ({@code mAutoSpace}).
 */
public void pickSuggestionManually(int index, CharSequence suggestion) {
    pickSuggestionManually(index, suggestion, mAutoSpace);
}
|
@Test
public void testNextWordDeleteAfterPick() {
    // Train the next-word model with repeated "hello face" pairs, then verify
    // picking the predicted word appends it (with auto-space) and that DELETE
    // removes only the trailing space added by the pick.
    mAnySoftKeyboardUnderTest.simulateTextTyping("hello face hello face hello face hello face ");
    mAnySoftKeyboardUnderTest.simulateTextTyping("hello ");
    verifySuggestions(true, "face");
    mAnySoftKeyboardUnderTest.pickSuggestionManually(0, "face");
    TestRxSchedulers.drainAllTasks();
    Assert.assertEquals(
        "hello face hello face hello face hello face hello face ",
        getCurrentTestInputConnection().getCurrentTextInInputConnection());
    mAnySoftKeyboardUnderTest.simulateKeyPress(KeyCodes.DELETE);
    Assert.assertEquals(
        "hello face hello face hello face hello face hello face",
        getCurrentTestInputConnection().getCurrentTextInInputConnection());
}
|
/**
 * Finds the first registered SERVICE_FINGERPRINT plugin whose declared service
 * names match the given network service.
 *
 * @param networkService the service to fingerprint
 * @return the matching fingerprinter wrapped with its plugin definition, or
 *     empty when no fingerprinter matches
 */
public Optional<PluginMatchingResult<ServiceFingerprinter>> getServiceFingerprinter(
    NetworkService networkService) {
  return tsunamiPlugins.entrySet().stream()
      .filter(
          entry ->
              entry.getKey().type().equals(PluginType.SERVICE_FINGERPRINT)
                  && hasMatchingServiceName(networkService, entry.getKey()))
      .findFirst()
      .map(
          entry ->
              PluginMatchingResult.<ServiceFingerprinter>builder()
                  .setPluginDefinition(entry.getKey())
                  .setTsunamiPlugin((ServiceFingerprinter) entry.getValue().get())
                  .addMatchedService(networkService)
                  .build());
}
|
@Test
public void getServiceFingerprinter_whenNoFingerprinterMatches_returnsEmpty() {
    // The fake fingerprinter module does not cover "https", so the lookup must
    // come back empty rather than matching an unrelated plugin.
    NetworkService httpsService =
        NetworkService.newBuilder()
            .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80))
            .setTransportProtocol(TransportProtocol.TCP)
            .setServiceName("https")
            .build();
    PluginManager pluginManager =
        Guice.createInjector(
            new FakePortScannerBootstrapModule(), new FakeServiceFingerprinterBootstrapModule())
            .getInstance(PluginManager.class);
    Optional<PluginMatchingResult<ServiceFingerprinter>> fingerprinter =
        pluginManager.getServiceFingerprinter(httpsService);
    assertThat(fingerprinter).isEmpty();
}
|
/**
 * Fails the assertion unless the actual value lies within the given range.
 * The actual value must be non-null.
 */
public final void isIn(Range<T> range) {
    T value = checkNotNull(actual);
    if (range.contains(value)) {
        return;
    }
    failWithActual("expected to be in range", range);
}
|
@Test
public void isInRange() {
    // 4 is inside [1, 5]; 6 is outside, and the failure must report the range.
    Range<Integer> oneToFive = Range.closed(1, 5);
    assertThat(4).isIn(oneToFive);
    expectFailureWhenTestingThat(6).isIn(oneToFive);
    assertThat(expectFailure.getFailure())
        .factValue("expected to be in range")
        .isEqualTo(oneToFive.toString());
}
|
/**
 * Splits {@code s} into tokens while keeping each space as its own token,
 * e.g. {@code "a b"} becomes {@code ["a", " ", "b"]}. The zero-width
 * look-behind/look-ahead delimiters split around spaces without consuming them.
 *
 * @param s the string to tokenize; must not be null
 * @return the tokens of {@code s}, including the separating spaces
 */
public static String[] tokenizeOnSpace(String s)
{
    // String.split already returns a String[]; the previous
    // Arrays.stream(...).toArray(String[]::new) round trip only made a redundant copy.
    return s.split("(?<=" + StringUtil.PATTERN_SPACE + ")|(?=" + StringUtil.PATTERN_SPACE + ")");
}
|
@Test
void testTokenizeOnSpace_happyPath()
{
    // Spaces are preserved as standalone tokens between the words.
    String[] result = StringUtil.tokenizeOnSpace("a b c");
    assertArrayEquals(new String[] {"a", " ", "b", " ", "c"}, result);
}
|
/**
 * Builds a serializable function converting a Beam {@link Row} into the binary
 * protobuf encoding of the message named {@code messageName} as described by
 * the textual proto schema {@code schemaString}.
 *
 * @param schemaString proto schema definition text
 * @param messageName fully qualified message name inside the schema
 * @return a function serializing rows to proto wire bytes
 */
public static SerializableFunction<Row, byte[]> getRowToProtoBytesFromSchema(
    String schemaString, String messageName) {
  Descriptors.Descriptor descriptor = getDescriptorFromProtoSchema(schemaString, messageName);
  ProtoDynamicMessageSchema<DynamicMessage> protoDynamicMessageSchema =
      ProtoDynamicMessageSchema.forDescriptor(ProtoDomain.buildFrom(descriptor), descriptor);
  return new SimpleFunction<Row, byte[]>() {
    @Override
    public byte[] apply(Row input) {
      // NOTE(review): the from-row function is re-resolved on every apply —
      // presumably so it is rebuilt after the enclosing SimpleFunction is
      // deserialized on workers; confirm before hoisting it out of apply().
      SerializableFunction<Row, DynamicMessage> res =
          protoDynamicMessageSchema.getFromRowFunction();
      return res.apply(input).toByteArray();
    }
  };
}
|
@Test
public void testRowToProtoSchemaWithPackageFunction() {
    // Build a row matching the nested schema, convert it with the generated
    // function, and compare against the expected proto wire bytes.
    Row row =
        Row.withSchema(SCHEMA)
            .withFieldValue("id", 1234)
            .withFieldValue("name", "Doe")
            .withFieldValue("active", false)
            .withFieldValue("address.city", "seattle")
            .withFieldValue("address.street", "fake street")
            .withFieldValue("address.zip_code", "TO-1234")
            .withFieldValue("address.state", "wa")
            .build();
    // Pre-computed wire encoding of the row above for com.test.proto.MyMessage.
    byte[] byteArray = {
      8, -46, 9, 18, 3, 68, 111, 101, 34, 35, 10, 7, 115, 101, 97, 116, 116, 108, 101, 18, 11, 102,
      97, 107, 101, 32, 115, 116, 114, 101, 101, 116, 26, 2, 119, 97, 34, 7, 84, 79, 45, 49, 50, 51,
      52
    };
    byte[] resultBytes =
        ProtoByteUtils.getRowToProtoBytesFromSchema(
            PROTO_STRING_PACKAGE_SCHEMA, "com.test.proto.MyMessage")
            .apply(row);
    Assert.assertNotNull(resultBytes);
    Assert.assertArrayEquals(byteArray, resultBytes);
}
|
/**
 * Checks whether a node exists at the given ZooKeeper path, delegating to the
 * underlying client.
 *
 * @param path absolute node path
 * @return true if the node exists
 */
public boolean ifNodeExist(String path) {
    return zkClient.ifNodeExist(path);
}
|
@Test
public void testIfNodeExist() {
    // PARENT_PATH is created by the test fixture, so the existence check must pass.
    Assert.assertTrue(zooKeeperBufferedClient.ifNodeExist(PARENT_PATH));
}
|
/**
 * FEEL years-and-months duration: the whole-month difference between two
 * date-like temporals, with the day component zeroed out.
 * Validation is ordered so "from" problems are reported before "to" problems.
 */
public FEELFnResult<TemporalAmount> invoke(@ParameterName("from") Temporal from, @ParameterName("to") Temporal to) {
    if (from == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
    }
    if (to == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "cannot be null"));
    }
    final LocalDate start = getLocalDateFromTemporal(from);
    if (start == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "is of type not suitable for years and months function"));
    }
    final LocalDate end = getLocalDateFromTemporal(to);
    if (end == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "is of type not suitable for years and months function"));
    }
    // Days are deliberately dropped: the FEEL spec defines this duration in
    // years and months only.
    return FEELFnResult.ofResult(new ComparablePeriod(Period.between(start, end).withDays(0)));
}
|
@Test
void invokeUnsupportedTemporal() {
    // Instant has no date component, so it must be rejected whether it appears
    // as the "from" argument or the "to" argument.
    FunctionTestUtil.assertResultError(yamFunction.invoke(Instant.EPOCH, Instant.EPOCH),
                                       InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(yamFunction.invoke(LocalDate.of(2017, 1, 1), Instant.EPOCH),
                                       InvalidParametersEvent.class);
}
|
/**
 * Checks whether the given name matches any configured cipher column,
 * ignoring case.
 */
public boolean isCipherColumn(final String columnName) {
    return columns.values().stream()
            .map(each -> each.getCipher().getName())
            .anyMatch(cipherName -> cipherName.equalsIgnoreCase(columnName));
}
|
@Test
void assertIsCipherColumn() {
    // Cipher column matching is case-insensitive.
    assertTrue(encryptTable.isCipherColumn("CipherColumn"));
}
|
/**
 * Returns the ids of the entities removed in this batch-delete event.
 * NOTE(review): the internal list is exposed directly — callers are presumably
 * expected not to mutate it; confirm before adding a defensive copy.
 */
public List<String> getDeletedIds() {
    return deletedIds;
}
|
@Test
public void testGetDeletedIds() {
    // The event must carry exactly the ids of the deleted roles, both for a
    // populated source list and for an empty one.
    List<String> ids = roleDOList
            .stream()
            .map(BaseDO::getId)
            .collect(Collectors.toList());
    assertEquals(ids, batchRoleDeletedEventTest.getDeletedIds());
    List<String> emptyIds = emptyRoleDOList
            .stream()
            .map(BaseDO::getId)
            .collect(Collectors.toList());
    assertEquals(emptyIds, batchRoleDeletedEventEmptySourceTest.getDeletedIds());
}
|
/**
 * Resolves a field reference and returns its value converted to a plain Java
 * object, or null when the field is absent.
 */
@Override
public Object getField(final String reference) {
    final Object raw = getUnconvertedField(FieldReference.from(reference));
    if (raw == null) {
        return null;
    }
    return Javafier.deep(raw);
}
|
@Test
public void testGetFieldList() throws Exception {
    // List elements are addressable by index; the stored Integer is converted
    // to a Long by the Javafier layer.
    Map<String, Object> data = new HashMap<>();
    List<Object> l = new ArrayList<>();
    data.put("foo", l);
    l.add(1);
    Event e = new Event(data);
    assertEquals(1L, e.getField("[foo][0]"));
}
|
/**
 * Derives the Beam {@link Schema} for the given protobuf message type.
 * Dynamic message types are rejected by {@code checkForDynamicType}.
 */
@Override
public <T> @Nullable Schema schemaFor(TypeDescriptor<T> typeDescriptor) {
    checkForDynamicType(typeDescriptor);
    // Unchecked cast: the translator only needs the raw Message class.
    return ProtoSchemaTranslator.getSchema((Class<Message>) typeDescriptor.getRawType());
}
|
@Test
public void testEnumSchema() {
    // The proto enum message must translate to the expected pre-built schema.
    Schema schema = new ProtoMessageSchema().schemaFor(TypeDescriptor.of(EnumMessage.class));
    assertEquals(ENUM_SCHEMA, schema);
}
|
@ApiOperation(value = "Get a deployment resource", tags = { "Deployment" }, notes = "Replace ** by ResourceId")
/*
 * @ApiImplicitParams({
 *
 * @ApiImplicitParam(name = "resourceId", dataType = "string", value =
 * "The id of the resource to get. Make sure you URL-encode the resourceId in case it contains forward slashes. Eg: use diagrams%2Fmy-process.bpmn20.xml instead of diagrams/Fmy-process.bpmn20.xml."
 * , paramType = "path") })
 */
@ApiResponses(value = {
        @ApiResponse(code = 200, message = "Indicates both deployment and resource have been found and the resource has been returned."),
        @ApiResponse(code = 404, message = "Indicates the requested deployment was not found or there is no resource with the given id present in the deployment. The status-description contains additional information.")
})
@GetMapping(value = "/repository/deployments/{deploymentId}/resources/**", produces = "application/json")
public DeploymentResourceResponse getDeploymentResource(@ApiParam(name = "deploymentId") @PathVariable("deploymentId") String deploymentId, HttpServletRequest request) {
    // The resource name may itself contain forward slashes (e.g. org/flowable/model.bpmn2),
    // so the mapping uses ** and the name is recovered from the raw request path below.
    // See https://stackoverflow.com/questions/31421061/how-to-handle-requests-that-includes-forward-slashes/42403361#42403361
    Deployment deployment = repositoryService.createDeploymentQuery().deploymentId(deploymentId).singleResult();
    if (deployment == null) {
        throw new FlowableObjectNotFoundException("Could not find a deployment with id '" + deploymentId + "'.");
    }
    if (restApiInterceptor != null) {
        restApiInterceptor.accessDeploymentById(deployment);
    }
    // Strip the fixed URL prefix to obtain the (possibly slash-containing) resource name.
    String resourceName = request.getPathInfo().replace("/repository/deployments/" + deploymentId + "/resources/", "");
    if (!repositoryService.getDeploymentResourceNames(deploymentId).contains(resourceName)) {
        throw new FlowableObjectNotFoundException("Could not find a resource with id '" + resourceName + "' in deployment '" + deploymentId + "'.");
    }
    return restResponseFactory.createDeploymentResourceResponse(deploymentId, resourceName, contentTypeResolver.resolveContentType(resourceName));
}
|
@Test
public void testGetDeploymentResource() throws Exception {
    // Deploy a process plus an extra text resource, fetch the process resource
    // through the REST URL, and verify the JSON representation.
    try {
        String rawResourceName = "org/flowable/rest/service/api/repository/oneTaskProcess.bpmn20.xml";
        Deployment deployment = repositoryService.createDeployment().name("Deployment 1").addClasspathResource(rawResourceName)
                .addInputStream("test.txt", new ByteArrayInputStream("Test content".getBytes())).deploy();
        // Build up the URL manually to make sure resource-id gets encoded
        // correctly as one piece
        HttpGet httpGet = new HttpGet(buildUrl(RestUrls.URL_DEPLOYMENT_RESOURCE, deployment.getId(), rawResourceName));
        httpGet.addHeader(new BasicHeader(HttpHeaders.ACCEPT, "application/json"));
        CloseableHttpResponse response = executeRequest(httpGet, HttpStatus.SC_OK);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);
        assertThatJson(responseNode)
                .when(Option.IGNORING_EXTRA_FIELDS)
                .isEqualTo("{"
                        + " url: '" + buildUrl(RestUrls.URL_DEPLOYMENT_RESOURCE, deployment.getId(), rawResourceName) + "',"
                        + " contentUrl: '" + buildUrl(RestUrls.URL_DEPLOYMENT_RESOURCE_CONTENT, deployment.getId(), rawResourceName) + "',"
                        + " mediaType: 'text/xml',"
                        + " type: 'processDefinition'"
                        + "}");
    } finally {
        // Always cleanup any created deployments, even if the test failed
        List<Deployment> deployments = repositoryService.createDeploymentQuery().list();
        for (Deployment deployment : deployments) {
            repositoryService.deleteDeployment(deployment.getId(), true);
        }
    }
}
|
/** Parses the literal {@code "null"} and returns the resulting AST node. */
protected BaseNode getNullNode() {
    return parseNotEmptyInput("null");
}
|
@Test
void getNullNode() {
    // Parsing the "null" literal must yield a NullNode AST element.
    assertThat(rangeFunction.getNullNode()).isInstanceOf(NullNode.class);
}
|
/**
 * Checks reflectively whether the object is a Toolbar from AndroidX, the
 * legacy support library, or the framework — none of the three classes is a
 * hard compile-time dependency.
 */
public static boolean instanceOfToolbar(Object view) {
    return ReflectUtil.isInstance(view, "androidx.appcompat.widget.Toolbar", "android.support.v7.widget.Toolbar", "android.widget.Toolbar");
}
|
@Test
public void instanceOfToolbar() {
    // A CheckBox is unrelated to any Toolbar class, so the check must be false.
    CheckBox textView1 = new CheckBox(mApplication);
    textView1.setText("child1");
    Assert.assertFalse(SAViewUtils.instanceOfToolbar(textView1));
}
|
/**
 * Generates the Java sources for a PMML tree model: the per-node classes plus
 * the top-level model class instantiated from the class template.
 *
 * @param compilationDTO compilation context (fields, model, package, root node)
 * @return map of fully qualified class name to generated source text
 */
public static Map<String, String> getKiePMMLTreeModelSourcesMap(final TreeCompilationDTO compilationDTO) {
    logger.trace("getKiePMMLTreeModelSourcesMap {} {} {}", compilationDTO.getFields(),
                 compilationDTO.getModel(),
                 compilationDTO.getPackageName());
    final String className = compilationDTO.getSimpleClassName();
    final String packageName = compilationDTO.getPackageName();
    final CompilationUnit compilationUnit = JavaParserUtils.getKiePMMLModelCompilationUnit(className, packageName,
                                                                                          KIE_PMML_TREE_MODEL_TEMPLATE_JAVA,
                                                                                          KIE_PMML_TREE_MODEL_TEMPLATE);
    final ClassOrInterfaceDeclaration modelDeclaration = compilationUnit.getClassByName(className)
            .orElseThrow(() -> new KiePMMLException(MAIN_CLASS_NOT_FOUND + ": " + className));
    final KiePMMLNodeFactory.NodeNamesDTO nodeNamesDTO =
            new KiePMMLNodeFactory.NodeNamesDTO(compilationDTO.getNode(),
                                                createNodeClassName(), null, compilationDTO.getMissingValuePenalty());
    // Node sources first, then the model class wired to the root node class.
    final Map<String, String> sourcesMap = getKiePMMLNodeSourcesMap(nodeNamesDTO,
                                                                    compilationDTO.getFields(),
                                                                    packageName);
    setConstructor(compilationDTO,
                   modelDeclaration,
                   packageName + "." + nodeNamesDTO.nodeClassName);
    sourcesMap.put(packageName + "." + className, compilationUnit.toString());
    return sourcesMap;
}
|
@Test
void getKiePMMLTreeModelSourcesMap() {
    // Source generation must succeed for both sample tree models.
    CommonCompilationDTO<TreeModel> source = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
                                                                                                   pmml1,
                                                                                                   treeModel1,
                                                                                                   new PMMLCompilationContextMock(),
                                                                                                   SOURCE_1);
    Map<String, String> retrieved =
            KiePMMLTreeModelFactory.getKiePMMLTreeModelSourcesMap(TreeCompilationDTO.fromCompilationDTO(source));
    assertThat(retrieved).isNotNull();
    source = CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME,
                                                                    pmml2,
                                                                    treeModel2,
                                                                    new PMMLCompilationContextMock(),
                                                                    SOURCE_2);
    retrieved =
            KiePMMLTreeModelFactory.getKiePMMLTreeModelSourcesMap(TreeCompilationDTO.fromCompilationDTO(source));
    assertThat(retrieved).isNotNull();
}
|
/**
 * Validates that a transformation carries a description of at least
 * {@code minLength} characters. Disabled rules and non-TransMeta subjects
 * produce no feedback at all.
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
    List<ImportValidationFeedback> results = new ArrayList<>();
    if ( !isEnabled() || !( subject instanceof TransMeta ) ) {
        return results;
    }
    String description = ( (TransMeta) subject ).getDescription();
    boolean longEnough = description != null && description.length() >= minLength;
    if ( longEnough ) {
        results.add( new ImportValidationFeedback(
            this, ImportValidationResultType.APPROVAL, "A description is present" ) );
    } else {
        results.add( new ImportValidationFeedback(
            this, ImportValidationResultType.ERROR, "A description is not present or is too short." ) );
    }
    return results;
}
|
@Test
public void testVerifyRule_NullDescription_DisabledRule() {
    // A disabled rule must yield no feedback, regardless of the subject
    // (here: null) or the missing description.
    TransformationHasDescriptionImportRule importRule = getImportRule( 10, false );
    TransMeta transMeta = new TransMeta();
    transMeta.setDescription( null );
    List<ImportValidationFeedback> feedbackList = importRule.verifyRule( null );
    assertNotNull( feedbackList );
    assertTrue( feedbackList.isEmpty() );
}
|
/**
 * Delegates assignment to the wrapped prior-task assignor with rack-awareness
 * disabled (null RackAwareTaskAssignor), and always requests a follow-up
 * probing rebalance by returning true.
 */
@Override
public boolean assign(final Map<ProcessId, ClientState> clients,
                      final Set<TaskId> allTaskIds,
                      final Set<TaskId> statefulTaskIds,
                      final RackAwareTaskAssignor rackAwareTaskAssignor,
                      final AssignmentConfigs configs) {
    // Pass null for RackAwareTaskAssignor to disable it if we fallback
    delegate.assign(clients, allTaskIds, statefulTaskIds, null, configs);
    return true;
}
|
@Test
public void shouldViolateBalanceToPreserveActiveTaskStickiness() {
    // c1 previously owned all three tasks; stickiness must keep them on c1 even
    // though that leaves c2 empty, and a probing rebalance must be requested.
    final ClientState c1 = createClientWithPreviousActiveTasks(PID_1, 1, TASK_0_0, TASK_0_1, TASK_0_2);
    final ClientState c2 = createClient(PID_2, 1);
    final List<TaskId> taskIds = asList(TASK_0_0, TASK_0_1, TASK_0_2);
    // Shuffle to make sure the outcome does not depend on input order.
    Collections.shuffle(taskIds);
    final boolean probingRebalanceNeeded = new FallbackPriorTaskAssignor().assign(
        clients,
        new HashSet<>(taskIds),
        new HashSet<>(taskIds),
        null,
        new AssignmentConfigs(0L, 1, 0, 60_000L, EMPTY_RACK_AWARE_ASSIGNMENT_TAGS)
    );
    assertThat(probingRebalanceNeeded, is(true));
    assertThat(c1.activeTasks(), equalTo(mkSet(TASK_0_0, TASK_0_1, TASK_0_2)));
    assertThat(c2.activeTasks(), empty());
}
|
/**
 * Writes {@code key -> (value, timestamp)} into whichever inner store this
 * wrapper was initialized with. The timestamped branch is checked first;
 * exactly one of the two stores is expected to be non-null.
 *
 * @return {@code PUT_RETURN_CODE_IS_LATEST} for timestamped stores (no history
 *     is kept, so the write is always the latest); otherwise the versioned
 *     store's own return code
 * @throws IllegalStateException if neither store was provided at construction
 */
public long put(final K key, final V value, final long timestamp) {
    if (timestampedStore != null) {
        timestampedStore.put(key, ValueAndTimestamp.make(value, timestamp));
        return PUT_RETURN_CODE_IS_LATEST;
    }
    if (versionedStore != null) {
        return versionedStore.put(key, value, timestamp);
    }
    throw new IllegalStateException("KeyValueStoreWrapper must be initialized with either timestamped or versioned store");
}
|
@Test
public void shouldPutToTimestampedStore() {
    // A timestamped-backed wrapper must wrap the value with its timestamp and
    // report the write as the latest version.
    givenWrapperWithTimestampedStore();
    final long putReturnCode = wrapper.put(KEY, VALUE_AND_TIMESTAMP.value(), VALUE_AND_TIMESTAMP.timestamp());
    assertThat(putReturnCode, equalTo(PUT_RETURN_CODE_IS_LATEST));
    verify(timestampedStore).put(KEY, VALUE_AND_TIMESTAMP);
}
|
/**
 * Converts a protobuf message to an Avro record using the given Avro schema.
 * Thin delegation to {@code AvroSupport.convert}.
 */
public static GenericRecord convertToAvro(Schema schema, Message message) {
    return AvroSupport.convert(schema, message);
}
|
@Test
public void oneOfSchema() throws IOException {
    // A proto oneof with the "long" branch set must convert so that only the
    // matching Avro field is populated and the other branches stay null.
    Schema.Parser parser = new Schema.Parser();
    Schema convertedSchema = parser.parse(getClass().getClassLoader().getResourceAsStream("schema-provider/proto/oneof_schema.avsc"));
    WithOneOf input = WithOneOf.newBuilder().setLong(32L).build();
    GenericRecord actual = serializeAndDeserializeAvro(ProtoConversionUtil.convertToAvro(convertedSchema, input), convertedSchema);
    GenericData.Record expectedRecord = new GenericData.Record(convertedSchema);
    expectedRecord.put("int", null);
    expectedRecord.put("long", 32L);
    expectedRecord.put("message", null);
    Assertions.assertEquals(expectedRecord, actual);
}
|
/**
 * Returns a getter for the attribute, serving it from the per-class cache when
 * possible and caching newly created getters that report themselves cacheable.
 */
Getter getGetter(Object targetObject, String attributeName, boolean failOnMissingReflectiveAttribute) {
    Class<?> targetClass = targetObject.getClass();
    Getter cached = getterCache.getGetter(targetClass, attributeName);
    if (cached != null) {
        return cached;
    }
    Getter created = instantiateGetter(targetObject, attributeName, failOnMissingReflectiveAttribute);
    if (created.isCacheable()) {
        getterCache.putGetter(targetClass, attributeName, created);
    }
    return created;
}
|
@Test
public void when_getGetterExtractor_then_getterInCacheWithProperType() {
    // GIVEN an extractor-backed attribute configuration
    AttributeConfig config
            = new AttributeConfig("gimmePower", "com.hazelcast.query.impl.getters.ExtractorsTest$PowerExtractor");
    Extractors extractors = createExtractors(config);
    // WHEN the same attribute getter is requested twice
    Getter getterFirstInvocation = extractors.getGetter(bond, "gimmePower", true);
    Getter getterSecondInvocation = extractors.getGetter(bond, "gimmePower", true);
    // THEN the second request is served from the cache and has the extractor type
    assertThat(getterFirstInvocation).isSameAs(getterSecondInvocation);
    assertThat(getterFirstInvocation).isInstanceOf(ExtractorGetter.class);
}
|
/**
 * Starts a PayPal one-time payment via the Braintree GraphQL API.
 *
 * @return a future completing with the created payment (payment id and
 *     approval URL); completes exceptionally on a non-success GraphQL response
 */
CompletableFuture<CreatePayPalOneTimePaymentMutation.CreatePayPalOneTimePayment> createPayPalOneTimePayment(
    final BigDecimal amount, final String currency, final String returnUrl,
    final String cancelUrl, final String locale) {
  final CreatePayPalOneTimePaymentMutation mutation = new CreatePayPalOneTimePaymentMutation(
      buildCreatePayPalOneTimePaymentInput(amount, currency, returnUrl, cancelUrl, locale));
  return httpClient.sendAsync(buildRequest(mutation), HttpResponse.BodyHandlers.ofString())
      .thenApply(httpResponse -> {
        // Explicit local keeps type inference stable; IntelliJ's reported type
        // error here is spurious (see https://youtrack.jetbrains.com/issue/KTIJ-21905/).
        final CreatePayPalOneTimePaymentMutation.Data data =
            assertSuccessAndExtractData(httpResponse, mutation);
        return data.createPayPalOneTimePayment;
      });
}
|
@Test
void createPayPalOneTimePayment() {
    // Stub a successful GraphQL HTTP response and verify the payment id and
    // approval URL are extracted from it.
    final HttpResponse<Object> response = mock(HttpResponse.class);
    when(httpClient.sendAsync(any(), any()))
        .thenReturn(CompletableFuture.completedFuture(response));
    final String paymentId = "PAYID-AAA1AAAA1A11111AA111111A";
    when(response.body())
        .thenReturn(createPayPalOneTimePaymentResponse(paymentId));
    when(response.statusCode())
        .thenReturn(200);
    final CompletableFuture<CreatePayPalOneTimePaymentMutation.CreatePayPalOneTimePayment> future = braintreeGraphqlClient.createPayPalOneTimePayment(
        BigDecimal.ONE, CURRENCY,
        RETURN_URL, CANCEL_URL, LOCALE);
    assertTimeoutPreemptively(Duration.ofSeconds(3), () -> {
        final CreatePayPalOneTimePaymentMutation.CreatePayPalOneTimePayment result = future.get();
        assertEquals(paymentId, result.paymentId);
        assertNotNull(result.approvalUrl);
    });
}
|
/**
 * Not supported by this batch-execution time service.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void forEachEventTimeTimer(BiConsumerWithException<N, Long, Exception> consumer) {
    throw new UnsupportedOperationException(
            "The BatchExecutionInternalTimeService should not be used in State Processor API.");
}
|
// NOTE(review): the method name and the fail(...) message refer to
// forEachProcessingTimeTimer, but the code under test is forEachEventTimeTimer.
// Confirm whether the call or the naming is the copy-paste mistake before fixing
// (a sibling event-time test may already exist, so renaming here could collide).
@Test
void testForEachProcessingTimeTimerUnsupported() {
    BatchExecutionInternalTimeService<Object, Object> timeService =
            new BatchExecutionInternalTimeService<>(
                    new TestProcessingTimeService(),
                    LambdaTrigger.eventTimeTrigger(timer -> {}));
    assertThatThrownBy(
                    () ->
                            timeService.forEachEventTimeTimer(
                                    (o, aLong) ->
                                            fail(
                                                    "The forEachProcessingTimeTimer() should not be supported")))
            .isInstanceOf(UnsupportedOperationException.class)
            .hasMessageContaining(
                    "The BatchExecutionInternalTimeService should not be used in State Processor API");
}
|
/**
 * Prints the local report of the datanode addressed by {@code argv[i]}.
 *
 * @return 0 on success (CLI exit code)
 * @throws IOException if the datanode cannot be reached
 */
private int getDatanodeInfo(String[] argv, int i) throws IOException {
    ClientDatanodeProtocol proxy = getDataNodeProxy(argv[i]);
    try {
        System.out.println(proxy.getDatanodeInfo().getDatanodeLocalReport());
    } catch (IOException ioe) {
        // Wrap with a clearer message but keep the original as the cause.
        throw new IOException("Datanode unreachable. " + ioe, ioe);
    }
    return 0;
}
|
// Runs "-getDatanodeInfo" against every datanode in the mini cluster and
// verifies each prints exactly one report line with the expected fields.
@Test(timeout = 30000)
public void testGetDatanodeInfo() throws Exception {
    redirectStream();
    final DFSAdmin dfsAdmin = new DFSAdmin(conf);
    for (int i = 0; i < cluster.getDataNodes().size(); i++) {
        resetStream();
        final DataNode dn = cluster.getDataNodes().get(i);
        final String addr = String.format(
            "%s:%d",
            dn.getXferAddress().getHostString(),
            dn.getIpcPort());
        final int ret = ToolRunner.run(dfsAdmin,
            new String[]{"-getDatanodeInfo", addr});
        assertEquals(0, ret);
        /* collect outputs */
        final List<String> outs = Lists.newArrayList();
        scanIntoList(out, outs);
        /* verify results */
        assertEquals(
            "One line per DataNode like: Uptime: XXX, Software version: x.y.z,"
                + " Config version: core-x.y.z,hdfs-x",
            1, outs.size());
        assertThat(outs.get(0),
            is(allOf(containsString("Uptime:"),
                containsString("Software version"),
                containsString("Config version"))));
    }
}
|
/**
 * Imports a project from a DevOps platform (GitHub, GitLab, ...): creates the
 * SonarQube project, binds it to the ALM setting, optionally applies a new-code
 * definition, and commits everything in a single DB session.
 *
 * @throws IllegalArgumentException if the ALM setting does not exist or the
 *     platform is not supported
 * @throws IllegalStateException if the platform binding was not persisted
 */
public ImportedProject importProject(ImportProjectRequest request) {
    try (DbSession dbSession = dbClient.openSession(false)) {
        // Validate the new-code definition before touching anything else.
        checkNewCodeDefinitionParam(request.newCodeDefinitionType(), request.newCodeDefinitionValue());
        AlmSettingDto almSetting = dbClient.almSettingDao().selectByUuid(dbSession, request.almSettingId()).orElseThrow(() -> new IllegalArgumentException("ALM setting not found"));
        DevOpsProjectDescriptor projectDescriptor = new DevOpsProjectDescriptor(almSetting.getAlm(), almSetting.getUrl(), request.repositoryIdentifier(),
            request.projectIdentifier());
        DevOpsProjectCreator projectCreator = devOpsProjectCreatorFactory.getDevOpsProjectCreator(almSetting, projectDescriptor)
            .orElseThrow(() -> new IllegalArgumentException(format("Platform %s not supported", almSetting.getAlm().name())));
        CreationMethod creationMethod = getCreationMethod(request.monorepo());
        ComponentCreationData componentCreationData = projectCreator.createProjectAndBindToDevOpsPlatform(
            dbSession,
            creationMethod,
            request.monorepo(),
            request.projectKey(),
            request.projectName());
        ProjectDto projectDto = Optional.ofNullable(componentCreationData.projectDto()).orElseThrow();
        BranchDto mainBranchDto = Optional.ofNullable(componentCreationData.mainBranchDto()).orElseThrow();
        if (request.newCodeDefinitionType() != null) {
            newCodeDefinitionResolver.createNewCodeDefinition(dbSession, projectDto.getUuid(), mainBranchDto.getUuid(),
                mainBranchDto.getKey(), request.newCodeDefinitionType(), request.newCodeDefinitionValue());
        }
        // Index the new component before committing the session.
        componentUpdater.commitAndIndex(dbSession, componentCreationData);
        ProjectAlmSettingDto projectAlmSettingDto = dbClient.projectAlmSettingDao().selectByProject(dbSession, projectDto)
            .orElseThrow(() -> new IllegalStateException("Project ALM setting was not created"));
        dbSession.commit();
        return new ImportedProject(projectDto, projectAlmSettingDto);
    }
}
|
@Test
void createdImportedProject_whenAlmSettingDoesntExist_throws() {
    // An unknown ALM setting uuid must fail fast with a clear message.
    userSession.logIn().addPermission(PROVISION_PROJECTS);
    DbSession dbSession = mockDbSession();
    when(dbClient.almSettingDao().selectByUuid(dbSession, ALM_SETTING_ID)).thenReturn(Optional.empty());
    ImportProjectRequest request = new ImportProjectRequest(PROJECT_KEY, PROJECT_NAME, ALM_SETTING_ID, DOP_REPOSITORY_ID, DOP_PROJECT_ID, null, null, true);
    assertThatThrownBy(() -> importProjectService.importProject(request))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("ALM setting not found");
}
|
/**
 * Creates the concrete command packet for a MySQL protocol command type,
 * decoding from the payload where the command carries a body.
 * Unrecognized commands map to {@link MySQLUnsupportedCommandPacket}.
 */
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload,
                                             final ConnectionSession connectionSession) {
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitPacket();
        case COM_INIT_DB:
            return new MySQLComInitDbPacket(payload);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacket(payload);
        case COM_QUERY:
            return new MySQLComQueryPacket(payload);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPreparePacket(payload);
        case COM_STMT_EXECUTE: {
            // Peek (without consuming) the little-endian statement id at the
            // current reader index to look up the prepared statement.
            int statementId = payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex());
            MySQLServerPreparedStatement preparedStatement =
                    connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(statementId);
            int parameterCount = preparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount();
            return new MySQLComStmtExecutePacket(payload, parameterCount);
        }
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataPacket(payload);
        case COM_STMT_RESET:
            return new MySQLComStmtResetPacket(payload);
        case COM_STMT_CLOSE:
            return new MySQLComStmtClosePacket(payload);
        case COM_SET_OPTION:
            return new MySQLComSetOptionPacket(payload);
        case COM_PING:
            return new MySQLComPingPacket();
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionPacket();
        default:
            return new MySQLUnsupportedCommandPacket(commandPacketType);
    }
}
|
@Test
void assertNewInstanceWithComFieldListPacket() {
    // COM_FIELD_LIST must produce the dedicated field-list packet type.
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_FIELD_LIST, payload, connectionSession), instanceOf(MySQLComFieldListPacket.class));
}
|
/**
 * Turns an aggregation result into events: one event per key result that
 * satisfies the configured conditions. Each event carries the time range,
 * replay info, source streams, the group-by key fields, the per-series
 * aggregation values, and a context message holding the same fields.
 *
 * @throws EventProcessorException if a group-by field has no corresponding key
 *     component (e.g. grouping on a non-existing field)
 */
@VisibleForTesting
ImmutableList<EventWithContext> eventsFromAggregationResult(EventFactory eventFactory, AggregationEventProcessorParameters parameters, AggregationResult result)
        throws EventProcessorException {
    final ImmutableList.Builder<EventWithContext> eventsWithContext = ImmutableList.builder();
    final Set<String> sourceStreams = eventStreamService.buildEventSourceStreams(getStreams(parameters),
            result.sourceStreams());
    for (final AggregationKeyResult keyResult : result.keyResults()) {
        if (!satisfiesConditions(keyResult)) {
            LOG.debug("Skipping result <{}> because the conditions <{}> don't match", keyResult, config.conditions());
            continue;
        }
        final String keyString = String.join("|", keyResult.key());
        final String eventMessage = createEventMessageString(keyString, keyResult);
        // Extract event time and range from the key result or use query time range as fallback.
        // These can be different, e.g. during catch up processing.
        final DateTime eventTime = keyResult.timestamp().orElse(result.effectiveTimerange().to());
        final Event event = eventFactory.createEvent(eventDefinition, eventTime, eventMessage);
        // The keyResult timestamp is set to the end of the range
        event.setTimerangeStart(keyResult.timestamp().map(t -> t.minus(config.searchWithinMs())).orElse(parameters.timerange().getFrom()));
        event.setTimerangeEnd(keyResult.timestamp().orElse(parameters.timerange().getTo()));
        // Replay info lets the UI re-run the exact query that produced this event.
        event.setReplayInfo(EventReplayInfo.builder()
                .timerangeStart(event.getTimerangeStart())
                .timerangeEnd(event.getTimerangeEnd())
                .query(config.query())
                .streams(sourceStreams)
                .filters(config.filters())
                .build());
        sourceStreams.forEach(event::addSourceStream);
        final Map<String, Object> fields = new HashMap<>();
        // Each group value will be a separate field in the message to make it usable as event fields.
        //
        // Example result:
        //   groupBy=["application_name", "username"]
        //   result-key=["sshd", "jane"]
        //
        // Message fields:
        //   application_name=sshd
        //   username=jane
        for (int i = 0; i < config.groupBy().size(); i++) {
            try {
                fields.put(config.groupBy().get(i), keyResult.key().get(i));
            } catch (IndexOutOfBoundsException e) {
                throw new EventProcessorException(
                        "Couldn't create events for: " + eventDefinition.title() + " (possibly due to non-existing grouping fields)",
                        false, eventDefinition.id(), eventDefinition, e);
            }
        }
        // Group By fields need to be saved on the event so they are available to the subsequent notification events
        event.setGroupByFields(fields.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toString())));
        // The field name for the series value is composed of the series function and field. We don't take the
        // series ID into account because it would be very hard to use for the user. That means a series with
        // the same function and field but different ID would overwrite a previous one.
        // This shouldn't be a problem though, because the same function and field will always compute the same
        // value.
        //
        // Examples:
        //   aggregation_value_count_source=42
        //   aggregation_value_card_anonid=23
        for (AggregationSeriesValue seriesValue : keyResult.seriesValues()) {
            final String function = seriesValue.series().type().toLowerCase(Locale.ROOT);
            final Optional<String> field = fieldFromSeries(seriesValue.series());
            final String fieldName = field.map(f -> String.format(Locale.ROOT, "aggregation_value_%s_%s", function, f))
                    .orElseGet(() -> String.format(Locale.ROOT, "aggregation_value_%s", function));
            fields.put(fieldName, seriesValue.value());
        }
        // This is the concatenated key value
        fields.put("aggregation_key", keyString);
        // TODO: Can we find a useful source value?
        final Message message = messageFactory.createMessage(eventMessage, "", result.effectiveTimerange().to());
        message.addFields(fields);
        // Ask any event query modifier for its state and collect it into the event modifier state
        final Map<String, Object> eventModifierState = eventQueryModifiers.stream()
                .flatMap(modifier -> modifier.eventModifierData(result.additionalResults()).entrySet().stream())
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        LOG.debug("Creating event {}/{} - {} {} ({})", eventDefinition.title(), eventDefinition.id(), keyResult.key(), seriesString(keyResult), fields);
        eventsWithContext.add(EventWithContext.builder()
                .event(event)
                .messageContext(message)
                .eventModifierState(eventModifierState)
                .build());
    }
    return eventsWithContext.build();
}
|
/**
 * Verifies that aggregation conditions filter key results: of the two key results below,
 * only the first satisfies (abc123 > 40 AND xyz789 < 2), so exactly one event must be
 * produced, carrying the group-by fields, aggregation values and the concatenated key.
 */
@Test
public void testEventsFromAggregationResultWithConditions() throws EventProcessorException {
    final DateTime now = DateTime.now(DateTimeZone.UTC);
    final AbsoluteRange timerange = AbsoluteRange.create(now.minusHours(1), now.minusHours(1).plusMillis(SEARCH_WINDOW_MS));
    // We expect to get the end of the aggregation timerange as event time
    final TestEvent event1 = new TestEvent(timerange.to());
    final TestEvent event2 = new TestEvent(timerange.to());
    when(eventFactory.createEvent(any(EventDefinition.class), any(DateTime.class), anyString()))
        .thenReturn(event1) // first invocation return value
        .thenReturn(event2); // second invocation return value
    // There should only be one result because the second result's "abc123" value is less than 40. (it is 23)
    // See result builder below
    final AggregationConditions conditions = AggregationConditions.builder()
        .expression(Expr.And.create(
            Expr.Greater.create(Expr.NumberReference.create("abc123"), Expr.NumberValue.create(40.0d)),
            Expr.Lesser.create(Expr.NumberReference.create("xyz789"), Expr.NumberValue.create(2.0d))
        ))
        .build();
    final EventDefinitionDto eventDefinitionDto = buildEventDefinitionDto(ImmutableSet.of(), ImmutableList.of(), conditions, emptyList());
    final AggregationEventProcessorParameters parameters = AggregationEventProcessorParameters.builder()
        .timerange(timerange)
        .build();
    final AggregationEventProcessor eventProcessor = new AggregationEventProcessor(eventDefinitionDto, searchFactory,
        eventProcessorDependencyCheck, stateService, moreSearch, eventStreamService, messages, notificationService, permittedStreams, Set.of(), messageFactory);
    // Two key results: the first matches both conditions, the second fails the abc123 > 40 check.
    final AggregationResult result = AggregationResult.builder()
        .effectiveTimerange(timerange)
        .totalAggregatedMessages(1)
        .sourceStreams(ImmutableSet.of("stream-1", "stream-2", "stream-3"))
        .keyResults(ImmutableList.of(
            AggregationKeyResult.builder()
                .key(ImmutableList.of("one", "two"))
                .timestamp(timerange.to())
                .seriesValues(ImmutableList.of(
                    AggregationSeriesValue.builder()
                        .key(ImmutableList.of("a"))
                        .value(42.0d)
                        .series(Count.builder()
                            .id("abc123")
                            .field("source")
                            .build())
                        .build(),
                    AggregationSeriesValue.builder()
                        .key(ImmutableList.of("a"))
                        .value(1.0d)
                        .series(Cardinality.builder()
                            .id("xyz789")
                            .field("source")
                            .build())
                        .build()
                ))
                .build(),
            AggregationKeyResult.builder()
                .key(ImmutableList.of(now.toString(), "one", "two"))
                .seriesValues(ImmutableList.of(
                    AggregationSeriesValue.builder()
                        .key(ImmutableList.of("a"))
                        .value(23.0d) // Doesn't match condition
                        .series(Count.builder()
                            .id("abc123")
                            .field("source")
                            .build())
                        .build(),
                    AggregationSeriesValue.builder()
                        .key(ImmutableList.of("a"))
                        .value(1.0d)
                        .series(Cardinality.builder()
                            .id("xyz789")
                            .field("source")
                            .build())
                        .build()
                ))
                .build()
        ))
        .build();
    final ImmutableList<EventWithContext> eventsWithContext = eventProcessor.eventsFromAggregationResult(eventFactory, parameters, result);
    assertThat(eventsWithContext).hasSize(1);
    assertThat(eventsWithContext.get(0)).satisfies(eventWithContext -> {
        final Event event = eventWithContext.event();
        assertThat(event.getId()).isEqualTo(event1.getId());
        assertThat(event.getMessage()).isEqualTo(event1.getMessage());
        assertThat(event.getEventTimestamp()).isEqualTo(timerange.to());
        assertThat(event.getTimerangeStart()).isEqualTo(timerange.from());
        assertThat(event.getTimerangeEnd()).isEqualTo(timerange.to());
        // Should contain all streams because when config.streams is empty, we search in all streams
        assertThat(event.getSourceStreams()).containsOnly("stream-1", "stream-2", "stream-3");
        final Message message = eventWithContext.messageContext().orElse(null);
        assertThat(message).isNotNull();
        assertThat(message.getField("group_field_one")).isEqualTo("one");
        assertThat(message.getField("group_field_two")).isEqualTo("two");
        assertThat(message.getField("aggregation_key")).isEqualTo("one|two");
        assertThat(message.getField("aggregation_value_count_source")).isEqualTo(42.0d);
        assertThat(message.getField("aggregation_value_card_source")).isEqualTo(1.0d);
        assertThat(event.getGroupByFields().get("group_field_one")).isEqualTo("one");
        assertThat(event.getGroupByFields().get("group_field_two")).isEqualTo("two");
    });
}
|
/**
 * Reacts to a change in the list of servers with long GC cycles.
 *
 * <p>Only acts when the amount of reported servers differs from the previously seen
 * list (or when nothing was seen yet): it clears the existing problem and, if servers
 * are still reported, re-raises it with the fresh metadata while also clearing the
 * poll-interval-too-small signal.
 */
@Override
public void onChange(List<JobRunrMetadata> metadataList) {
    final boolean sameAmountAsBefore = this.serversWithLongGCCyclesMetadataList != null
            && this.serversWithLongGCCyclesMetadataList.size() == metadataList.size();
    if (sameAmountAsBefore) {
        return;
    }
    // Drop any stale problem before (possibly) re-adding it with the fresh metadata.
    problems.removeProblemsOfType(CpuAllocationIrregularityProblem.PROBLEM_TYPE);
    if (!metadataList.isEmpty()) {
        problems.addProblem(new CpuAllocationIrregularityProblem(metadataList));
        problems.removeProblemsOfType(PollIntervalInSecondsTimeBoxIsTooSmallProblem.PROBLEM_TYPE);
        storageProvider.deleteMetadata(PollIntervalInSecondsTimeBoxIsTooSmallNotification.class.getSimpleName());
    }
    this.serversWithLongGCCyclesMetadataList = metadataList;
}
|
/**
 * Verifies that when the metadata list shrinks to empty, the handler removes the
 * CPU-allocation-irregularity problem and does not add a new one.
 */
@Test
void ifCpuAllocationIrregularitiesIsDeletedThenProblemIsRemoved() {
    final JobRunrMetadata jobRunrMetadata = new JobRunrMetadata(CpuAllocationIrregularityNotification.class.getSimpleName(), "BackgroundJobServer " + UUID.randomUUID(), "23");
    // First change: one server reported -> problem added (interactions reset afterwards).
    cpuAllocationIrregularityProblemHandler.onChange(asList(jobRunrMetadata));
    reset(problems);
    // Second change: empty list -> problem must be removed, nothing re-added.
    cpuAllocationIrregularityProblemHandler.onChange(emptyList());
    verify(problems).removeProblemsOfType(CpuAllocationIrregularityProblem.PROBLEM_TYPE);
    verify(problems, never()).addProblem(any());
}
|
/**
 * Builds a grouped service name: {@code groupName + SERVICE_INFO_SPLITER + serviceName}.
 * Either side may be empty, producing a name with only a group or only a service part.
 *
 * @param serviceName service name (may be empty)
 * @param groupName   group name (may be empty)
 * @return the concatenated grouped name
 */
public static String getGroupedNameOptional(final String serviceName, final String groupName) {
    return new StringBuilder()
            .append(groupName)
            .append(Constants.SERVICE_INFO_SPLITER)
            .append(serviceName)
            .toString();
}
|
/**
 * Checks grouped-name formatting for empty service, empty group, and both present:
 * the separator "@@" always appears between group and service.
 */
@Test
void testGetGroupedNameOptional() {
    String onlyGroupName = NamingUtils.getGroupedNameOptional(StringUtils.EMPTY, "groupA");
    assertEquals("groupA@@", onlyGroupName);
    String onlyServiceName = NamingUtils.getGroupedNameOptional("serviceA", StringUtils.EMPTY);
    assertEquals("@@serviceA", onlyServiceName);
    String groupNameAndServiceName = NamingUtils.getGroupedNameOptional("serviceA", "groupA");
    assertEquals("groupA@@serviceA", groupNameAndServiceName);
}
|
/**
 * Extracts the version attribute from raw XML using the instance pattern {@code p};
 * capture group 1 holds the version literal.
 *
 * @param rawXml the raw XML text to scan
 * @return the first matched version
 * @throws IllegalArgumentException when no version can be found
 */
public String extractVersion(String rawXml) {
    final Matcher matcher = p.matcher(rawXml);
    if (!matcher.find()) {
        throw new IllegalArgumentException("Impossible to extract version from the file");
    }
    return matcher.group(1);
}
|
/**
 * Verifies that only the root element's version attribute is extracted, even when
 * other elements in the document carry their own version attributes.
 */
@Test
public void extractVersionWhenMoreVersionAttributesArePresent() {
    String version = instance.extractVersion("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
        "<ScenarioSimulationModel version=\"1.2\">\n" +
        "<someUnknownTag version=\"1.1\"/>\n" +
        "</ScenarioSimulationModel>");
    assertThat(version).isEqualTo("1.2");
}
|
/**
 * Returns the comparator stored for the given column.
 *
 * @param column zero-based column index into the comparator array
 * @return the comparator for that column
 */
public Comparator<?> getValueComparator(int column) {
    return valueComparators[column];
}
|
/**
 * A sorter built over an Integer column must provide a non-null default comparator
 * for that column.
 */
@Test
public void getDefaultComparatorForIntegerClass() {
    ObjectTableSorter sorter = new ObjectTableSorter(createTableModel("integer", Integer.class));
    assertThat(sorter.getValueComparator(0), is(CoreMatchers.notNullValue()));
}
|
/**
 * Translates a {@link StreamGraph} into a {@link JobGraph} using the current thread's
 * context class loader and a same-thread executor.
 *
 * @param streamGraph the stream graph to translate
 * @return the generated job graph
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    return new StreamingJobGraphGenerator(
        Thread.currentThread().getContextClassLoader(),
        streamGraph,
        null, // NOTE(review): third ctor argument deliberately unset here — confirm its meaning against the generator's ctor
        Runnable::run) // Runnable::run executes submitted work synchronously on the calling thread
        .createJobGraph();
}
|
/**
 * With checkpointing disabled, the generated job graph must carry the "disabled"
 * checkpoint settings: interval Long.MAX_VALUE, no exactly-once guarantee, and
 * AT_LEAST_ONCE mode in the vertex stream config.
 */
@Test
void testDisabledCheckpointing() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.fromData(0).print();
    StreamGraph streamGraph = env.getStreamGraph();
    assertThat(streamGraph.getCheckpointConfig().isCheckpointingEnabled())
        .withFailMessage("Checkpointing enabled")
        .isFalse();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    JobCheckpointingSettings snapshottingSettings = jobGraph.getCheckpointingSettings();
    // Long.MAX_VALUE interval is how "checkpointing off" is encoded in the settings.
    assertThat(
            snapshottingSettings
                .getCheckpointCoordinatorConfiguration()
                .getCheckpointInterval())
        .isEqualTo(Long.MAX_VALUE);
    assertThat(snapshottingSettings.getCheckpointCoordinatorConfiguration().isExactlyOnce())
        .isFalse();
    List<JobVertex> verticesSorted = jobGraph.getVerticesSortedTopologicallyFromSources();
    StreamConfig streamConfig = new StreamConfig(verticesSorted.get(0).getConfiguration());
    assertThat(streamConfig.getCheckpointMode()).isEqualTo(CheckpointingMode.AT_LEAST_ONCE);
}
|
/**
 * Reads the raw string value for {@code configProperty} from the configuration,
 * probing the primary key first and then every deprecated alternative key.
 *
 * @param conf           configuration to read from
 * @param configProperty property whose primary and alternative keys are probed
 * @return the first value found, or {@link Option#empty()} when none of the keys is set
 */
public static Option<String> getRawValueWithAltKeys(Configuration conf,
                                                    ConfigProperty<?> configProperty) {
    String value = conf.get(configProperty.key());
    if (value != null) {
        return Option.of(value);
    }
    // Fall back to deprecated alternative keys, warning so users migrate to the new key.
    for (String alternative : configProperty.getAlternatives()) {
        String altValue = conf.get(alternative);
        if (altValue != null) {
            // Parameterized logging defers message formatting until the level is enabled,
            // unlike the previous eager String.format call.
            LOG.warn("The configuration key '{}' has been deprecated "
                + "and may be removed in the future. Please use the new key '{}' instead.",
                alternative, configProperty.key());
            return Option.of(altValue);
        }
    }
    return Option.empty();
}
|
/**
 * Verifies raw-value lookup on a Hadoop Configuration: empty when nothing is set,
 * the primary key's value when set, and the alternative key's value when only the
 * alternative is set.
 */
@Test
public void testGetRawValueWithAltKeysFromHadoopConf() {
    Configuration conf = new Configuration();
    assertEquals(Option.empty(), getRawValueWithAltKeys(conf, TEST_BOOLEAN_CONFIG_PROPERTY));
    // Flip the default so the looked-up value is distinguishable from the default.
    boolean setValue = !Boolean.parseBoolean(TEST_BOOLEAN_CONFIG_PROPERTY.defaultValue());
    conf.setBoolean(TEST_BOOLEAN_CONFIG_PROPERTY.key(), setValue);
    assertEquals(Option.of(String.valueOf(setValue)),
        getRawValueWithAltKeys(conf, TEST_BOOLEAN_CONFIG_PROPERTY));
    // Fresh configuration with only the deprecated alternative key set.
    conf = new Configuration();
    conf.setBoolean(TEST_BOOLEAN_CONFIG_PROPERTY.getAlternatives().get(0), setValue);
    assertEquals(Option.of(String.valueOf(setValue)),
        getRawValueWithAltKeys(conf, TEST_BOOLEAN_CONFIG_PROPERTY));
}
|
/**
 * Resolves a property name to its config definition, dispatching by prefix:
 * request-scoped ksql properties first, then plain ksql properties (excluding the
 * streams-prefixed ones), and finally Kafka Streams properties.
 *
 * @param propertyName the property name to resolve
 * @param strict       whether streams-config resolution should be strict
 * @return the resolved config item, if any
 */
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
    if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
        return resolveRequestConfig(propertyName);
    }
    final boolean ksqlScoped = propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX)
        && !propertyName.startsWith(KSQL_STREAMS_PREFIX);
    return ksqlScoped
        ? resolveKsqlConfig(propertyName)
        : resolveStreamsConfig(propertyName, strict);
}
|
/**
 * A plain consumer config key (no ksql prefix) must resolve against the consumer
 * config definition.
 */
@Test
public void shouldResolveConsumerConfig() {
    assertThat(resolver.resolve(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, true),
        is(resolvedItem(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, CONSUMER_CONFIG_DEF)));
}
|
/**
 * Authenticates a request by matching its bearer token against the configured clients.
 *
 * <p>Outcomes: 401 when no bearer token is present; 403 when no permission is mapped for
 * the request, when no client matches the token (expired token versions are skipped), or
 * when the token hash matches more than one distinct token version; otherwise the request
 * proceeds with client ids and permissions attached.
 *
 * @param req the request to authenticate
 * @return an error response terminating the request, or empty to let it through
 */
@Override
protected Optional<ErrorResponse> filter(DiscFilterRequest req) {
    var now = clock.instant();
    var bearerToken = requestBearerToken(req).orElse(null);
    if (bearerToken == null) {
        log.fine("Missing bearer token");
        return Optional.of(new ErrorResponse(Response.Status.UNAUTHORIZED, "Unauthorized"));
    }
    var permission = Permission.getRequiredPermission(req).orElse(null);
    if (permission == null) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    var requestTokenHash = requestTokenHash(bearerToken);
    var clientIds = new TreeSet<String>();
    var permissions = EnumSet.noneOf(Permission.class);
    var matchedTokens = new HashSet<TokenVersion>();
    // Collect every client whose token set contains this token's hash and whose
    // permissions cover the required one; expired token versions are skipped.
    for (Client c : allowedClients) {
        if (!c.permissions().contains(permission)) continue;
        var matchedToken = c.tokens().get(requestTokenHash);
        if (matchedToken == null) continue;
        var expiration = matchedToken.expiration().orElse(null);
        if (expiration != null && now.isAfter(expiration)) continue;
        matchedTokens.add(matchedToken);
        clientIds.add(c.id());
        permissions.addAll(c.permissions());
    }
    if (clientIds.isEmpty()) return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    // One hash mapping to multiple distinct token versions is ambiguous; reject it.
    if (matchedTokens.size() > 1) {
        log.warning("Multiple tokens matched for request %s"
            .formatted(matchedTokens.stream().map(TokenVersion::id).toList()));
        return Optional.of(new ErrorResponse(Response.Status.FORBIDDEN, "Forbidden"));
    }
    var matchedToken = matchedTokens.stream().findAny().get();
    // Record token identity (id/fingerprint/expiry — never the secret) in the access log.
    addAccessLogEntry(req, "token.id", matchedToken.id());
    addAccessLogEntry(req, "token.hash", matchedToken.fingerprint().toDelimitedHexString());
    addAccessLogEntry(req, "token.exp", matchedToken.expiration().map(Instant::toString).orElse("<none>"));
    ClientPrincipal.attachToRequest(req, clientIds, permissions);
    return Optional.empty();
}
|
/**
 * With an empty clients configuration, any bearer token must be rejected with 403
 * (no client can match, so authorization fails after authentication succeeds).
 */
@Test
void rejects_tokens_on_empty_clients() {
    var req = FilterTestUtils.newRequestBuilder()
            .withMethod(Method.GET)
            .withHeader("Authorization", "Bearer " + UNKNOWN_TOKEN.secretTokenString())
            .build();
    var responseHandler = new MockResponseHandler();
    newFilterWithEmptyClientsConfig().filter(req, responseHandler);
    assertNotNull(responseHandler.getResponse());
    assertEquals(FORBIDDEN, responseHandler.getResponse().getStatus());
}
|
/**
 * Persists a new metadata.json for the table and records its location in the House Table
 * service (HTS).
 *
 * <p>Flow: stamp bookkeeping properties (timestamps, version lineage, location), apply any
 * snapshot additions/deletions carried in via properties, write the metadata file, then save
 * the pointer to HTS — unless this is a staged create, in which case the in-memory metadata
 * is refreshed from the newly written file instead. Persistence failures are classified into
 * FAILURE/UNKNOWN commit states for Iceberg's commit machinery, and the final state is
 * always counted in metrics.
 *
 * @param base     current table metadata, or {@code null} when the table is being created
 * @param metadata new table metadata to commit
 */
@SuppressWarnings("checkstyle:MissingSwitchDefault")
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
  int version = currentVersion() + 1;
  CommitStatus commitStatus = CommitStatus.FAILURE;
  /* This method adds no fs scheme, and it persists in HTS that way. */
  final String newMetadataLocation = rootMetadataFileLocation(metadata, version);
  // Placeholder so the failure message below can always serialize a house table.
  HouseTable houseTable = HouseTable.builder().build();
  try {
    // Now that we have metadataLocation we stamp it in metadata property.
    Map<String, String> properties = new HashMap<>(metadata.properties());
    failIfRetryUpdate(properties);
    String currentTsString = String.valueOf(Instant.now(Clock.systemUTC()).toEpochMilli());
    properties.put(getCanonicalFieldName("lastModifiedTime"), currentTsString);
    if (base == null) {
      // First commit for this table: also record its creation time.
      properties.put(getCanonicalFieldName("creationTime"), currentTsString);
    }
    // The previous tableLocation (or INITIAL_VERSION) becomes this commit's tableVersion.
    properties.put(
        getCanonicalFieldName("tableVersion"),
        properties.getOrDefault(
            getCanonicalFieldName("tableLocation"), CatalogConstants.INITIAL_VERSION));
    properties.put(getCanonicalFieldName("tableLocation"), newMetadataLocation);
    // Snapshot payloads and the stage-create flag travel as properties; strip them out
    // before the properties are persisted with the table metadata.
    String serializedSnapshotsToPut = properties.remove(CatalogConstants.SNAPSHOTS_JSON_KEY);
    String serializedSnapshotRefs = properties.remove(CatalogConstants.SNAPSHOTS_REFS_KEY);
    boolean isStageCreate =
        Boolean.parseBoolean(properties.remove(CatalogConstants.IS_STAGE_CREATE_KEY));
    logPropertiesMap(properties);
    TableMetadata updatedMetadata = metadata.replaceProperties(properties);
    if (serializedSnapshotsToPut != null) {
      List<Snapshot> snapshotsToPut =
          SnapshotsUtil.parseSnapshots(fileIO, serializedSnapshotsToPut);
      // Split the requested snapshots into those to append and those to delete.
      Pair<List<Snapshot>, List<Snapshot>> snapshotsDiff =
          SnapshotsUtil.symmetricDifferenceSplit(snapshotsToPut, updatedMetadata.snapshots());
      List<Snapshot> appendedSnapshots = snapshotsDiff.getFirst();
      List<Snapshot> deletedSnapshots = snapshotsDiff.getSecond();
      snapshotInspector.validateSnapshotsUpdate(
          updatedMetadata, appendedSnapshots, deletedSnapshots);
      Map<String, SnapshotRef> snapshotRefs =
          serializedSnapshotRefs == null
              ? new HashMap<>()
              : SnapshotsUtil.parseSnapshotRefs(serializedSnapshotRefs);
      updatedMetadata =
          maybeAppendSnapshots(updatedMetadata, appendedSnapshots, snapshotRefs, true);
      updatedMetadata = maybeDeleteSnapshots(updatedMetadata, deletedSnapshots);
    }
    final TableMetadata updatedMtDataRef = updatedMetadata;
    // Write the metadata file, timing the write for the metadata-update latency metric.
    metricsReporter.executeWithStats(
        () ->
            TableMetadataParser.write(updatedMtDataRef, io().newOutputFile(newMetadataLocation)),
        InternalCatalogMetricsConstant.METADATA_UPDATE_LATENCY);
    houseTable = houseTableMapper.toHouseTable(updatedMetadata);
    if (!isStageCreate) {
      houseTableRepository.save(houseTable);
    } else {
      /**
       * Refresh current metadata for staged tables from newly created metadata file and disable
       * "forced refresh" in {@link OpenHouseInternalTableOperations#commit(TableMetadata,
       * TableMetadata)}
       */
      refreshFromMetadataLocation(newMetadataLocation);
    }
    commitStatus = CommitStatus.SUCCESS;
  } catch (InvalidIcebergSnapshotException e) {
    // Bad snapshot input from the client surfaces as a request error, not a commit failure.
    throw new BadRequestException(e, e.getMessage());
  } catch (CommitFailedException e) {
    throw e;
  } catch (HouseTableCallerException
      | HouseTableNotFoundException
      | HouseTableConcurrentUpdateException e) {
    // HTS-level errors are rethrown as CommitFailedException.
    throw new CommitFailedException(e);
  } catch (Throwable persistFailure) {
    // Try to reconnect and determine the commit status for unknown exception
    log.error(
        "Encounter unexpected error while updating metadata.json for table:" + tableIdentifier,
        persistFailure);
    commitStatus = checkCommitStatus(newMetadataLocation, metadata);
    switch (commitStatus) {
      case SUCCESS:
        log.debug("Calling doCommit succeeded");
        break;
      case FAILURE:
        // logging error and exception-throwing co-existence is needed, given the exception
        // handler in
        // org.apache.iceberg.BaseMetastoreCatalog.BaseMetastoreCatalogTableBuilder.create swallow
        // the
        // nested exception information.
        log.error("Exception details:", persistFailure);
        throw new CommitFailedException(
            persistFailure,
            String.format(
                "Persisting metadata file %s at version %s for table %s failed while persisting to house table",
                newMetadataLocation, version, GSON.toJson(houseTable)));
      case UNKNOWN:
        throw new CommitStateUnknownException(persistFailure);
    }
  } finally {
    // Always record the final commit state in metrics, whatever path was taken above.
    switch (commitStatus) {
      case FAILURE:
        metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_FAILED_CTR);
        break;
      case UNKNOWN:
        metricsReporter.count(InternalCatalogMetricsConstant.COMMIT_STATE_UNKNOWN);
        break;
      default:
        break; /*should never happen, kept to silence SpotBugs*/
    }
  }
}
|
/**
 * Commits a metadata update that cherry-picks an already-staged WAP snapshot and verifies
 * the resulting property bookkeeping: the snapshot is reported as cherry-picked (not
 * staged/appended/deleted) and the house table is saved exactly once.
 */
@Test
void testDoCommitCherryPickFirstSnapshot() throws IOException {
    List<Snapshot> testWapSnapshots = IcebergTestUtil.getWapSnapshots().subList(0, 1);
    // add 1 staged snapshot to the base metadata
    TableMetadata base =
        TableMetadata.buildFrom(BASE_TABLE_METADATA).addSnapshot(testWapSnapshots.get(0)).build();
    Map<String, String> properties = new HashMap<>(base.properties());
    // Static-mock the metadata writer so no metadata.json file is actually written.
    try (MockedStatic<TableMetadataParser> ignoreWriteMock =
        Mockito.mockStatic(TableMetadataParser.class)) {
        // cherry pick the staged snapshot
        properties.put(
            CatalogConstants.SNAPSHOTS_JSON_KEY, SnapshotsUtil.serializedSnapshots(testWapSnapshots));
        properties.put(
            CatalogConstants.SNAPSHOTS_REFS_KEY,
            SnapshotsUtil.serializeMap(
                IcebergTestUtil.obtainSnapshotRefsFromSnapshot(testWapSnapshots.get(0))));
        properties.put(getCanonicalFieldName("tableLocation"), TEST_LOCATION);
        TableMetadata metadata = base.replaceProperties(properties);
        openHouseInternalTableOperations.doCommit(base, metadata);
        Mockito.verify(mockHouseTableMapper).toHouseTable(tblMetadataCaptor.capture());
        Map<String, String> updatedProperties = tblMetadataCaptor.getValue().properties();
        // verify the staged snapshot is cherry picked by using the existing one
        Assertions.assertEquals(
            null, updatedProperties.get(getCanonicalFieldName("staged_snapshots")));
        Assertions.assertEquals(
            null, updatedProperties.get(getCanonicalFieldName("appended_snapshots")));
        Assertions.assertEquals(
            Long.toString(testWapSnapshots.get(0).snapshotId()),
            updatedProperties.get(getCanonicalFieldName("cherry_picked_snapshots")));
        Assertions.assertEquals(
            null, updatedProperties.get(getCanonicalFieldName("deleted_snapshots")));
        Mockito.verify(mockHouseTableRepository, Mockito.times(1)).save(Mockito.eq(mockHouseTable));
    }
}
|
/**
 * Tells whether the given path is a rule active-version path, i.e. matches
 * {@code <ruleNameNode>/<ruleName><ACTIVE_VERSION_SUFFIX>} (case-insensitive).
 *
 * @param rulePath the path to test
 * @return {@code true} when the path contains an active-version segment
 */
public static boolean isRuleActiveVersionPath(final String rulePath) {
    return Pattern
            .compile(getRuleNameNode() + "/(\\w+)" + ACTIVE_VERSION_SUFFIX, Pattern.CASE_INSENSITIVE)
            .matcher(rulePath)
            .find();
}
|
/** A global rule's active_version path must be recognized as an active-version path. */
@Test
void assertIsRuleActiveVersionPath() {
    assertTrue(GlobalNodePath.isRuleActiveVersionPath("/rules/transaction/active_version"));
}
|
/**
 * Tells whether the reference currently refers to exactly the given instance.
 *
 * <p>This is deliberately an identity ({@code ==}) comparison, not {@code equals};
 * it is also {@code true} when the referent has been cleared and {@code obj} is null.
 *
 * @param ref the reference to inspect
 * @param obj the candidate referent
 * @param <T> the referent type
 * @return {@code true} when {@code ref} points at the very same instance as {@code obj}
 */
public static <T> boolean isReferringTo(final Reference<T> ref, final T obj)
{
    final T referent = ref.get();
    return referent == obj;
}
|
/**
 * Identity semantics: the reference matches the exact instance it holds and no other,
 * even an equal-valued one.
 */
@Test
void validateIsReferringTo()
{
    final Long objOne = 42L;
    final Long objTwo = 43L; // Need different value to make sure it is a different instance...
    final WeakReference<Long> ref = new WeakReference<>(objOne);
    assertTrue(References.isReferringTo(ref, objOne));
    assertFalse(References.isReferringTo(ref, objTwo));
}
|
/**
 * Reads the certificate secrets referenced by the given metadata from the secret store.
 *
 * <p>Always returns a present {@link Optional}; absence of the underlying material is
 * represented inside the returned secrets object rather than by an empty Optional.
 *
 * @param metadata pointer to the secret-store entries to read
 * @return the secrets read from the secret store, always present
 */
public Optional<EndpointCertificateSecrets> readEndpointCertificateSecrets(EndpointCertificateMetadata metadata) {
    return Optional.of(readFromSecretStore(metadata));
}
|
/**
 * For both providers, a certificate version that cannot be found must yield a present
 * secrets object in the "missing" state rather than an empty Optional.
 */
@Test
void returns_missing_when_cert_version_not_found() {
    DefaultEndpointCertificateSecretStore defaultEndpointCertificateSecretStore = new DefaultEndpointCertificateSecretStore(new MockSecretStore());
    TestEndpointCertificateSecretStore zerosslStore = new TestEndpointCertificateSecretStore(null, null);
    EndpointCertificateRetriever retriever = new EndpointCertificateRetriever(List.of(defaultEndpointCertificateSecretStore, zerosslStore));
    {
        // digicert-backed metadata, nothing stored -> missing
        Optional<EndpointCertificateSecrets> endpointCertificateSecrets = retriever.readEndpointCertificateSecrets(
                new EndpointCertificateMetadata("key", "cert", 1, EndpointCertificateMetadata.Provider.digicert));
        Assertions.assertTrue(endpointCertificateSecrets.isPresent());
        Assertions.assertTrue(endpointCertificateSecrets.get().isMissing());
    }
    {
        // zerossl-backed metadata, store returns nulls -> missing
        Optional<EndpointCertificateSecrets> endpointCertificateSecrets = retriever.readEndpointCertificateSecrets(
                new EndpointCertificateMetadata("key", "cert", 1, EndpointCertificateMetadata.Provider.zerossl));
        Assertions.assertTrue(endpointCertificateSecrets.isPresent());
        Assertions.assertTrue(endpointCertificateSecrets.get().isMissing());
    }
}
|
/**
 * Converts an arbitrary value to a String array by splitting its string form on commas.
 *
 * <p>Note: {@link StringTokenizer} is kept on purpose — it silently drops empty tokens,
 * so {@code "a,,b"} yields {@code ["a", "b"]} (unlike {@code String.split}).
 *
 * @param value the value to convert; may be {@code null}
 * @return the comma-separated tokens, or {@code null} when the value is {@code null}
 *         or its string form is {@code null}/empty
 */
public static String[] convertToStringArray(Object value) {
    if (value == null) {
        return null;
    }
    String text = value.toString();
    if (text == null || text.isEmpty()) {
        return null;
    }
    StringTokenizer stok = new StringTokenizer(text, ",");
    final List<String> list = new ArrayList<>();
    while (stok.hasMoreTokens()) {
        list.add(stok.nextToken());
    }
    // Zero-length array argument is the idiomatic toArray form.
    return list.toArray(new String[0]);
}
|
/**
 * Conversion contract: null/empty input yields null; otherwise the comma-separated
 * tokens are returned in order for one, two and three elements.
 */
@Test
public void testConvertToStringArray() throws Exception {
    assertNull(StringArrayConverter.convertToStringArray(null));
    assertNull(StringArrayConverter.convertToStringArray(""));
    String[] array = StringArrayConverter.convertToStringArray("foo");
    assertEquals(1, array.length);
    assertEquals("foo", array[0]);
    array = StringArrayConverter.convertToStringArray("foo,bar");
    assertEquals(2, array.length);
    assertEquals("foo", array[0]);
    assertEquals("bar", array[1]);
    array = StringArrayConverter.convertToStringArray("foo,bar,baz");
    assertEquals(3, array.length);
    assertEquals("foo", array[0]);
    assertEquals("bar", array[1]);
    assertEquals("baz", array[2]);
}
|
/**
 * Renders this message as {@code "SmppMessage: "} followed by the SMPP command when one
 * is set, otherwise by the message body.
 */
@Override
public String toString() {
    final Object described = command != null ? command : getBody();
    return "SmppMessage: " + described;
}
|
/**
 * With a null command and no body, toString must fall back to the body form,
 * rendering the null body as "null".
 */
@Test
public void toStringShouldReturnTheBodyIfTheCommandIsNull() {
    message = new SmppMessage(camelContext, null, new SmppConfiguration());
    assertEquals("SmppMessage: null", message.toString());
}
|
/**
 * Determines the content type for the given path by delegating to the string-based
 * overload with the path's textual form.
 *
 * @param path the file path to inspect
 * @return the detected content type
 */
public static String from(Path path) {
    return from(path.toString());
}
|
/** A .js path must map to the text/javascript content type. */
@Test
void testJavascriptContentType() {
    assertThat(ContentType.from(Path.of("javascript.js"))).isEqualTo(TEXT_JAVASCRIPT);
}
|
/**
 * Generates code for the given table, returning a map of generated file path to content.
 *
 * <p>Validates that the table and its columns exist; for master-template tables it also
 * loads the sub tables and validates that each sub table's join column exists before
 * delegating to the code generation engine.
 *
 * @param tableId id of the codegen table
 * @return generated files, keyed by path
 */
@Override
public Map<String, String> generationCodes(Long tableId) {
    // Validate that the table exists
    CodegenTableDO table = codegenTableMapper.selectById(tableId);
    if (table == null) {
        throw exception(CODEGEN_TABLE_NOT_EXISTS);
    }
    List<CodegenColumnDO> columns = codegenColumnMapper.selectListByTableId(tableId);
    if (CollUtil.isEmpty(columns)) {
        throw exception(CODEGEN_COLUMN_NOT_EXISTS);
    }
    // For a master-sub template, load the corresponding sub-table information
    List<CodegenTableDO> subTables = null;
    List<List<CodegenColumnDO>> subColumnsList = null;
    if (CodegenTemplateTypeEnum.isMaster(table.getTemplateType())) {
        // Validate that sub tables exist
        subTables = codegenTableMapper.selectListByTemplateTypeAndMasterTableId(
                CodegenTemplateTypeEnum.SUB.getType(), tableId);
        if (CollUtil.isEmpty(subTables)) {
            throw exception(CODEGEN_MASTER_GENERATION_FAIL_NO_SUB_TABLE);
        }
        // Validate that each sub table's join column exists
        subColumnsList = new ArrayList<>();
        for (CodegenTableDO subTable : subTables) {
            List<CodegenColumnDO> subColumns = codegenColumnMapper.selectListByTableId(subTable.getId());
            if (CollUtil.findOne(subColumns, column -> column.getId().equals(subTable.getSubJoinColumnId())) == null) {
                throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, subTable.getId());
            }
            subColumnsList.add(subColumns);
        }
    }
    // Run the code generation
    return codegenEngine.execute(table, columns, subTables, subColumnsList);
}
|
/** Generating code for an unknown table id must raise CODEGEN_TABLE_NOT_EXISTS. */
@Test
public void testGenerationCodes_tableNotExists() {
    assertServiceException(() -> codegenService.generationCodes(randomLongId()),
        CODEGEN_TABLE_NOT_EXISTS);
}
|
/**
 * Converts the given value to a {@code String}, returning {@code defaultValue} when the
 * conversion cannot be performed (delegates to {@code convertQuietly}, which presumably
 * suppresses conversion errors — see its implementation).
 *
 * @param value        the value to convert
 * @param defaultValue fallback returned when conversion fails
 * @return the converted string or the fallback
 */
public static String toStr(Object value, String defaultValue) {
    return convertQuietly(String.class, value, defaultValue);
}
|
/**
 * An int literal with a leading zero is an octal literal in Java source: 001200 (octal)
 * is 640 decimal, so its string form must be "640".
 */
@Test
public void toStrTest4() {
    // Parsed as an octal literal by the Java compiler
    @SuppressWarnings("OctalInteger") final String result = Convert.toStr(001200);
    assertEquals("640", result);
}
|
/**
 * Filters the primary candidates down to those the auto-failover policy considers
 * available, preserving sorted-set ordering.
 *
 * @param primaryCandidates candidate primary brokers
 * @return the available subset, in a new sorted set
 */
@Override
public SortedSet<BrokerStatus> getAvailablePrimaryBrokers(SortedSet<BrokerStatus> primaryCandidates) {
    final SortedSet<BrokerStatus> availablePrimaries = new TreeSet<>();
    primaryCandidates.forEach(candidate -> {
        if (this.autoFailoverPolicy.isBrokerAvailable(candidate)) {
            availablePrimaries.add(candidate);
        }
    });
    return availablePrimaries;
}
|
/**
 * Builds ten brokers where only the even-indexed ones are active, and checks that the
 * default policy returns exactly that active subset as available primaries.
 */
@Test
public void testGetAvailablePrimaryBrokers() throws Exception {
    NamespaceIsolationPolicyImpl defaultPolicy = this.getDefaultPolicy();
    SortedSet<BrokerStatus> brokerStatus = new TreeSet<>();
    SortedSet<BrokerStatus> expectedAvailablePrimaries = new TreeSet<>();
    for (int i = 0; i < 10; i++) {
        BrokerStatus status = BrokerStatus.builder()
                .brokerAddress(String.format("prod1-broker%d.messaging.use.example.com", i))
                .active(i % 2 == 0)
                .loadFactor(i * 10)
                .build();
        brokerStatus.add(status);
        if (i % 2 == 0) {
            expectedAvailablePrimaries.add(status);
        }
    }
    SortedSet<BrokerStatus> availablePrimaries = defaultPolicy.getAvailablePrimaryBrokers(brokerStatus);
    assertEquals(expectedAvailablePrimaries.size(), availablePrimaries.size());
    for (BrokerStatus bs : availablePrimaries) {
        if (!expectedAvailablePrimaries.contains(bs)) {
            fail("Should not happen");
        }
    }
}
|
/**
 * Lower-cases the given string, propagating {@code null} input as {@code null}
 * instead of throwing.
 */
@Udf
public String lcase(
    @UdfParameter(description = "The string to lower-case") final String input) {
    return input == null ? null : input.toLowerCase();
}
|
/** Already-lower-case input must pass through unchanged. */
@Test
public void shouldRetainLowerCaseInput() {
    final String result = udf.lcase("foo");
    assertThat(result, is("foo"));
}
|
/**
 * Returns an iterator over the documents indexed under the given term.
 *
 * <p>Unknown terms yield an empty iterator rather than {@code null}; for known terms the
 * iterator walks a copy of the posting list, matching the original behavior.
 *
 * @param term the search term
 * @return iterator over matching documents, possibly empty
 */
@Override
public Iterator<Text> search(String term) {
    if (!invertedFile.containsKey(term)) {
        return Collections.emptyIterator();
    }
    return new ArrayList<>(invertedFile.get(term)).iterator();
}
|
/**
 * Ranked multi-term search over the corpus: BM25 for the terms "romantic" and "comedy"
 * must produce exactly 78 hits.
 */
@Test
public void testSearchRomanticComedy() {
    System.out.println("search 'romantic comedy'");
    String[] terms = {"romantic", "comedy"};
    Iterator<Relevance> hits = corpus.search(new BM25(), terms);
    int n = 0;
    while (hits.hasNext()) {
        n++;
        Relevance hit = hits.next();
        System.out.println(hit.text + "\t" + hit.score);
    }
    assertEquals(78, n);
}
|
/**
 * Creates a baggage field whose value is fixed to {@code value} regardless of context.
 *
 * @param name  the baggage field name
 * @param value the constant value; {@code null} yields a field that always reads as absent
 * @return the constant-valued baggage field
 */
public static BaggageField constant(String name, @Nullable String value) {
    return new BaggageField(name, new Constant(value));
}
|
/**
 * A constant baggage field returns the same value for any context (trace context or
 * extracted), and a null constant reads as null everywhere.
 */
@Test void constant() {
    BaggageField constant = BaggageFields.constant("foo", "bar");
    assertThat(constant.getValue(context)).isEqualTo("bar");
    assertThat(constant.getValue(extracted)).isEqualTo("bar");
    BaggageField constantNull = BaggageFields.constant("foo", null);
    assertThat(constantNull.getValue(context)).isNull();
    assertThat(constantNull.getValue(extracted)).isNull();
}
|
/**
 * Applies highlighting data to the given line.
 *
 * <p>Once a {@link RangeOffsetConverterException} is observed, highlighting is abandoned
 * for the rest of the file: the first {@code ReadError} is cached and returned for every
 * subsequent line without re-attempting processing.
 *
 * @param lineBuilder the line being built
 * @return the cached read error when highlighting failed on this or an earlier line
 */
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
    if (readError == null) {
        try {
            processHighlightings(lineBuilder);
        } catch (RangeOffsetConverterException e) {
            // Remember the first failure; later lines skip highlighting entirely.
            readError = new ReadError(HIGHLIGHTING, lineBuilder.getLine());
            LOG.debug(format("Inconsistency detected in Highlighting data. Highlighting will be ignored for file '%s'", file.getKey()), e);
        }
    }
    return Optional.ofNullable(readError);
}
|
/**
 * Once the offset converter throws on line 2, the reader must keep line 1's already
 * processed highlighting, report the same read error for lines 2 and 3, leave those
 * lines un-highlighted, and log the inconsistency at DEBUG.
 */
@Test
public void keep_existing_processed_highlighting_when_range_offset_converter_throw_RangeOffsetConverterException() {
    TextRange textRange2 = newTextRange(LINE_2, LINE_2);
    doThrow(RangeOffsetConverterException.class).when(rangeOffsetConverter).offsetToString(textRange2, LINE_2, DEFAULT_LINE_LENGTH);
    TextRange textRange3 = newTextRange(LINE_3, LINE_3);
    HighlightingLineReader highlightingLineReader = newReader(of(
        newSingleLineTextRangeWithExpectingLabel(LINE_1, RANGE_LABEL_1), ANNOTATION,
        textRange2, HIGHLIGHTING_STRING,
        textRange3, COMMENT));
    assertThat(highlightingLineReader.read(line1)).isEmpty();
    // The error produced on line 2 is cached and returned for line 3 as well.
    LineReader.ReadError readErrorLine2 = new LineReader.ReadError(HIGHLIGHTING, LINE_2);
    assertThat(highlightingLineReader.read(line2)).contains(readErrorLine2);
    assertThat(highlightingLineReader.read(line3)).contains(readErrorLine2);
    assertThat(line1.hasHighlighting()).isTrue();
    assertThat(line2.hasHighlighting()).isFalse();
    assertThat(line3.hasHighlighting()).isFalse();
    assertThat(logTester.logs(DEBUG)).isNotEmpty();
}
|
/**
 * Validates a DROP READWRITE_SPLITTING RULE statement before execution.
 *
 * <p>Rule-existence validation is skipped when the statement carries IF EXISTS; the
 * in-use validation always runs.
 *
 * @param sqlStatement the drop statement to validate
 */
@Override
public void checkBeforeUpdate(final DropReadwriteSplittingRuleStatement sqlStatement) {
    if (!sqlStatement.isIfExists()) {
        checkToBeDroppedRuleNames(sqlStatement);
    }
    checkToBeDroppedInUsed(sqlStatement);
}
|
/**
 * Dropping a rule that does not exist (and without IF EXISTS) must fail with
 * MissingRequiredRuleException.
 */
@Test
void assertCheckSQLStatementWithoutToBeDroppedRule() throws RuleDefinitionException {
    ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class);
    when(rule.getConfiguration()).thenReturn(new ReadwriteSplittingRuleConfiguration(Collections.emptyList(), Collections.emptyMap()));
    executor.setRule(rule);
    assertThrows(MissingRequiredRuleException.class, () -> executor.checkBeforeUpdate(createSQLStatement()));
}
|
/**
 * Returns all descendant departments of the given department, collected breadth-first,
 * level by level.
 *
 * @param id id of the root department whose descendants are wanted
 * @return all descendants, in level order
 */
@Override
public List<DeptDO> getChildDeptList(Long id) {
    List<DeptDO> children = new LinkedList<>();
    // Traverse level by level
    Collection<Long> parentIds = Collections.singleton(id);
    for (int i = 0; i < Short.MAX_VALUE; i++) { // Short.MAX_VALUE caps iterations to avoid an infinite loop on buggy (cyclic) data
        // Query all direct children of the current level
        List<DeptDO> depts = deptMapper.selectListByParentId(parentIds);
        // 1. Stop when the current level has no children
        if (CollUtil.isEmpty(depts)) {
            break;
        }
        // 2. Otherwise collect them and descend to the next level
        children.addAll(depts);
        parentIds = convertSet(depts, DeptDO::getId);
    }
    return children;
}
|
/**
 * Builds a two-level tree (two roots, one child each) and verifies that querying the
 * first root's subtree returns exactly that root's branch, in level order.
 */
@Test
public void testGetChildDeptList() {
    // mock data (level-1 nodes)
    DeptDO dept1 = randomPojo(DeptDO.class, o -> o.setName("1"));
    deptMapper.insert(dept1);
    DeptDO dept2 = randomPojo(DeptDO.class, o -> o.setName("2"));
    deptMapper.insert(dept2);
    // mock data (level-2 children)
    DeptDO dept1a = randomPojo(DeptDO.class, o -> o.setName("1-a").setParentId(dept1.getId()));
    deptMapper.insert(dept1a);
    DeptDO dept2a = randomPojo(DeptDO.class, o -> o.setName("2-a").setParentId(dept2.getId()));
    deptMapper.insert(dept2a);
    // prepare arguments
    Long id = dept1.getParentId();
    // invoke
    List<DeptDO> result = deptService.getChildDeptList(id);
    // assert
    assertEquals(result.size(), 2);
    assertPojoEquals(dept1, result.get(0));
    assertPojoEquals(dept1a, result.get(1));
}
|
/**
 * Deregisters an instance from a service via the HTTP API.
 *
 * <p>Ephemeral instances are skipped with an early return after logging — only
 * persistent instances are deregistered through this HTTP code path (presumably
 * ephemeral ones are handled elsewhere; the companion test pins this behavior).
 *
 * @param serviceName name of the service
 * @param groupName   group of the service
 * @param instance    the instance to remove
 * @throws NacosException when the server request fails
 */
@Override
public void deregisterService(String serviceName, String groupName, Instance instance) throws NacosException {
    NAMING_LOGGER.info("[DEREGISTER-SERVICE] {} deregistering service {} with instance: {}", namespaceId,
        serviceName, instance);
    // Ephemeral instances are not deregistered through this proxy.
    if (instance.isEphemeral()) {
        return;
    }
    final Map<String, String> params = new HashMap<>(16);
    params.put(CommonParams.NAMESPACE_ID, namespaceId);
    params.put(CommonParams.SERVICE_NAME, NamingUtils.getGroupedName(serviceName, groupName));
    params.put(CommonParams.CLUSTER_NAME, instance.getClusterName());
    params.put(IP_PARAM, instance.getIp());
    params.put(PORT_PARAM, String.valueOf(instance.getPort()));
    params.put(EPHEMERAL_PARAM, String.valueOf(instance.isEphemeral()));
    reqApi(UtilAndComs.nacosUrlInstance, params, HttpMethod.DELETE);
}
|
/**
 * Deregistering an ephemeral instance must short-circuit: no HTTP DELETE may ever be
 * issued through the injected rest template.
 */
@Test
void testDeregisterServiceForEphemeral() throws Exception {
    NacosRestTemplate nacosRestTemplate = mock(NacosRestTemplate.class);
    // Inject the mocked rest template via reflection so the call can be observed.
    final Field nacosRestTemplateField = NamingHttpClientProxy.class.getDeclaredField("nacosRestTemplate");
    nacosRestTemplateField.setAccessible(true);
    nacosRestTemplateField.set(clientProxy, nacosRestTemplate);
    Instance instance = new Instance();
    clientProxy.deregisterService("serviceName", "groupName", instance);
    verify(nacosRestTemplate, never()).exchangeForm(any(), any(), any(), any(), eq(HttpMethod.DELETE), any());
}
|
/**
 * Looks up a reactive boolean preference whose key and default are given as
 * Android resource ids.
 *
 * @param prefKey      string resource id of the preference key
 * @param defaultValue boolean resource id of the fallback value
 * @return the reactive preference wrapper for the resolved key
 */
public Preference<Boolean> getBoolean(@StringRes int prefKey, @BoolRes int defaultValue) {
  final String key = mResources.getString(prefKey);
  final boolean fallback = mResources.getBoolean(defaultValue);
  return mRxSharedPreferences.getBoolean(key, fallback);
}
|
@Test
public void testConvertTheme() {
  // The per-theme boolean key the conversion is expected to create.
  final String convertedThemeKey = "theme_28860f10-cf16-11e1-9b23-0800200c9a66";
  SharedPrefsHelper.setPrefsValue(
      "settings_key_keyboard_theme_key", "28860f10-cf16-11e1-9b23-0800200c9a66");
  SharedPrefsHelper.setPrefsValue(RxSharedPrefs.CONFIGURATION_VERSION, 10);
  SharedPreferences preferences =
      PreferenceManager.getDefaultSharedPreferences(getApplicationContext());
  // Before the upgrade runs, the converted key must not exist yet.
  Assert.assertFalse(preferences.contains(convertedThemeKey));
  // Constructing RxSharedPrefs triggers the configuration-version upgrade path.
  new RxSharedPrefs(getApplicationContext(), this::testRestoreFunction);
  Assert.assertTrue(preferences.contains(convertedThemeKey));
  Assert.assertTrue(preferences.getBoolean(convertedThemeKey, false));
}
|
/**
 * Resolves {@code query} against a map whose keys may contain {@code *} wildcards.
 * An exact key match always wins. Otherwise each key is interpreted as a pattern in
 * which {@code *} matches any character sequence and every other character is literal.
 *
 * <p>NOTE: when several wildcard keys match the same query, which one wins depends on
 * the map's iteration order.
 *
 * @param mapping map from (possibly wildcarded) keys to values
 * @param query   the concrete string to look up
 * @return the mapped value, or {@code null} if no key matches
 */
static <T> T getWildcardMappedObject(final Map<String, T> mapping, final String query) {
    // Fast path: an exact (non-wildcard) key match wins outright.
    final T exact = mapping.get(query);
    if (exact != null) {
        return exact;
    }
    // Slow path: treat each key as a pattern where '*' matches any sequence.
    // Iterate entries (not keySet+get) to avoid a second lookup per key.
    for (final Map.Entry<String, T> entry : mapping.entrySet()) {
        final String key = entry.getKey();
        // Split into wildcard-free parts, quote each as a literal, and rejoin
        // with ".*" so only the '*' characters are "regex-special".
        String regex = Arrays.stream(key.split("\\*"))
                .map(Pattern::quote)
                .collect(Collectors.joining(".*"));
        // String#split drops trailing empty parts, so restore a trailing wildcard.
        if (key.endsWith("*")) {
            regex += ".*";
        }
        if (query.matches(regex)) {
            return entry.getValue();
        }
    }
    return null;
}
|
@Test
public void testSubdirWildcardExtensionConcat() throws Exception
{
    // Setup test fixture: a single mapping whose key wildcards the file name
    // but pins the ".jsp" extension.
    final Object mappedValue = new Object();
    final Map<String, Object> haystack = Map.of("myplugin/baz/*.jsp", mappedValue);
    // Execute system under test: the query has trailing characters after ".jsp".
    final Object result = PluginServlet.getWildcardMappedObject(haystack, "myplugin/baz/foo.jsp99");
    // Verify results: "*.jsp" must not match "foo.jsp99" (the extension is literal).
    assertNull(result);
}
|
/**
 * Handles a successful ShareGroupHeartbeat response: validates that it carries no
 * error, ignores it if this member is leaving or not in the group, updates the
 * member id/epoch from the response, and reconciles any new assignment it carries.
 *
 * @param response heartbeat response data received from the group coordinator
 * @throws IllegalArgumentException if the response unexpectedly carries an error
 *     code (error responses must be routed to the failure handler instead)
 */
@Override
public void onHeartbeatSuccess(ShareGroupHeartbeatResponseData response) {
    if (response.errorCode() != Errors.NONE.code()) {
        String errorMessage = String.format(
            "Unexpected error in Heartbeat response. Expected no error, but received: %s",
            Errors.forCode(response.errorCode())
        );
        throw new IllegalArgumentException(errorMessage);
    }
    MemberState state = state();
    // A member already on its way out ignores late heartbeat responses.
    if (state == MemberState.LEAVING) {
        log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
                "already leaving the group.", memberId, memberEpoch);
        return;
    }
    // Response to the "leave group" heartbeat: completing the pending leave is all
    // that is needed; no state from the response should be applied.
    if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
        log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
            "to leave the group and completed the leave operation. ", memberId, memberEpoch);
        return;
    }
    if (isNotInGroup()) {
        log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
            " so it's not a member of the group. ", memberId, state);
        return;
    }
    // Update the group member id label in the client telemetry reporter if the member id has
    // changed. Initially the member id is empty, and it is updated when the member joins the
    // group. This is done here to avoid updating the label on every heartbeat response. Also
    // check if the member id is null, as the schema defines it as nullable.
    if (response.memberId() != null && !response.memberId().equals(memberId)) {
        clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
            Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
    }
    this.memberId = response.memberId();
    updateMemberEpoch(response.memberEpoch());
    ShareGroupHeartbeatResponseData.Assignment assignment = response.assignment();
    if (assignment != null) {
        if (!state.canHandleNewAssignment()) {
            // New assignment received but member is in a state where it cannot take new
            // assignments (ex. preparing to leave the group)
            log.debug("Ignoring new assignment {} received from server because member is in {} state.",
                assignment, state);
            return;
        }
        // Normalize the wire assignment into topic-id -> sorted partition set before
        // handing it to the reconciliation logic.
        Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
        assignment.topicPartitions().forEach(topicPartition -> newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
        processAssignmentReceived(newAssignment);
    }
}
|
/**
 * Verifies that a completed rebalance bumps the rebalance-total metric and that the
 * per-hour rate reflects the single rebalance observed in the elapsed mock time.
 */
@Test
public void testRebalanceMetricsOnSuccessfulRebalance() {
    ShareMembershipManager membershipManager = createMembershipManagerJoiningGroup();
    ShareGroupHeartbeatResponse heartbeatResponse = createShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData.Assignment());
    membershipManager.onHeartbeatSuccess(heartbeatResponse.data());
    mockOwnedPartition(membershipManager, Uuid.randomUuid(), "topic1");
    CompletableFuture<Void> commitResult = mockRevocation();
    receiveEmptyAssignment(membershipManager);
    long reconciliationDurationMs = 1234;
    time.sleep(reconciliationDurationMs);
    membershipManager.poll(time.milliseconds());
    // Complete commit request to complete the callback invocation
    commitResult.complete(null);
    assertEquals(1d, getMetricValue(metrics, rebalanceMetricsManager.rebalanceTotal));
    // assertEquals(double, double, double) is (expected, actual, delta): the measured
    // rate must be the ACTUAL argument with a small tolerance. The previous form
    // passed the metric as the delta, which made the assertion pass for any rate >= 119.
    assertEquals(120d, (double) getMetricValue(metrics, rebalanceMetricsManager.rebalanceRatePerHour), 1d);
}
|
/**
 * Creates a fresh, empty builder for assembling a rule description section DTO.
 *
 * @return a new builder instance
 */
public static RuleDescriptionSectionDtoBuilder builder() {
  final RuleDescriptionSectionDtoBuilder freshBuilder = new RuleDescriptionSectionDtoBuilder();
  return freshBuilder;
}
|
@Test
void setKey_whenDefaultAlreadySet_shouldThrow() {
  // Mark the builder as the "default" section first...
  RuleDescriptionSectionDto.RuleDescriptionSectionDtoBuilder defaultedBuilder = RuleDescriptionSectionDto.builder()
    .setDefault();
  // ...then any attempt to also assign a key must be rejected, since the two
  // configuration styles are mutually exclusive.
  assertThatExceptionOfType(IllegalArgumentException.class)
    .isThrownBy(() -> defaultedBuilder.key("balbal"))
    .withMessage("Only one of setDefault and key methods can be called");
}
|
/**
 * Tells whether the given node path matches this item's path pattern.
 * Uses find() semantics, so a match anywhere within the path is accepted.
 *
 * @param path node path to check
 * @return true if the pattern occurs in the path
 */
public boolean isValidatedPath(final String path) {
    final boolean patternFound = pathPattern.matcher(path).find();
    return patternFound;
}
|
@Test
void assertIsValidPathWithNullParentNode() {
    // Extra leading segments before the rule root should still validate, because
    // matching uses find() rather than a full-string match.
    UniqueRuleItemNodePath nodePath = new UniqueRuleItemNodePath(new RuleRootNodePath("foo"), "test_path");
    assertTrue(nodePath.isValidatedPath("/word1/word2-/rules/foo/test_path/versions/1234"));
}
|
/**
 * FUSE getattr callback: fills {@code stat} with the attributes of {@code path}.
 * Delegates to {@link #getattrInternal}, wrapped by AlluxioFuseUtils.call which
 * presumably handles logging/metrics for the FUSE_GETATTR operation — confirm
 * against AlluxioFuseUtils.
 *
 * @param path the file system path being queried
 * @param stat the stat structure to populate
 * @return whatever the wrapped call returns (0 on success by FUSE convention)
 */
@Override
public int getattr(String path, FileStat stat) {
    return AlluxioFuseUtils.call(
        LOG, () -> getattrInternal(path, stat), FuseConstants.FUSE_GETATTR, "path=%s", path);
}
|
/**
 * End-to-end check of the FUSE getattr mapping: a mocked URIStatus must be
 * translated into the expected FileStat fields (size, blocks, timestamps split
 * into seconds/nanos, uid/gid of the current user, and mode with S_IFDIR).
 */
@Test
public void getattr() throws Exception {
    // set up status
    FileInfo info = new FileInfo();
    info.setLength(4 * Constants.KB + 1);
    info.setLastAccessTimeMs(1000);
    info.setLastModificationTimeMs(1000);
    String userName = System.getProperty("user.name");
    info.setOwner(userName);
    Optional<String> groupName = AlluxioFuseUtils.getGroupName(userName);
    assertTrue(groupName.isPresent());
    info.setGroup(groupName.get());
    info.setFolder(true);
    info.setMode(123);
    info.setCompleted(true);
    URIStatus status = new URIStatus(info);
    // mock fs
    when(mFileSystem.getStatus(any(AlluxioURI.class))).thenReturn(status);
    FileStat stat = FileStat.of(ByteBuffer.allocateDirect(256));
    assertEquals(0, mFuseFs.getattr("/foo", stat));
    assertEquals(status.getLength(), stat.st_size.longValue());
    // 4KB+1 bytes -> 9 blocks, assuming 512-byte stat blocks — TODO confirm.
    assertEquals(9, stat.st_blocks.intValue());
    // Millisecond timestamps are split into whole seconds and remaining nanos.
    assertEquals(status.getLastAccessTimeMs() / 1000, stat.st_atim.tv_sec.get());
    assertEquals((status.getLastAccessTimeMs() % 1000) * 1000,
        stat.st_atim.tv_nsec.longValue());
    assertEquals(status.getLastModificationTimeMs() / 1000, stat.st_ctim.tv_sec.get());
    assertEquals((status.getLastModificationTimeMs() % 1000) * 1000,
        stat.st_ctim.tv_nsec.longValue());
    assertEquals(status.getLastModificationTimeMs() / 1000, stat.st_mtim.tv_sec.get());
    assertEquals((status.getLastModificationTimeMs() % 1000) * 1000,
        stat.st_mtim.tv_nsec.longValue());
    // Owner/group must resolve to the current OS user's uid/gid.
    Optional<Long> uid = AlluxioFuseUtils.getUid(System.getProperty("user.name"));
    Optional<Long> gid = AlluxioFuseUtils.getGidFromUserName(System.getProperty("user.name"));
    assertTrue(uid.isPresent());
    assertTrue(gid.isPresent());
    assertEquals((long) uid.get(), stat.st_uid.get());
    assertEquals((long) gid.get(), stat.st_gid.get());
    // The folder flag must surface as S_IFDIR OR-ed into the permission bits.
    assertEquals(123 | FileStat.S_IFDIR, stat.st_mode.intValue());
}
|
/**
 * Generates the Java data class and schemas for the given message spec.
 *
 * @param message the parsed message specification
 * @throws Exception if any generation step fails
 * @throws RuntimeException if the spec's version range is open-ended
 */
void generate(MessageSpec message) throws Exception {
    // A version range containing Short.MAX_VALUE means an open-ended "n+" range,
    // which is rejected: every message must pin a maximum version.
    if (message.struct().versions().contains(Short.MAX_VALUE)) {
        throw new RuntimeException("Message " + message.name() + " does " +
            "not specify a maximum version.");
    }
    // Register nested structs first — presumably so schema generation and class
    // generation can resolve references to them.
    structRegistry.register(message);
    schemaGenerator.generateSchemas(message);
    messageFlexibleVersions = message.flexibleVersions();
    generateClass(Optional.of(message),
        message.dataClassName(),
        message.struct(),
        message.struct().versions());
    // Emit the accumulated header (e.g. imports) last, once all needed types are known.
    headerGenerator.generate();
}
|
/**
 * A "null" default on an array field is only legal when the field is nullable in
 * ALL the versions it exists in. Here the field exists since v0 but is nullable
 * only from v1+, so the generator must reject the spec with a clear message.
 */
@Test
public void testInvalidNullDefaultForPotentiallyNonNullableArray() throws Exception {
    MessageSpec testMessageSpec = MessageGenerator.JSON_SERDE.readValue(String.join("", Arrays.asList(
        "{",
        "  \"type\": \"request\",",
        "  \"name\": \"FooBar\",",
        "  \"validVersions\": \"0-2\",",
        "  \"flexibleVersions\": \"none\",",
        "  \"fields\": [",
        "    { \"name\": \"field1\", \"type\": \"[]int32\", \"versions\": \"0+\", \"nullableVersions\": \"1+\", ",
        "    \"default\": \"null\" }",
        "  ]",
        "}")), MessageSpec.class);
    // The failure must mention the nullability-coverage problem specifically.
    assertStringContains("not all versions of this field are nullable",
        assertThrows(RuntimeException.class, () -> {
            new MessageDataGenerator("org.apache.kafka.common.message").generate(testMessageSpec);
        }).getMessage());
}
|
/**
 * Returns the erasure of this type. The erasure was resolved when this object was
 * created; for an unbounded type variable it is {@code java.lang.Object}.
 */
@Override
@PublicAPI(usage = ACCESS)
public JavaClass toErasure() {
    return this.erasure;
}
|
@Test
public void erased_unbound_type_variable_is_java_lang_Object() {
    @SuppressWarnings("unused")
    class ClassWithUnboundTypeParameter<T> {
    }
    // Per the JLS erasure rules, an unbounded type parameter erases to Object.
    JavaTypeVariable<JavaClass> typeVariable =
            new ClassFileImporter().importClass(ClassWithUnboundTypeParameter.class).getTypeParameters().get(0);
    assertThatType(typeVariable.toErasure()).matches(Object.class);
}
|
/**
 * Returns a deduplicating transform that treats two elements as duplicates when the
 * given function maps them to the same representative value. Uses the default time
 * domain and duration; no type descriptors are attached.
 *
 * @param representativeValueFn extracts the identifier used for duplicate detection
 */
public static <T, IdT> Deduplicate.WithRepresentativeValues<T, IdT> withRepresentativeValueFn(
    SerializableFunction<T, IdT> representativeValueFn) {
  Deduplicate.WithRepresentativeValues<T, IdT> transform =
      new Deduplicate.WithRepresentativeValues<>(
          DEFAULT_TIME_DOMAIN, DEFAULT_DURATION, representativeValueFn, null, null);
  return transform;
}
|
/**
 * A lambda carries no reflective type information, so without an explicit type
 * descriptor the transform cannot infer a coder for the representative values and
 * pipeline construction must fail.
 */
@Test
public void withLambdaRepresentativeValuesFnNoTypeDescriptorShouldThrow() {
  // Removed an unused Multimap fixture that was populated but never read.
  PCollection<String> dupes =
      p.apply(Create.of("foo", "foos", "barbaz", "barbaz", "bazbar", "foo"));
  assertThrows(
      "Unable to return a default Coder for RemoveRepresentativeDupes",
      IllegalStateException.class,
      () ->
          dupes.apply(
              "RemoveRepresentativeDupes",
              Deduplicate.withRepresentativeValueFn(String::length)));
}
|
/**
 * Parses a DRL expression against the given pattern type and binding.
 * Convenience overload of the four-argument {@code drlxParse}, with the final
 * boolean flag fixed to {@code false} (see that overload for its meaning).
 */
public DrlxParseResult drlxParse(Class<?> patternType, String bindingId, String expression) {
    return this.drlxParse(patternType, bindingId, expression, false);
}
|
@Test
public void bigDecimalInWithBD() {
    // "in" over BigDecimal literals (100B, 200B) must compile to an InOperator eval
    // that constructs the BigDecimal values from their string representations.
    SingleDrlxParseSuccess parseResult = (SingleDrlxParseSuccess) parser.drlxParse(Person.class, "$p", "(money in (100B, 200B))");
    assertThat(parseResult.getExpr().toString()).isEqualTo("D.eval(org.drools.model.operators.InOperator.INSTANCE, _this.getMoney(), new java.math.BigDecimal(\"100\"), new java.math.BigDecimal(\"200\"))");
}
|
/**
 * Queries Consul's health API for the "passing" instances of this service, converts
 * them to RemoteInstance objects (marking the one matching this node as self),
 * reports cluster health to the health checker, and returns the list.
 *
 * @return the discovered remote instances (possibly empty)
 * @throws ServiceQueryException if the Consul query fails
 */
@Override
public List<RemoteInstance> queryRemoteNodes() {
    List<RemoteInstance> remoteInstances = new ArrayList<>();
    try {
        HealthClient healthClient = client.healthClient();
        // Discover only "passing" nodes
        List<ServiceHealth> nodes = healthClient.getHealthyServiceInstances(serviceName).getResponse();
        if (CollectionUtils.isNotEmpty(nodes)) {
            nodes.forEach(node -> {
                // Instances without an address cannot be dialed; skip them.
                if (!Strings.isNullOrEmpty(node.getService().getAddress())) {
                    Address address = new Address(node.getService().getAddress(), node.getService().getPort(), false);
                    if (address.equals(selfAddress)) {
                        address.setSelf(true);
                    }
                    remoteInstances.add(new RemoteInstance(address));
                }
            });
        }
        ClusterHealthStatus healthStatus = OAPNodeChecker.isHealth(remoteInstances);
        if (healthStatus.isHealth()) {
            this.healthChecker.health();
        } else {
            this.healthChecker.unHealth(healthStatus.getReason());
        }
    } catch (Throwable e) {
        healthChecker.unHealth(e);
        // NOTE(review): only the message is propagated here, losing the cause's stack
        // trace; consider a cause-preserving ServiceQueryException constructor if one exists.
        throw new ServiceQueryException(e.getMessage());
    }
    if (log.isDebugEnabled()) {
        // Fixed log-message typo: "Cosule" -> "Consul".
        remoteInstances.forEach(instance -> log.debug("Consul cluster instance: {}", instance));
    }
    return remoteInstances;
}
|
/**
 * Verifies that queryRemoteNodes converts the mocked Consul health entries into
 * two instances, with this node marked as self and listed first.
 */
@Test
public void queryRemoteNodes() {
    // Register this node first so the coordinator can recognize it as "self".
    registerSelfRemote();
    List<ServiceHealth> serviceHealths = mockHealth();
    when(consulResponse.getResponse()).thenReturn(serviceHealths);
    List<RemoteInstance> remoteInstances = coordinator.queryRemoteNodes();
    assertEquals(2, remoteInstances.size());
    // The self instance is expected first, followed by the remote peer.
    RemoteInstance selfInstance = remoteInstances.get(0);
    velidate(selfRemoteAddress, selfInstance); // NOTE(review): helper name is a typo of "validate" (defined elsewhere)
    RemoteInstance notSelfInstance = remoteInstances.get(1);
    velidate(remoteAddress, notSelfInstance);
}
|
/**
 * Post-interception hook for the pull consumer's assign call: records the assigned
 * message queues either in thread-local info (consumer not yet cached) or on the
 * consumer's wrapper, then delegates to the optional handler or applies the
 * prohibited-topic consumption configuration.
 *
 * @param context the interception context; argument 0 is expected to be the
 *     assigned collection of message queues
 * @return the (unmodified) context
 */
@Override
public ExecuteContext after(ExecuteContext context) {
    // Skip invocations that originate from Sermant itself to avoid recursion.
    if (InvokeUtils.isRocketMqInvokeBySermant(Thread.currentThread().getStackTrace())) {
        return context;
    }
    DefaultLitePullConsumerWrapper wrapper = RocketMqPullConsumerController
            .getPullConsumerWrapper((DefaultLitePullConsumer) context.getObject());
    Object messageQueueObject = context.getArguments()[0];
    // instanceof is null-safe, so the previous explicit null check was redundant.
    if (!(messageQueueObject instanceof Collection)) {
        return context;
    }
    Collection<MessageQueue> messageQueue = (Collection<MessageQueue>) messageQueueObject;
    if (wrapper == null) {
        // Consumer not yet cached: stash the assignment in thread-local info.
        setAssignLocalInfo(messageQueue);
    } else {
        updateAssignWrapperInfo(wrapper, messageQueue);
    }
    if (handler != null) {
        handler.doAfter(context);
        return context;
    }
    // After specifying the consumption queue, it is necessary to enable or prohibition of consumption for
    // consumers, according to the prohibited topic configuration
    disablePullConsumption(wrapper);
    return context;
}
|
/**
 * Exercises the three paths of the after-hook: no wrapper cached (assignment goes
 * to thread-local info), null argument (no assignment recorded), and wrapper
 * cached (assignment recorded on the wrapper).
 */
@Test
public void testAfter() {
    ExecuteContext context = ExecuteContext.forMemberMethod(pullConsumer, null, new Object[]{messageQueues},
            null, null);
    // wrapper is null: assignment is stored in thread-local info as ASSIGN.
    interceptor.after(context);
    Assert.assertEquals(PullConsumerLocalInfoUtils.getSubscriptionType().name(), "ASSIGN");
    Assert.assertEquals(PullConsumerLocalInfoUtils.getMessageQueue(), messageQueues);
    PullConsumerLocalInfoUtils.removeSubscriptionType();
    PullConsumerLocalInfoUtils.removeMessageQueue();
    // Wrapper is null, message queue is null
    context = ExecuteContext.forMemberMethod(pullConsumer, null, new Object[]{null},
            null, null);
    interceptor.after(context);
    // With a null argument no assignment is recorded; the type stays NONE.
    Assert.assertEquals(PullConsumerLocalInfoUtils.getSubscriptionType().name(), "NONE");
    PullConsumerLocalInfoUtils.removeSubscriptionType();
    // Wrapper is not null
    context = ExecuteContext.forMemberMethod(pullConsumer, null, new Object[]{messageQueues},
            null, null);
    RocketMqPullConsumerController.cachePullConsumer(pullConsumer);
    interceptor.after(context);
    // Cached consumer: assignment and derived topics land on the wrapper instead.
    Assert.assertEquals(pullConsumerWrapper.getSubscriptionType().name(), "ASSIGN");
    Assert.assertEquals(pullConsumerWrapper.getMessageQueues(), messageQueues);
    Assert.assertEquals(pullConsumerWrapper.getSubscribedTopics(), topics);
}
|
/**
 * Builds the vertex parallelism store for an execution. In REACTIVE mode the
 * reactive-mode store computation is used; any other mode (including null)
 * falls back to the scheduler's standard store computation.
 *
 * @param jobGraph the job graph whose vertices are examined
 * @param executionMode the scheduler execution mode, possibly null
 * @param defaultMaxParallelismFunc supplies the default max parallelism per vertex
 * @return the computed vertex parallelism store
 */
@VisibleForTesting
static VertexParallelismStore computeVertexParallelismStoreForExecution(
        JobGraph jobGraph,
        SchedulerExecutionMode executionMode,
        Function<JobVertex, Integer> defaultMaxParallelismFunc) {
    final boolean reactiveMode = executionMode == SchedulerExecutionMode.REACTIVE;
    if (reactiveMode) {
        return computeReactiveModeVertexParallelismStore(
                jobGraph.getVertices(), defaultMaxParallelismFunc, false);
    } else {
        return SchedulerBase.computeVertexParallelismStore(
                jobGraph.getVertices(), defaultMaxParallelismFunc);
    }
}
|
@Test
void testComputeVertexParallelismStoreForExecutionInDefaultMode() {
    JobVertex vertexA = createNoOpVertex("v1", 1, 50);
    JobVertex vertexB = createNoOpVertex("v2", 50, 50);
    JobGraph jobGraph = streamingJobGraph(vertexA, vertexB);
    // A null execution mode selects the default (non-reactive) store computation.
    VertexParallelismStore store =
            AdaptiveScheduler.computeVertexParallelismStoreForExecution(
                    jobGraph, null, SchedulerBase::getDefaultMaxParallelism);
    // Default mode must keep each vertex's configured parallelism and max
    // parallelism untouched.
    for (JobVertex vertex : jobGraph.getVertices()) {
        VertexParallelismInformation info = store.getParallelismInfo(vertex.getID());
        assertThat(info.getParallelism()).isEqualTo(vertex.getParallelism());
        assertThat(info.getMaxParallelism()).isEqualTo(vertex.getMaxParallelism());
    }
}
|
/**
 * Iterates this contributor tree by delegating to the root node's iterator.
 */
@Override
public Iterator<ConfigDataEnvironmentContributor> iterator() {
	final Iterator<ConfigDataEnvironmentContributor> rootIterator = this.root.iterator();
	return rootIterator;
}
|
@Test
void iteratorWhenSingleContributorReturnsSingletonIterator() {
	// A lone contributor should yield exactly itself ("a") when iterated.
	ConfigDataEnvironmentContributor singleContributor = createBoundContributor("a");
	assertThat(asLocationsList(singleContributor.iterator())).containsExactly("a");
}
|
/**
 * Merges the held sharding conditions in place, collapsing conditions that
 * findUniqueShardingCondition reports as already represented — presumably
 * duplicates; confirm against that helper's contract.
 */
public void merge() {
    // Nothing to merge when there is at most one condition.
    if (conditions.size() > 1) {
        Collection<ShardingCondition> result = new LinkedList<>();
        // Seed with the last condition, then consume the remaining ones from the
        // tail, adding each only when the helper deems it unique w.r.t. result.
        result.add(conditions.remove(conditions.size() - 1));
        while (!conditions.isEmpty()) {
            findUniqueShardingCondition(result, conditions.remove(conditions.size() - 1)).ifPresent(result::add);
        }
        // Replace the (now empty) condition list with the merged set.
        conditions.addAll(result);
    }
}
|
@Test
void assertMerge() {
    // The fixture presumably contains overlapping conditions; after merging,
    // exactly two unique conditions should remain.
    ShardingConditions conditionsUnderTest = createMultipleShardingConditions();
    conditionsUnderTest.merge();
    assertThat(conditionsUnderTest.getConditions().size(), is(2));
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.