patch (string, 17-31.2k chars) | y (int64, 1-1) | oldf (string, 0-2.21M chars) | idx (int64, 1-1) | id (int64, 4.29k-68.4k) | msg (string, 8-843 chars) | proj (212 classes) | lang (9 classes)
---|---|---|---|---|---|---|---|
@@ -481,6 +481,12 @@ func buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap m
setupChrootPackages = append(setupChrootPackages, toolingPackage.Name)
}
+ logger.Log.Infof("HidepidDisabled is %v.", systemConfig.HidepidDisable)
+ hidepidEnable := true
+ if systemConfig.HidepidDisable {
+ hidepidEnable = false
+ }
+
if systemConfig.ReadOnlyVerityRoot.Enable {
// We will need the veritysetup package (and its dependencies) to manage the verity disk, add them to our
// image setup environment (setuproot chroot or live installer).
| 1 |
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// Tool to create and install images
package main
import (
"fmt"
"os"
"path/filepath"
"gopkg.in/alecthomas/kingpin.v2"
"microsoft.com/pkggen/imagegen/configuration"
"microsoft.com/pkggen/imagegen/diskutils"
"microsoft.com/pkggen/imagegen/installutils"
"microsoft.com/pkggen/internal/exe"
"microsoft.com/pkggen/internal/file"
"microsoft.com/pkggen/internal/logger"
"microsoft.com/pkggen/internal/safechroot"
)
var (
app = kingpin.New("imager", "Tool to create and install images.")
buildDir = app.Flag("build-dir", "Directory to store temporary files while building.").ExistingDir()
configFile = exe.InputFlag(app, "Path to the image config file.")
localRepo = app.Flag("local-repo", "Path to local RPM repo").ExistingDir()
tdnfTar = app.Flag("tdnf-worker", "Path to tdnf worker tarball").ExistingFile()
repoFile = app.Flag("repo-file", "Full path to local.repo.").ExistingFile()
assets = app.Flag("assets", "Path to assets directory.").ExistingDir()
baseDirPath = app.Flag("base-dir", "Base directory for relative file paths from the config. Defaults to config's directory.").ExistingDir()
outputDir = app.Flag("output-dir", "Path to directory to place final image.").ExistingDir()
liveInstallFlag = app.Flag("live-install", "Enable to perform a live install to the disk specified in config file.").Bool()
emitProgress = app.Flag("emit-progress", "Write progress updates to stdout, such as percent complete and current action.").Bool()
logFile = exe.LogFileFlag(app)
logLevel = exe.LogLevelFlag(app)
)
const (
// additionalFilesTempDirectory is the location where installutils expects to pick up any additional files
// to add to the install directory
additionalFilesTempDirectory = "/tmp/additionalfiles"
// postInstallScriptTempDirectory is the directory where installutils expects to pick up any post install scripts
// to run inside the install directory environment
postInstallScriptTempDirectory = "/tmp/postinstall"
// sshPubKeysTempDirectory is the directory where installutils expects to pick up ssh public key files to add into
// the install directory
sshPubKeysTempDirectory = "/tmp/sshpubkeys"
)
func main() {
const defaultSystemConfig = 0
app.Version(exe.ToolkitVersion)
kingpin.MustParse(app.Parse(os.Args[1:]))
logger.InitBestEffort(*logFile, *logLevel)
if *emitProgress {
installutils.EnableEmittingProgress()
}
// Parse Config
config, err := configuration.LoadWithAbsolutePaths(*configFile, *baseDirPath)
logger.PanicOnError(err, "Failed to load configuration file (%s) with base directory (%s)", *configFile, *baseDirPath)
// Currently only process 1 system config
systemConfig := config.SystemConfigs[defaultSystemConfig]
err = buildSystemConfig(systemConfig, config.Disks, *outputDir, *buildDir)
logger.PanicOnError(err, "Failed to build system configuration")
}
func buildSystemConfig(systemConfig configuration.SystemConfig, disks []configuration.Disk, outputDir, buildDir string) (err error) {
logger.Log.Infof("Building system configuration (%s)", systemConfig.Name)
const (
assetsMountPoint = "/installer"
localRepoMountPoint = "/mnt/cdrom/RPMS"
repoFileMountPoint = "/etc/yum.repos.d"
setupRoot = "/setuproot"
installRoot = "/installroot"
rootID = "rootfs"
defaultDiskIndex = 0
defaultTempDiskName = "disk.raw"
existingChrootDir = false
leaveChrootOnDisk = false
)
var (
isRootFS bool
isLoopDevice bool
isOfflineInstall bool
diskDevPath string
kernelPkg string
encryptedRoot diskutils.EncryptedRootDevice
readOnlyRoot diskutils.VerityDevice
partIDToDevPathMap map[string]string
partIDToFsTypeMap map[string]string
mountPointToOverlayMap map[string]*installutils.Overlay
extraMountPoints []*safechroot.MountPoint
extraDirectories []string
)
// Get list of packages to install into image
packagesToInstall, err := installutils.PackageNamesFromSingleSystemConfig(systemConfig)
if err != nil {
logger.Log.Error("Failed to import packages from package lists in config file")
return
}
isRootFS = len(systemConfig.PartitionSettings) == 0
if isRootFS {
logger.Log.Infof("Creating rootfs")
additionalExtraMountPoints, additionalExtraDirectories, err := setupRootFS(outputDir, installRoot)
if err != nil {
return err
}
extraDirectories = append(extraDirectories, additionalExtraDirectories...)
extraMountPoints = append(extraMountPoints, additionalExtraMountPoints...)
isOfflineInstall = true
// Select the best kernel package for this environment.
kernelPkg, err = installutils.SelectKernelPackage(systemConfig, *liveInstallFlag)
// Rootfs images will usually not set a kernel, ignore errors
if err != nil {
logger.Log.Debugf("Rootfs did not find a kernel, this is normal: '%s'", err.Error())
} else {
logger.Log.Infof("Rootfs is including a kernel (%s)", kernelPkg)
packagesToInstall = append([]string{kernelPkg}, packagesToInstall...)
}
} else {
logger.Log.Info("Creating raw disk in build directory")
diskConfig := disks[defaultDiskIndex]
diskDevPath, partIDToDevPathMap, partIDToFsTypeMap, isLoopDevice, encryptedRoot, readOnlyRoot, err = setupDisk(buildDir, defaultTempDiskName, *liveInstallFlag, diskConfig, systemConfig.Encryption, systemConfig.ReadOnlyVerityRoot)
if err != nil {
return
}
if isLoopDevice {
isOfflineInstall = true
defer diskutils.DetachLoopbackDevice(diskDevPath)
defer diskutils.BlockOnDiskIO(diskDevPath)
}
if systemConfig.ReadOnlyVerityRoot.Enable {
defer readOnlyRoot.CleanupVerityDevice()
}
// Add additional system settings for root encryption
err = setupDiskEncryption(&systemConfig, &encryptedRoot, buildDir)
if err != nil {
return
}
// Select the best kernel package for this environment
kernelPkg, err = installutils.SelectKernelPackage(systemConfig, *liveInstallFlag)
if err != nil {
logger.Log.Errorf("Failed to select a suitable kernel to install in config (%s)", systemConfig.Name)
return
}
logger.Log.Infof("Selected (%s) for the kernel", kernelPkg)
packagesToInstall = append([]string{kernelPkg}, packagesToInstall...)
}
setupChrootDir := filepath.Join(buildDir, setupRoot)
// Create Partition to Mountpoint map
mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, diffDiskBuild := installutils.CreateMountPointPartitionMap(partIDToDevPathMap, partIDToFsTypeMap, systemConfig)
if diffDiskBuild {
mountPointToOverlayMap, err = installutils.UpdatePartitionMapWithOverlays(partIDToDevPathMap, partIDToFsTypeMap, mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, systemConfig)
// Schedule unmount of overlays after the upper layers are unmounted.
defer installutils.OverlayUnmount(mountPointToOverlayMap)
if err != nil {
logger.Log.Error("Failed to create the partition map")
return
}
}
if isOfflineInstall {
// Create setup chroot
additionalExtraMountPoints := []*safechroot.MountPoint{
safechroot.NewMountPoint(*assets, assetsMountPoint, "", safechroot.BindMountPointFlags, ""),
safechroot.NewMountPoint(*localRepo, localRepoMountPoint, "", safechroot.BindMountPointFlags, ""),
safechroot.NewMountPoint(filepath.Dir(*repoFile), repoFileMountPoint, "", safechroot.BindMountPointFlags, ""),
}
extraMountPoints = append(extraMountPoints, additionalExtraMountPoints...)
setupChroot := safechroot.NewChroot(setupChrootDir, existingChrootDir)
err = setupChroot.Initialize(*tdnfTar, extraDirectories, extraMountPoints)
if err != nil {
logger.Log.Error("Failed to create setup chroot")
return
}
defer setupChroot.Close(leaveChrootOnDisk)
// Before entering the chroot, copy in any and all host files needed and
// fix up their paths to be in the tmp directory.
err = fixupExtraFilesIntoChroot(setupChroot, &systemConfig)
if err != nil {
logger.Log.Error("Failed to copy extra files into setup chroot")
return
}
err = setupChroot.Run(func() error {
return buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, mountPointToOverlayMap, packagesToInstall, systemConfig, diskDevPath, isRootFS, encryptedRoot, readOnlyRoot, diffDiskBuild)
})
if err != nil {
logger.Log.Error("Failed to build image")
return
}
err = cleanupExtraFilesInChroot(setupChroot)
if err != nil {
logger.Log.Error("Failed to cleanup extra files in setup chroot")
return
}
// Create any partition-based artifacts
err = installutils.ExtractPartitionArtifacts(setupChrootDir, outputDir, defaultDiskIndex, disks[defaultDiskIndex], systemConfig, partIDToDevPathMap, mountPointToOverlayMap)
if err != nil {
return
}
// Copy disk artifact if necessary.
// Currently only supports one disk config
if !isRootFS {
if disks[defaultDiskIndex].Artifacts != nil {
input := filepath.Join(buildDir, defaultTempDiskName)
output := filepath.Join(outputDir, fmt.Sprintf("disk%d.raw", defaultDiskIndex))
err = file.Copy(input, output)
if err != nil {
return
}
}
}
} else {
err = buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, mountPointToOverlayMap, packagesToInstall, systemConfig, diskDevPath, isRootFS, encryptedRoot, readOnlyRoot, diffDiskBuild)
if err != nil {
logger.Log.Error("Failed to build image")
return
}
}
// Cleanup encrypted disks
if systemConfig.Encryption.Enable {
err = diskutils.CleanupEncryptedDisks(encryptedRoot, isOfflineInstall)
if err != nil {
logger.Log.Warn("Failed to cleanup encrypted disks")
return
}
}
return
}
func setupDiskEncryption(systemConfig *configuration.SystemConfig, encryptedRoot *diskutils.EncryptedRootDevice, keyFileDir string) (err error) {
if systemConfig.Encryption.Enable {
// Add a default keyfile for initramfs unlock
encryptedRoot.HostKeyFile, err = diskutils.AddDefaultKeyfile(keyFileDir, encryptedRoot.Device, systemConfig.Encryption)
if err != nil {
logger.Log.Warnf("Failed to add default keyfile: %v", err)
return
}
// Copy the default keyfile into the image
if len(systemConfig.AdditionalFiles) == 0 {
systemConfig.AdditionalFiles = make(map[string]string)
}
systemConfig.AdditionalFiles[encryptedRoot.HostKeyFile] = diskutils.DefaultKeyFilePath
logger.Log.Infof("Adding default key file to systemConfig additional files")
}
return
}
func setupRootFS(outputDir, installRoot string) (extraMountPoints []*safechroot.MountPoint, extraDirectories []string, err error) {
const rootFSDirName = "rootfs"
rootFSOutDir := filepath.Join(outputDir, rootFSDirName)
// Ensure there is not already a directory at rootFSOutDir
exists, err := file.DirExists(rootFSOutDir)
logger.PanicOnError(err, "Failed while checking if directory (%s) exists.", rootFSOutDir)
if exists {
err = fmt.Errorf("output rootfs directory (%s) already exists", rootFSOutDir)
return
}
err = os.MkdirAll(rootFSOutDir, os.ModePerm)
if err != nil {
return
}
// For a rootfs, bind-mount the output directory to the chroot directory being installed to
rootFSMountPoint := safechroot.NewMountPoint(rootFSOutDir, installRoot, "", safechroot.BindMountPointFlags, "")
extraMountPoints = []*safechroot.MountPoint{rootFSMountPoint}
extraDirectories = []string{installRoot}
return
}
func setupDisk(outputDir, diskName string, liveInstallFlag bool, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (diskDevPath string, partIDToDevPathMap, partIDToFsTypeMap map[string]string, isLoopDevice bool, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) {
const (
realDiskType = "path"
)
if diskConfig.TargetDisk.Type == realDiskType {
if liveInstallFlag {
diskDevPath = diskConfig.TargetDisk.Value
partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupRealDisk(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig)
} else {
err = fmt.Errorf("target Disk Type is set but --live-install option is not set. Please check your config or enable the --live-install option")
return
}
} else {
diskDevPath, partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupLoopDeviceDisk(outputDir, diskName, diskConfig, rootEncryption, readOnlyRootConfig)
isLoopDevice = true
}
return
}
func setupLoopDeviceDisk(outputDir, diskName string, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (diskDevPath string, partIDToDevPathMap, partIDToFsTypeMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) {
defer func() {
// Detach the loopback device on failure
if err != nil && diskDevPath != "" {
detachErr := diskutils.DetachLoopbackDevice(diskDevPath)
if detachErr != nil {
logger.Log.Errorf("Failed to detach loopback device on failed initialization. Error: %s", detachErr)
}
}
}()
// Create Raw Disk File
rawDisk, err := diskutils.CreateEmptyDisk(outputDir, diskName, diskConfig)
if err != nil {
logger.Log.Errorf("Failed to create empty disk file in (%s)", outputDir)
return
}
diskDevPath, err = diskutils.SetupLoopbackDevice(rawDisk)
if err != nil {
logger.Log.Errorf("Failed to mount raw disk (%s) as a loopback device", rawDisk)
return
}
partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = setupRealDisk(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig)
if err != nil {
logger.Log.Errorf("Failed to setup loopback disk partitions (%s)", rawDisk)
return
}
return
}
func setupRealDisk(diskDevPath string, diskConfig configuration.Disk, rootEncryption configuration.RootEncryption, readOnlyRootConfig configuration.ReadOnlyVerityRoot) (partIDToDevPathMap, partIDToFsTypeMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, err error) {
const (
defaultBlockSize = diskutils.MiB
noMaxSize = 0
)
// Set up partitions
partIDToDevPathMap, partIDToFsTypeMap, encryptedRoot, readOnlyRoot, err = diskutils.CreatePartitions(diskDevPath, diskConfig, rootEncryption, readOnlyRootConfig)
if err != nil {
logger.Log.Errorf("Failed to create partitions on disk (%s)", diskDevPath)
return
}
// Apply firmware
err = diskutils.ApplyRawBinaries(diskDevPath, diskConfig)
if err != nil {
logger.Log.Errorf("Failed to add add raw binaries to disk (%s)", diskDevPath)
return
}
return
}
// fixupExtraFilesIntoChroot will copy extra files needed for the build
// into the chroot and alter the extra files in the config to point at their new paths.
func fixupExtraFilesIntoChroot(installChroot *safechroot.Chroot, config *configuration.SystemConfig) (err error) {
var filesToCopy []safechroot.FileToCopy
for i, user := range config.Users {
for j, pubKey := range user.SSHPubKeyPaths {
newFilePath := filepath.Join(sshPubKeysTempDirectory, pubKey)
fileToCopy := safechroot.FileToCopy{
Src: pubKey,
Dest: newFilePath,
}
config.Users[i].SSHPubKeyPaths[j] = newFilePath
filesToCopy = append(filesToCopy, fileToCopy)
}
}
fixedUpAdditionalFiles := make(map[string]string)
for srcFile, dstFile := range config.AdditionalFiles {
newFilePath := filepath.Join(additionalFilesTempDirectory, srcFile)
fileToCopy := safechroot.FileToCopy{
Src: srcFile,
Dest: newFilePath,
}
fixedUpAdditionalFiles[newFilePath] = dstFile
filesToCopy = append(filesToCopy, fileToCopy)
}
config.AdditionalFiles = fixedUpAdditionalFiles
for i, script := range config.PostInstallScripts {
newFilePath := filepath.Join(postInstallScriptTempDirectory, script.Path)
fileToCopy := safechroot.FileToCopy{
Src: script.Path,
Dest: newFilePath,
}
config.PostInstallScripts[i].Path = newFilePath
filesToCopy = append(filesToCopy, fileToCopy)
}
err = installChroot.AddFiles(filesToCopy...)
return
}
func cleanupExtraFiles() (err error) {
dirsToRemove := []string{additionalFilesTempDirectory, postInstallScriptTempDirectory, sshPubKeysTempDirectory}
for _, dir := range dirsToRemove {
logger.Log.Infof("Cleaning up directory %s", dir)
err = os.RemoveAll(dir)
if err != nil {
logger.Log.Warnf("Failed to cleanup directory (%s). Error: %s", dir, err)
return
}
}
return
}
func cleanupExtraFilesInChroot(chroot *safechroot.Chroot) (err error) {
logger.Log.Infof("Proceeding to cleanup extra files in chroot %s.", chroot.RootDir())
err = chroot.Run(func() error {
return cleanupExtraFiles()
})
return
}
func buildImage(mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap map[string]string, mountPointToOverlayMap map[string]*installutils.Overlay, packagesToInstall []string, systemConfig configuration.SystemConfig, diskDevPath string, isRootFS bool, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice, diffDiskBuild bool) (err error) {
const (
installRoot = "/installroot"
verityWorkingDir = "verityworkingdir"
emptyWorkerTar = ""
rootDir = "/"
existingChrootDir = true
leaveChrootOnDisk = true
)
var installMap map[string]string
// Only invoke CreateInstallRoot for a raw disk. This call will result in mount points being created from a raw disk
// into the install root. A rootfs will not have these.
if !isRootFS {
installMap, err = installutils.CreateInstallRoot(installRoot, mountPointMap, mountPointToFsTypeMap, mountPointToMountArgsMap, mountPointToOverlayMap)
if err != nil {
err = fmt.Errorf("failed to create install root: %s", err)
return
}
defer installutils.DestroyInstallRoot(installRoot, installMap, mountPointToOverlayMap)
}
// Install any tools required for the setup root to function
setupChrootPackages := []string{}
toolingPackages := installutils.GetRequiredPackagesForInstall()
for _, toolingPackage := range toolingPackages {
setupChrootPackages = append(setupChrootPackages, toolingPackage.Name)
}
if systemConfig.ReadOnlyVerityRoot.Enable {
// We will need the veritysetup package (and its dependencies) to manage the verity disk, add them to our
// image setup environment (setuproot chroot or live installer).
verityPackages := []string{"device-mapper", "veritysetup"}
setupChrootPackages = append(setupChrootPackages, verityPackages...)
}
for _, setupChrootPackage := range setupChrootPackages {
_, err = installutils.TdnfInstall(setupChrootPackage, rootDir)
if err != nil {
err = fmt.Errorf("failed to install required setup chroot package '%s': %w", setupChrootPackage, err)
return
}
}
// Create new chroot for the new image
installChroot := safechroot.NewChroot(installRoot, existingChrootDir)
extraInstallMountPoints := []*safechroot.MountPoint{}
extraDirectories := []string{}
err = installChroot.Initialize(emptyWorkerTar, extraDirectories, extraInstallMountPoints)
if err != nil {
err = fmt.Errorf("failed to create install chroot: %s", err)
return
}
defer installChroot.Close(leaveChrootOnDisk)
// Populate image contents
err = installutils.PopulateInstallRoot(installChroot, packagesToInstall, systemConfig, installMap, mountPointToFsTypeMap, mountPointToMountArgsMap, isRootFS, encryptedRoot, diffDiskBuild)
if err != nil {
err = fmt.Errorf("failed to populate image contents: %s", err)
return
}
// Only configure the bootloader or read-only partitions for actual disks; a rootfs does not need these
if !isRootFS {
err = configureDiskBootloader(systemConfig, installChroot, diskDevPath, installMap, encryptedRoot, readOnlyRoot)
if err != nil {
err = fmt.Errorf("failed to configure boot loader: %w", err)
return
}
// Snapshot the root filesystem as a read-only verity disk and update the initramfs.
if systemConfig.ReadOnlyVerityRoot.Enable {
var initramfsPathList []string
err = readOnlyRoot.SwitchDeviceToReadOnly(mountPointMap["/"], mountPointToMountArgsMap["/"])
if err != nil {
err = fmt.Errorf("failed to switch root to read-only: %w", err)
return
}
installutils.ReportAction("Hashing root for read-only with dm-verity, this may take a long time if error correction is enabled")
initramfsPathList, err = filepath.Glob(filepath.Join(installRoot, "/boot/initrd.img*"))
if err != nil || len(initramfsPathList) != 1 {
return fmt.Errorf("could not find single initramfs (%v): %w", initramfsPathList, err)
}
err = readOnlyRoot.AddRootVerityFilesToInitramfs(verityWorkingDir, initramfsPathList[0])
if err != nil {
err = fmt.Errorf("failed to include read-only root files in initramfs: %w", err)
return
}
}
}
return
}
func configureDiskBootloader(systemConfig configuration.SystemConfig, installChroot *safechroot.Chroot, diskDevPath string, installMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice, readOnlyRoot diskutils.VerityDevice) (err error) {
const rootMountPoint = "/"
const bootMountPoint = "/boot"
var rootDevice string
// Add bootloader. Prefer a separate boot partition if one exists.
bootDevice, ok := installMap[bootMountPoint]
bootPrefix := ""
if !ok {
bootDevice = installMap[rootMountPoint]
// If we do not have a separate boot partition we will need to add a prefix to all paths used in the configs.
bootPrefix = "/boot"
}
if installMap[rootMountPoint] == installutils.NullDevice {
// In case of overlay device being mounted at root, no need to change the bootloader.
return
}
bootUUID, err := installutils.GetUUID(bootDevice)
if err != nil {
err = fmt.Errorf("failed to get UUID: %s", err)
return
}
bootType := systemConfig.BootType
if systemConfig.Encryption.Enable && bootType == "legacy" {
err = installutils.EnableCryptoDisk(installChroot)
if err != nil {
err = fmt.Errorf("Unable to enable crypto disk: %s", err)
return
}
}
err = installutils.InstallBootloader(installChroot, systemConfig.Encryption.Enable, bootType, bootUUID, bootPrefix, diskDevPath)
if err != nil {
err = fmt.Errorf("failed to install bootloader: %s", err)
return
}
// Add grub config to image
if systemConfig.Encryption.Enable {
rootDevice = installMap[rootMountPoint]
} else if systemConfig.ReadOnlyVerityRoot.Enable {
var partUUID string
partUUID, err = installutils.GetPartUUID(readOnlyRoot.BackingDevice)
if err != nil {
err = fmt.Errorf("failed to get PARTUUID: %s", err)
return
}
rootDevice = fmt.Sprintf("verityroot:PARTUUID=%v", partUUID)
} else {
var partUUID string
partUUID, err = installutils.GetPartUUID(installMap[rootMountPoint])
if err != nil {
err = fmt.Errorf("failed to get PARTUUID: %s", err)
return
}
rootDevice = fmt.Sprintf("PARTUUID=%v", partUUID)
}
err = installutils.InstallGrubCfg(installChroot.RootDir(), rootDevice, bootUUID, bootPrefix, encryptedRoot, systemConfig.KernelCommandLine, readOnlyRoot)
if err != nil {
err = fmt.Errorf("failed to install main grub config file: %s", err)
return
}
return
}
| 1 | 14,113 | Let's be consistent everywhere: either "Disable"/"Enable" or "Disabled"/"Enabled". I'm voting for the latter. :) | microsoft-CBL-Mariner | go |
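A minimal sketch of the rename the review message asks for, applied to the patch's added lines. The `HidepidDisabled` field name is hypothetical (only `HidepidDisable` exists in the patch), and the four-line boolean flip collapses to a single negation:

```go
// Hypothetical: config field renamed to the past-tense "Disabled" style the review prefers.
logger.Log.Infof("HidepidDisabled is %v.", systemConfig.HidepidDisabled)
hidepidEnabled := !systemConfig.HidepidDisabled
```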
@@ -2041,6 +2041,10 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
+ if (isGoQuorumCompatibilityMode) {
+ throw new ParameterException(
+ commandLine, String.format("%s %s", "GoQuorum mode", errorSuffix));
+ }
if (isPrivacyMultiTenancyEnabled
&& !jsonRpcConfiguration.isAuthenticationEnabled() | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration.QIP714_DEFAULT_BLOCK;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT;
import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.Runner;
import org.hyperledger.besu.RunnerBuilder;
import org.hyperledger.besu.chainexport.RlpBlockExporter;
import org.hyperledger.besu.chainimport.JsonBlockImporter;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.converter.RpcApisConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.error.BesuExceptionHandler;
import org.hyperledger.besu.cli.options.unstable.DataStorageOptions;
import org.hyperledger.besu.cli.options.unstable.DnsOptions;
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
import org.hyperledger.besu.cli.options.unstable.EthstatsOptions;
import org.hyperledger.besu.cli.options.unstable.LauncherOptions;
import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.unstable.MiningOptions;
import org.hyperledger.besu.cli.options.unstable.NatOptions;
import org.hyperledger.besu.cli.options.unstable.NativeLibraryOptions;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.cli.options.unstable.RPCOptions;
import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions;
import org.hyperledger.besu.cli.options.unstable.TransactionPoolOptions;
import org.hyperledger.besu.cli.presynctasks.PreSynchronizationTaskRunner;
import org.hyperledger.besu.cli.presynctasks.PrivateDatabaseMigrationPreSyncTask;
import org.hyperledger.besu.cli.subcommands.PasswordSubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand;
import org.hyperledger.besu.cli.subcommands.RetestethSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand;
import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand;
import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand;
import org.hyperledger.besu.cli.util.BesuCommandCustomFactory;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.config.GoQuorumOptions;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.controller.BesuControllerBuilder;
import org.hyperledger.besu.controller.TargetingGasLimitCalculator;
import org.hyperledger.besu.crypto.KeyPairSecurityModule;
import org.hyperledger.besu.crypto.KeyPairUtil;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.crypto.SECP256K1;
import org.hyperledger.besu.enclave.EnclaveFactory;
import org.hyperledger.besu.enclave.GoQuorumEnclave;
import org.hyperledger.besu.ethereum.api.ApiConfiguration;
import org.hyperledger.besu.ethereum.api.ImmutableApiConfiguration;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider;
import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.blockcreation.GasLimitCalculator;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.GoQuorumPrivacyParameters;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.mainnet.precompiles.AbstractAltBnPrecompiledContract;
import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeDnsConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser;
import org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.worldstate.DefaultWorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl;
import org.hyperledger.besu.metrics.MetricsProtocol;
import org.hyperledger.besu.metrics.MetricsSystemFactory;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.PicoCLIOptions;
import org.hyperledger.besu.plugin.services.SecurityModuleService;
import org.hyperledger.besu.plugin.services.StorageService;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.services.BesuEventsImpl;
import org.hyperledger.besu.services.BesuPluginContextImpl;
import org.hyperledger.besu.services.PicoCLIOptionsImpl;
import org.hyperledger.besu.services.SecurityModuleServiceImpl;
import org.hyperledger.besu.services.StorageServiceImpl;
import org.hyperledger.besu.services.kvstore.InMemoryStoragePlugin;
import org.hyperledger.besu.util.NetworkUtility;
import org.hyperledger.besu.util.PermissioningConfigurationValidator;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.Percentage;
import org.hyperledger.besu.util.number.PositiveNumber;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.DecodeException;
import io.vertx.core.metrics.MetricsOptions;
import net.consensys.quorum.mainnet.launcher.LauncherManager;
import net.consensys.quorum.mainnet.launcher.config.ImmutableLauncherConfig;
import net.consensys.quorum.mainnet.launcher.exception.LauncherException;
import net.consensys.quorum.mainnet.launcher.util.ParseArgsHelper;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
import picocli.CommandLine;
import picocli.CommandLine.AbstractParseResultHandler;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParameterException;
@SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives
@Command(
description = "This command runs the Besu Ethereum client full node.",
abbreviateSynopsis = true,
name = "besu",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
header = "Usage:",
synopsisHeading = "%n",
descriptionHeading = "%nDescription:%n%n",
optionListHeading = "%nOptions:%n",
footerHeading = "%n",
footer = "Besu is licensed under the Apache License 2.0")
public class BesuCommand implements DefaultCommandValues, Runnable {
@SuppressWarnings("PrivateStaticFinalLoggers")
// non-static for testing
private final Logger logger;
private CommandLine commandLine;
private final Supplier<RlpBlockImporter> rlpBlockImporter;
private final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory;
private final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory;
// Unstable CLI options
final NetworkingOptions unstableNetworkingOptions = NetworkingOptions.create();
final SynchronizerOptions unstableSynchronizerOptions = SynchronizerOptions.create();
final EthProtocolOptions unstableEthProtocolOptions = EthProtocolOptions.create();
final MetricsCLIOptions unstableMetricsCLIOptions = MetricsCLIOptions.create();
final TransactionPoolOptions unstableTransactionPoolOptions = TransactionPoolOptions.create();
private final EthstatsOptions unstableEthstatsOptions = EthstatsOptions.create();
private final DataStorageOptions unstableDataStorageOptions = DataStorageOptions.create();
private final DnsOptions unstableDnsOptions = DnsOptions.create();
private final MiningOptions unstableMiningOptions = MiningOptions.create();
private final NatOptions unstableNatOptions = NatOptions.create();
private final NativeLibraryOptions unstableNativeLibraryOptions = NativeLibraryOptions.create();
private final RPCOptions unstableRPCOptions = RPCOptions.create();
final LauncherOptions unstableLauncherOptions = LauncherOptions.create();
private final RunnerBuilder runnerBuilder;
private final BesuController.Builder controllerBuilderFactory;
private final BesuPluginContextImpl besuPluginContext;
private final StorageServiceImpl storageService;
private final SecurityModuleServiceImpl securityModuleService;
private final Map<String, String> environment;
private final MetricCategoryRegistryImpl metricCategoryRegistry =
new MetricCategoryRegistryImpl();
private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter();
// Public IP stored to prevent having to research it each time we need it.
private InetAddress autoDiscoveredDefaultIP = null;
private final PreSynchronizationTaskRunner preSynchronizationTaskRunner =
new PreSynchronizationTaskRunner();
private final Set<Integer> allocatedPorts = new HashSet<>();
// CLI options defined by user at runtime.
// Options parsing is done with CLI library Picocli https://picocli.info/
// While this variable is never read it is needed for the PicoCLI to create
// the config file option that is read elsewhere.
@SuppressWarnings("UnusedVariable")
@CommandLine.Option(
names = {CONFIG_FILE_OPTION_NAME},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "TOML config file (default: none)")
private final File configFile = null;
@CommandLine.Option(
names = {"--data-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "The path to Besu data directory (default: ${DEFAULT-VALUE})")
final Path dataPath = getDefaultBesuDataPath(this);
// Genesis file path with null default option if the option
// is not defined on command line as this default is handled by Runner
// to use mainnet json file from resources as indicated in the
// default network option
// Then we have no control over genesis default value here.
@CommandLine.Option(
names = {"--genesis-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Genesis file. Setting this option makes --network option ignored and requires --network-id to be set.")
private final File genesisFile = null;
@CommandLine.Option(
names = {"--node-private-key-file"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description =
"The node's private key file (default: a file named \"key\" in the Besu data folder)")
private final File nodePrivateKeyFile = null;
@Option(
names = "--identity",
paramLabel = "<String>",
description = "Identification for this node in the Client ID",
arity = "1")
private final Optional<String> identityString = Optional.empty();
// Completely disables P2P within Besu.
@Option(
names = {"--p2p-enabled"},
description = "Enable P2P functionality (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean p2pEnabled = true;
// Boolean option to indicate if peers should NOT be discovered; it defaults to
// false, meaning that peers are discovered by default.
//
// This negative option is required because of the nature of an option that is
// true when added on the command line: you can't do --option=false, so false is
// set as the default and you simply omit the option if you want it false.
// This seems to be the only way it works with Picocli.
// Many other programs use the same negative-option scheme for false defaults,
// meaning that it's probably the right way to handle disabling options.
@Option(
names = {"--discovery-enabled"},
description = "Enable P2P discovery (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean peerDiscoveryEnabled = true;
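// A minimal illustration of the negative-option scheme described above, using a
// hypothetical flag that is not a real Besu option. The field defaults to false,
// and supplying the bare flag is the only way to flip it to true:
//
// @Option(
// names = {"--example-feature-disabled"},
// description = "Disable the example feature (default: ${DEFAULT-VALUE})")
// private final Boolean exampleFeatureDisabled = false;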
// A list of bootstrap nodes can be passed
// and a hardcoded list will be used otherwise by the Runner.
// NOTE: we have no control over default value here.
@Option(
names = {"--bootnodes"},
paramLabel = "<enode://id@host:port>",
description =
"Comma separated enode URLs for P2P discovery bootstrap. "
+ "Default is a predefined list.",
split = ",",
arity = "0..*")
private final List<String> bootNodes = null;
@Option(
names = {"--max-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Maximum P2P connections that can be established (default: ${DEFAULT-VALUE})")
private final Integer maxPeers = DEFAULT_MAX_PEERS;
@Option(
names = {"--remote-connections-limit-enabled"},
description =
"Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})")
private final Boolean isLimitRemoteWireConnectionsEnabled = true;
@Option(
names = {"--remote-connections-max-percentage"},
paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
description =
"The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})",
arity = "1",
converter = PercentageConverter.class)
private final Integer maxRemoteConnectionsPercentage =
Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED)
.toPercentage()
.getValue();
@Option(
names = {"--random-peer-priority-enabled"},
description =
"Allow for incoming connections to be prioritized randomly. This will prevent (typically small, stable) networks from forming impenetrable peer cliques. (default: ${DEFAULT-VALUE})")
private final Boolean randomPeerPriority = false;
@Option(
names = {"--banned-node-ids", "--banned-node-id"},
paramLabel = MANDATORY_NODE_ID_FORMAT_HELP,
description = "A list of node IDs to ban from the P2P network.",
split = ",",
arity = "1..*")
void setBannedNodeIds(final List<String> values) {
try {
bannedNodeIds =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::parseNodeId)
.collect(Collectors.toList());
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage());
}
}
private Collection<Bytes> bannedNodeIds = new ArrayList<>();
@Option(
names = {"--sync-mode"},
paramLabel = MANDATORY_MODE_FORMAT_HELP,
description =
"Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: FAST if a --network is supplied and privacy isn't enabled. FULL otherwise.)")
private SyncMode syncMode = null;
@Option(
names = {"--fast-sync-min-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})")
private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT;
@Option(
names = {"--network"},
paramLabel = MANDATORY_NETWORK_FORMAT_HELP,
description =
"Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}."
+ " (default: MAINNET)")
private final NetworkName network = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pHost = autoDiscoverDefaultIP().getHostAddress();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-interface"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description =
"The network interface address on which this node listens for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pInterface = NetworkUtility.INADDR_ANY;
@Option(
names = {"--p2p-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port on which to listen for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT;
@Option(
names = {"--nat-method"},
description =
"Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}."
+ " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})")
private final NatMethod natMethod = DEFAULT_NAT_METHOD;
@Option(
names = {"--network-id"},
paramLabel = "<BIG INTEGER>",
description =
"P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)",
arity = "1")
private final BigInteger networkId = null;
@Option(
names = {"--graphql-http-enabled"},
description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isGraphQLHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--graphql-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--graphql-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT;
@Option(
names = {"--graphql-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-enabled"},
description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT;
// A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS)
@Option(
names = {"--rpc-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-api", "--rpc-http-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-http-authentication-enabled"},
description =
"Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-http-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC HTTP authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-http-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC HTTP authentication",
arity = "1")
private final File rpcHttpAuthenticationPublicKeyFile = null;
@Option(
names = {"--rpc-http-tls-enabled"},
description = "Enable TLS for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsEnabled = false;
@Option(
names = {"--rpc-http-tls-keystore-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Keystore (PKCS#12) containing key/certificate for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStoreFile = null;
@Option(
names = {"--rpc-http-tls-keystore-password-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"File containing password to unlock keystore for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStorePasswordFile = null;
@Option(
names = {"--rpc-http-tls-client-auth-enabled"},
description =
"Enable TLS client authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsClientAuthEnabled = false;
@Option(
names = {"--rpc-http-tls-known-clients-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to file containing clients certificate common name and fingerprint for client authentication")
private final Path rpcHttpTlsKnownClientsFile = null;
@Option(
names = {"--rpc-http-tls-ca-clients-enabled"},
description =
"Enable to accept clients certificate signed by a valid CA for client authentication (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsCAClientsEnabled = false;
@Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-ws-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-ws-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC WebSocket authentication",
arity = "1")
private final File rpcWsAuthenticationPublicKeyFile = null;
@Option(
names = {"--privacy-tls-enabled"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Enable TLS for connecting to privacy enclave (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyTlsEnabled = false;
@Option(
names = "--privacy-tls-keystore-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to a PKCS#12 formatted keystore; used to enable TLS on inbound connections.")
private final Path privacyKeyStoreFile = null;
@Option(
names = "--privacy-tls-keystore-password-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the password used to decrypt the keystore.")
private final Path privacyKeyStorePasswordFile = null;
@Option(
names = "--privacy-tls-known-enclave-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the fingerprints of the authorized privacy enclave.")
private final Path privacyTlsKnownEnclaveFile = null;
@Option(
names = {"--metrics-enabled"},
description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-protocol"},
description =
"Metrics protocol, one of PROMETHEUS, OPENTELEMETRY or NONE. (default: ${DEFAULT-VALUE})")
private MetricsProtocol metricsProtocol = PROMETHEUS;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPort = DEFAULT_METRICS_PORT;
@Option(
names = {"--metrics-category", "--metrics-categories"},
paramLabel = "<category name>",
split = ",",
arity = "1..*",
description =
"Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})")
private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES;
@Option(
names = {"--metrics-push-enabled"},
description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsPushEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-push-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT;
@Option(
names = {"--metrics-push-interval"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushInterval = 15;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-prometheus-job"},
description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPrometheusJob = "besu-client";
@Option(
names = {"--host-allowlist"},
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})",
defaultValue = "localhost,127.0.0.1")
private final JsonRPCAllowlistHostsProperty hostsAllowlist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--host-whitelist"},
hidden = true,
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Deprecated in favor of --host-allowlist. Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})")
private final JsonRPCAllowlistHostsProperty hostsWhitelist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--logging", "-l"},
paramLabel = "<LOG VERBOSITY LEVEL>",
description = "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL")
private final Level logLevel = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--color-enabled"},
description =
"Force color output to be enabled/disabled (default: colorized only if printing to console)")
private static Boolean colorEnabled = null;
@Option(
names = {"--reorg-logging-threshold"},
description =
"How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})")
private final Long reorgLoggingThreshold = 6L;
@Option(
names = {"--miner-enabled"},
description = "Set if node will perform mining (default: ${DEFAULT-VALUE})")
private final Boolean isMiningEnabled = false;
@Option(
names = {"--miner-stratum-enabled"},
description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})")
  private final Boolean isStratumMiningEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--miner-stratum-host"},
description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})")
private String stratumNetworkInterface = "0.0.0.0";
@Option(
names = {"--miner-stratum-port"},
description = "Stratum port binding (default: ${DEFAULT-VALUE})")
private final Integer stratumPort = 8008;
@Option(
names = {"--miner-coinbase"},
description =
"Account to which mining rewards are paid. You must specify a valid coinbase if "
+ "mining is enabled using --miner-enabled option",
arity = "1")
private final Address coinbase = null;
@Option(
names = {"--min-gas-price"},
description =
"Minimum price (in Wei) offered by a transaction for it to be included in a mined "
+ "block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE;
@Option(
names = {"--rpc-tx-feecap"},
description =
"Maximum transaction fees (in Wei) accepted for transaction submitted through RPC (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP;
@Option(
names = {"--min-block-occupancy-ratio"},
description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Double minBlockOccupancyRatio = DEFAULT_MIN_BLOCK_OCCUPANCY_RATIO;
@Option(
names = {"--miner-extra-data"},
description =
"A hex string representing the (32) bytes to be included in the extra data "
+ "field of a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Bytes extraData = DEFAULT_EXTRA_DATA;
@Option(
names = {"--pruning-enabled"},
description =
"Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})")
private final Boolean pruningEnabled = false;
@Option(
names = {"--permissions-nodes-config-file-enabled"},
description = "Enable node level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-nodes-config-file"},
description =
"Node permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String nodePermissionsConfigFile = null;
@Option(
names = {"--permissions-accounts-config-file-enabled"},
description = "Enable account level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-accounts-config-file"},
description =
"Account permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String accountPermissionsConfigFile = null;
@Option(
names = {"--permissions-nodes-contract-address"},
description = "Address of the node permissioning smart contract",
arity = "1")
private final Address permissionsNodesContractAddress = null;
@Option(
names = {"--permissions-nodes-contract-version"},
description = "Version of the EEA Node Permissioning interface (default: ${DEFAULT-VALUE})")
private final Integer permissionsNodesContractVersion = 1;
@Option(
names = {"--permissions-nodes-contract-enabled"},
description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesContractEnabled = false;
@Option(
names = {"--permissions-accounts-contract-address"},
description = "Address of the account permissioning smart contract",
arity = "1")
private final Address permissionsAccountsContractAddress = null;
@Option(
names = {"--permissions-accounts-contract-enabled"},
description =
"Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsContractEnabled = false;
@Option(
names = {"--privacy-enabled"},
description = "Enable private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyEnabled = false;
@Option(
names = {"--privacy-multi-tenancy-enabled"},
description = "Enable multi-tenant private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyMultiTenancyEnabled = false;
@Option(
names = {"--revert-reason-enabled"},
description =
"Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})")
private final Boolean isRevertReasonEnabled = false;
@Option(
names = {"--required-blocks", "--required-block"},
paramLabel = "BLOCK=HASH",
description = "Block number and hash peers are required to have.",
arity = "*",
split = ",")
private final Map<Long, Hash> requiredBlocks = new HashMap<>();
@Option(
names = {"--privacy-url"},
description = "The URL on which the enclave is running")
private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL;
@Option(
names = {"--privacy-public-key-file"},
description = "The enclave's public key file")
private final File privacyPublicKeyFile = null;
@Option(
names = {"--privacy-precompiled-address"},
description =
"The address to which the privacy pre-compiled contract will be mapped (default: ${DEFAULT-VALUE})",
hidden = true)
private final Integer privacyPrecompiledAddress = Address.PRIVACY;
@Option(
names = {"--privacy-marker-transaction-signing-key-file"},
description =
"The name of a file containing the private key used to sign privacy marker transactions. If unset, each will be signed with a random key.")
private final Path privacyMarkerTransactionSigningKeyPath = null;
@Option(
names = {"--privacy-enable-database-migration"},
description = "Enable private database metadata migration (default: ${DEFAULT-VALUE})")
private final Boolean migratePrivateDatabase = false;
@Option(
names = {"--privacy-flexible-groups-enabled", "--privacy-onchain-groups-enabled"},
description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})")
private final Boolean isFlexiblePrivacyGroupsEnabled = false;
@Option(
names = {"--target-gas-limit"},
description =
"Sets target gas limit per block. If set each block's gas limit will approach this setting over time if the current gas limit is different.")
private final Long targetGasLimit = null;
@Option(
names = {"--tx-pool-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
@Option(
names = {"--tx-pool-hashes-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transaction hashes that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pooledTransactionHashesSize =
TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS_HASHES;
@Option(
names = {"--tx-pool-retention-hours"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pendingTxRetentionPeriod =
TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS;
@Option(
names = {"--tx-pool-price-bump"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
converter = PercentageConverter.class,
description =
"Price bump percentage to replace an already existing transaction (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer priceBump = TransactionPoolConfiguration.DEFAULT_PRICE_BUMP.getValue();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--key-value-storage"},
description = "Identity for the key-value storage to be used.",
arity = "1")
private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--security-module"},
paramLabel = "<NAME>",
description = "Identity for the Security Module to be used.",
arity = "1")
private String securityModuleName = DEFAULT_SECURITY_MODULE;
@Option(
names = {"--auto-log-bloom-caching-enabled"},
description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean autoLogBloomCachingEnabled = true;
@Option(
names = {"--override-genesis-config"},
paramLabel = "NAME=VALUE",
description = "Overrides configuration values in the genesis file. Use with care.",
arity = "*",
hidden = true,
split = ",")
private final Map<String, String> genesisConfigOverrides =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
@Option(
names = {"--pruning-blocks-retained"},
defaultValue = "1024",
paramLabel = "<INTEGER>",
description =
"Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED;
@Option(
names = {"--pruning-block-confirmations"},
defaultValue = "10",
paramLabel = "<INTEGER>",
description =
"Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlockConfirmations =
PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS;
@CommandLine.Option(
names = {"--pid-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Path to PID file (optional)")
private final Path pidPath = null;
@CommandLine.Option(
names = {"--api-gas-price-blocks"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Number of blocks to consider for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceBlocks = 100L;
@CommandLine.Option(
names = {"--api-gas-price-percentile"},
      paramLabel = "<DOUBLE>",
description = "Percentile value to measure for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Double apiGasPricePercentile = 50.0;
@CommandLine.Option(
names = {"--api-gas-price-max"},
      paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Maximum gas price for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceMax = 500_000_000_000L;
@Option(
names = {"--goquorum-compatibility-enabled"},
hidden = true,
description = "Start Besu in GoQuorum compatibility mode (default: ${DEFAULT-VALUE})")
private final Boolean isGoQuorumCompatibilityMode = false;
@CommandLine.Option(
names = {"--static-nodes-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Specifies the static node file containing the static nodes for this node to connect to")
private final Path staticNodesFile = null;
private EthNetworkConfig ethNetworkConfig;
private JsonRpcConfiguration jsonRpcConfiguration;
private GraphQLConfiguration graphQLConfiguration;
private WebSocketConfiguration webSocketConfiguration;
private ApiConfiguration apiConfiguration;
private MetricsConfiguration metricsConfiguration;
private Optional<PermissioningConfiguration> permissioningConfiguration;
private Collection<EnodeURL> staticNodes;
private BesuController besuController;
private BesuConfiguration pluginCommonConfiguration;
private final Supplier<ObservableMetricsSystem> metricsSystem =
Suppliers.memoize(() -> MetricsSystemFactory.create(metricsConfiguration()));
private Vertx vertx;
private EnodeDnsConfiguration enodeDnsConfiguration;
private KeyValueStorageProvider keyValueStorageProvider;
public BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment) {
this(
logger,
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
runnerBuilder,
controllerBuilderFactory,
besuPluginContext,
environment,
new StorageServiceImpl(),
new SecurityModuleServiceImpl());
}
@VisibleForTesting
protected BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment,
final StorageServiceImpl storageService,
final SecurityModuleServiceImpl securityModuleService) {
this.logger = logger;
this.rlpBlockImporter = rlpBlockImporter;
this.rlpBlockExporterFactory = rlpBlockExporterFactory;
this.jsonBlockImporterFactory = jsonBlockImporterFactory;
this.runnerBuilder = runnerBuilder;
this.controllerBuilderFactory = controllerBuilderFactory;
this.besuPluginContext = besuPluginContext;
this.environment = environment;
this.storageService = storageService;
this.securityModuleService = securityModuleService;
pluginCommonConfiguration = new BesuCommandConfigurationService();
besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration);
}
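  /**
   * Builds the PicoCLI {@code CommandLine} for this command, registers sub-commands,
   * converters, unstable options and plugins, and then delegates to the
   * config-file-aware parser.
   */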
public void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final InputStream in,
final String... args) {
commandLine =
new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext))
.setCaseInsensitiveEnumValuesAllowed(true);
enableExperimentalEIPs();
addSubCommands(resultHandler, in);
registerConverters();
handleUnstableOptions();
preparePlugins();
parse(resultHandler, exceptionHandler, args);
}
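  // Main execution path once parsing succeeds: configure logging and native
  // libraries, create Vertx, validate and apply the options, build the
  // controller, run pre-synchronization tasks, then start plugins and
  // synchronization.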
@Override
public void run() {
try {
configureLogging(true);
configureNativeLibs();
logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString));
      // Vertx must be created after the command line has been parsed so that the metrics system is configurable
vertx = createVertx(createVertxOptions(metricsSystem.get()));
final BesuCommand controller = validateOptions().configure().controller();
preSynchronizationTaskRunner.runTasks(controller.besuController);
controller.startPlugins().startSynchronization();
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage(), e);
}
}
@VisibleForTesting
void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) {
this.pluginCommonConfiguration = pluginCommonConfiguration;
}
private void enableExperimentalEIPs() {
// Usage of static command line flags is strictly reserved for experimental EIPs
commandLine.addMixin("experimentalEIPs", ExperimentalEIPs.class);
}
private void addSubCommands(
final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) {
commandLine.addSubcommand(
BlocksSubCommand.COMMAND_NAME,
new BlocksSubCommand(
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
resultHandler.out()));
commandLine.addSubcommand(
PublicKeySubCommand.COMMAND_NAME,
new PublicKeySubCommand(resultHandler.out(), this::buildNodeKey));
commandLine.addSubcommand(
PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out()));
commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand());
commandLine.addSubcommand(
RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in));
commandLine.addSubcommand(
OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out()));
}
private void registerConverters() {
commandLine.registerConverter(Address.class, Address::fromHexStringStrict);
commandLine.registerConverter(Bytes.class, Bytes::fromHexString);
commandLine.registerConverter(Level.class, Level::valueOf);
commandLine.registerConverter(SyncMode.class, SyncMode::fromString);
commandLine.registerConverter(MetricsProtocol.class, MetricsProtocol::fromString);
commandLine.registerConverter(UInt256.class, (arg) -> UInt256.valueOf(new BigInteger(arg)));
commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg)));
commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString);
commandLine.registerConverter(Hash.class, Hash::fromHexString);
commandLine.registerConverter(Optional.class, Optional::of);
commandLine.registerConverter(Double.class, Double::parseDouble);
metricCategoryConverter.addCategories(BesuMetricCategory.class);
metricCategoryConverter.addCategories(StandardMetricCategory.class);
commandLine.registerConverter(MetricCategory.class, metricCategoryConverter);
}
private void handleUnstableOptions() {
// Add unstable options
final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder();
final ImmutableMap<String, Object> unstableOptions =
unstableOptionsBuild
.put("Ethereum Wire Protocol", unstableEthProtocolOptions)
.put("Metrics", unstableMetricsCLIOptions)
.put("P2P Network", unstableNetworkingOptions)
.put("RPC", unstableRPCOptions)
.put("DNS Configuration", unstableDnsOptions)
.put("NAT Configuration", unstableNatOptions)
.put("Synchronizer", unstableSynchronizerOptions)
.put("TransactionPool", unstableTransactionPoolOptions)
.put("Ethstats", unstableEthstatsOptions)
.put("Mining", unstableMiningOptions)
.put("Native Library", unstableNativeLibraryOptions)
.put("Data Storage Options", unstableDataStorageOptions)
.put("Launcher", unstableLauncherOptions)
.build();
UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions);
}
private void preparePlugins() {
besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine));
besuPluginContext.addService(SecurityModuleService.class, securityModuleService);
besuPluginContext.addService(StorageService.class, storageService);
besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry);
// register built-in plugins
new RocksDBPlugin().register(besuPluginContext);
new InMemoryStoragePlugin().register(besuPluginContext);
besuPluginContext.registerPlugins(pluginsDir());
metricCategoryRegistry
.getMetricCategories()
.forEach(metricCategoryConverter::addRegistryCategory);
// register default security module
securityModuleService.register(
DEFAULT_SECURITY_MODULE, Suppliers.memoize(this::defaultSecurityModule));
}
private SecurityModule defaultSecurityModule() {
return new KeyPairSecurityModule(loadKeyPair());
}
@VisibleForTesting
SECP256K1.KeyPair loadKeyPair() {
return KeyPairUtil.loadKeyPair(nodePrivateKeyFile());
}
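  // When launcher mode is requested, an interactive launcher generates a config
  // file that is fed to the config-file-aware handler; otherwise the supplied
  // arguments are parsed directly.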
private void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final String... args) {
    // Create a handler that first searches for a config file option, uses it to
    // supply default values, and then runs the regular parsing of the remaining options.
final ConfigOptionSearchAndRunHandler configParsingHandler =
new ConfigOptionSearchAndRunHandler(
resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment);
ParseArgsHelper.getLauncherOptions(unstableLauncherOptions, args);
if (unstableLauncherOptions.isLauncherMode()
|| unstableLauncherOptions.isLauncherModeForced()) {
try {
final ImmutableLauncherConfig launcherConfig =
ImmutableLauncherConfig.builder()
.launcherScript(BesuCommand.class.getResourceAsStream("launcher.json"))
.addCommandClasses(
this, unstableNatOptions, unstableEthstatsOptions, unstableMiningOptions)
.isLauncherForced(unstableLauncherOptions.isLauncherModeForced())
.build();
final File file = new LauncherManager(launcherConfig).run();
logger.info("Config file location : {}", file.getAbsolutePath());
commandLine.parseWithHandlers(
configParsingHandler,
exceptionHandler,
String.format("%s=%s", CONFIG_FILE_OPTION_NAME, file.getAbsolutePath()));
} catch (LauncherException e) {
logger.warn("Unable to run the launcher {}", e.getMessage());
}
} else {
commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args);
}
}
private void startSynchronization() {
synchronize(
besuController,
p2pEnabled,
peerDiscoveryEnabled,
ethNetworkConfig,
maxPeers,
p2pHost,
p2pInterface,
p2pPort,
graphQLConfiguration,
jsonRpcConfiguration,
webSocketConfiguration,
apiConfiguration,
metricsConfiguration,
permissioningConfiguration,
staticNodes,
pidPath);
}
private BesuCommand startPlugins() {
besuPluginContext.addService(
BesuEvents.class,
new BesuEventsImpl(
besuController.getProtocolContext().getBlockchain(),
besuController.getProtocolManager().getBlockBroadcaster(),
besuController.getTransactionPool(),
besuController.getSyncState()));
besuPluginContext.addService(MetricsSystem.class, getMetricsSystem());
besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext);
besuPluginContext.startPlugins();
return this;
}
public void configureLogging(final boolean announce) {
// To change the configuration if color was enabled/disabled
Configurator.reconfigure();
// set log level per CLI flags
if (logLevel != null) {
if (announce) {
System.out.println("Setting logging level to " + logLevel.name());
}
Configurator.setAllLevels("", logLevel);
}
}
public static Optional<Boolean> getColorEnabled() {
return Optional.ofNullable(colorEnabled);
}
private void configureNativeLibs() {
if (unstableNativeLibraryOptions.getNativeAltbn128()) {
AbstractAltBnPrecompiledContract.enableNative();
}
if (unstableNativeLibraryOptions.getNativeSecp256k1()) {
SECP256K1.enableNative();
}
}
private BesuCommand validateOptions() {
issueOptionWarnings();
validateP2PInterface(p2pInterface);
validateMiningParams();
validateNatParams();
validateNetStatsParams();
validateDnsOptionsParams();
return this;
}
@SuppressWarnings("ConstantConditions")
private void validateMiningParams() {
if (isMiningEnabled && coinbase == null) {
throw new ParameterException(
this.commandLine,
"Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled) "
+ "or specify the beneficiary of mining (via --miner-coinbase <Address>)");
}
if (!isMiningEnabled && iStratumMiningEnabled) {
throw new ParameterException(
this.commandLine,
"Unable to mine with Stratum if mining is disabled. Either disable Stratum mining (remove --miner-stratum-enabled) "
+ "or specify mining is enabled (--miner-enabled)");
}
}
protected void validateP2PInterface(final String p2pInterface) {
final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface;
try {
if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) {
throw new ParameterException(commandLine, failMessage);
}
} catch (final UnknownHostException | SocketException e) {
throw new ParameterException(commandLine, failMessage, e);
}
}
@SuppressWarnings("ConstantConditions")
private void validateNatParams() {
if (!(natMethod.equals(NatMethod.AUTO) || natMethod.equals(NatMethod.KUBERNETES))
&& !unstableNatOptions
.getNatManagerServiceName()
.equals(DEFAULT_BESU_SERVICE_NAME_FILTER)) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name"
+ " or select the KUBERNETES mode (via --nat--method=KUBERNETES)");
}
if (natMethod.equals(NatMethod.AUTO) && !unstableNatOptions.getNatMethodFallbackEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled"
+ " or select another mode (via --nat--method=XXXX)");
}
}
private void validateNetStatsParams() {
if (Strings.isNullOrEmpty(unstableEthstatsOptions.getEthstatsUrl())
&& !unstableEthstatsOptions.getEthstatsContact().isEmpty()) {
throw new ParameterException(
this.commandLine,
"The `--Xethstats-contact` requires ethstats server URL to be provided. Either remove --Xethstats-contact"
+ " or provide an url (via --Xethstats=nodename:secret@host:port)");
}
}
private void validateDnsOptionsParams() {
if (!unstableDnsOptions.getDnsEnabled() && unstableDnsOptions.getDnsUpdateEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xdns-update-enabled` requires dns to be enabled. Either remove --Xdns-update-enabled"
+ " or specify dns is enabled (--Xdns-enabled)");
}
}
private GenesisConfigOptions readGenesisConfigOptions() {
final GenesisConfigOptions genesisConfigOptions;
try {
final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig());
genesisConfigOptions = genesisConfigFile.getConfigOptions(genesisConfigOverrides);
} catch (final Exception e) {
throw new IllegalStateException("Unable to read genesis file for GoQuorum options", e);
}
return genesisConfigOptions;
}
private void issueOptionWarnings() {
// Check that P2P options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--p2p-enabled",
!p2pEnabled,
asList(
"--bootnodes",
"--discovery-enabled",
"--max-peers",
"--banned-node-id",
"--banned-node-ids",
"--p2p-host",
"--p2p-interface",
"--p2p-port",
"--remote-connections-max-percentage"));
// Check that mining options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--miner-enabled",
!isMiningEnabled,
asList(
"--miner-coinbase",
"--min-gas-price",
"--min-block-occupancy-ratio",
"--miner-extra-data",
"--miner-stratum-enabled",
"--Xminer-remote-sealers-limit",
"--Xminer-remote-sealers-hashrate-ttl"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--sync-mode",
!SyncMode.FAST.equals(syncMode),
singletonList("--fast-sync-min-peers"));
if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE) && nodePrivateKeyFile != null) {
logger.warn(
DEPENDENCY_WARNING_MSG,
"--node-private-key-file",
"--security-module=" + DEFAULT_SECURITY_MODULE);
}
}
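  // Applies the parsed options: checks for port clashes, defaults the sync mode
  // (FAST unless a custom genesis file, privacy, or the DEV network is in use),
  // builds the network, RPC, GraphQL, WebSocket, API, permissioning and metrics
  // configurations, and merges deprecated --host-whitelist values into the
  // --host-allowlist.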
private BesuCommand configure() throws Exception {
checkPortClash();
syncMode =
Optional.ofNullable(syncMode)
.orElse(
genesisFile == null && !isPrivacyEnabled && network != NetworkName.DEV
? SyncMode.FAST
: SyncMode.FULL);
ethNetworkConfig = updateNetworkConfig(getNetwork());
if (isGoQuorumCompatibilityMode) {
checkGoQuorumCompatibilityConfig(ethNetworkConfig);
}
jsonRpcConfiguration = jsonRpcConfiguration();
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration = webSocketConfiguration();
apiConfiguration = apiConfiguration();
    // hostsWhitelist is a hidden option. If it is specified, add the list to hostsAllowlist.
if (!hostsWhitelist.isEmpty()) {
// if allowlist == default values, remove the default values
if (hostsAllowlist.size() == 2
&& hostsAllowlist.containsAll(List.of("localhost", "127.0.0.1"))) {
hostsAllowlist.removeAll(List.of("localhost", "127.0.0.1"));
}
hostsAllowlist.addAll(hostsWhitelist);
}
permissioningConfiguration = permissioningConfiguration();
staticNodes = loadStaticNodes();
logger.info("Connecting to {} static nodes.", staticNodes.size());
logger.trace("Static Nodes = {}", staticNodes);
final List<EnodeURL> enodeURIs = ethNetworkConfig.getBootNodes();
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(enodeURIs, p));
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(staticNodes, p));
metricsConfiguration = metricsConfiguration();
logger.info("Security Module: {}", securityModuleName);
return this;
}
private GoQuorumPrivacyParameters configureGoQuorumPrivacy(
final KeyValueStorageProvider storageProvider) {
return new GoQuorumPrivacyParameters(
createGoQuorumEnclave(),
readEnclaveKey(),
storageProvider.createGoQuorumPrivateStorage(),
createPrivateWorldStateArchive(storageProvider));
}
private GoQuorumEnclave createGoQuorumEnclave() {
final EnclaveFactory enclaveFactory = new EnclaveFactory(Vertx.vertx());
if (privacyKeyStoreFile != null) {
return enclaveFactory.createGoQuorumEnclave(
privacyUrl, privacyKeyStoreFile, privacyKeyStorePasswordFile, privacyTlsKnownEnclaveFile);
} else {
return enclaveFactory.createGoQuorumEnclave(privacyUrl);
}
}
private String readEnclaveKey() {
final String key;
try {
key = Files.asCharSource(privacyPublicKeyFile, UTF_8).read();
} catch (final Exception e) {
throw new ParameterException(
this.commandLine,
"--privacy-public-key-file must be set when --goquorum-compatibility-enabled is set to true.",
e);
}
if (key.length() != 44) {
throw new IllegalArgumentException(
"Contents of enclave public key file needs to be 44 characters long to decode to a valid 32 byte public key.");
}
    // throws an exception if the key is not valid base64
Base64.getDecoder().decode(key);
return key;
}
private NetworkName getNetwork() {
    // noinspection ConstantConditions -- network is not always null: PicoCLI
    // injects it when the option is used
return network == null ? MAINNET : network;
}
private void ensureAllNodesAreInAllowlist(
final Collection<EnodeURL> enodeAddresses,
final LocalPermissioningConfiguration permissioningConfiguration) {
try {
PermissioningConfigurationValidator.areAllNodesAreInAllowlist(
enodeAddresses, permissioningConfiguration);
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage());
}
}
private BesuCommand controller() {
besuController = buildController();
return this;
}
public BesuController buildController() {
try {
return getControllerBuilder().build();
} catch (final Exception e) {
throw new ExecutionException(this.commandLine, e.getMessage(), e);
}
}
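  // Assembles the controller builder from the parsed options: network and
  // synchronizer configuration, mining parameters, transaction pool settings,
  // node key, metrics, privacy, storage, pruning and gas limit configuration.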
public BesuControllerBuilder getControllerBuilder() {
final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName);
return controllerBuilderFactory
.fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides)
.synchronizerConfiguration(buildSyncConfig())
.ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject())
.dataDirectory(dataDir())
.miningParameters(
new MiningParameters(
coinbase,
minTransactionGasPrice,
extraData,
isMiningEnabled,
                isStratumMiningEnabled,
stratumNetworkInterface,
stratumPort,
unstableMiningOptions.getStratumExtranonce(),
Optional.empty(),
minBlockOccupancyRatio,
unstableMiningOptions.getRemoteSealersLimit(),
unstableMiningOptions.getRemoteSealersTimeToLive()))
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
.nodeKey(buildNodeKey())
.metricsSystem(metricsSystem.get())
.privacyParameters(privacyParameters(storageProvider))
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(storageProvider)
.isPruningEnabled(isPruningEnabled())
.pruningConfiguration(
new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained))
.genesisConfigOverrides(genesisConfigOverrides)
.gasLimitCalculator(
Optional.ofNullable(targetGasLimit)
.<GasLimitCalculator>map(TargetingGasLimitCalculator::new)
.orElse(GasLimitCalculator.constant()))
.requiredBlocks(requiredBlocks)
.reorgLoggingThreshold(reorgLoggingThreshold)
.dataStorageConfiguration(unstableDataStorageOptions.toDomainObject());
}
private GraphQLConfiguration graphQLConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--graphql-http-enabled",
!isGraphQLHttpEnabled,
asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port"));
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(isGraphQLHttpEnabled);
graphQLConfiguration.setHost(graphQLHttpHost);
graphQLConfiguration.setPort(graphQLHttpPort);
graphQLConfiguration.setHostsAllowlist(hostsAllowlist);
graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins);
graphQLConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return graphQLConfiguration;
}
private JsonRpcConfiguration jsonRpcConfiguration() {
checkRpcTlsClientAuthOptionsDependencies();
checkRpcTlsOptionsDependencies();
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-enabled",
!isRpcHttpEnabled,
asList(
"--rpc-http-api",
"--rpc-http-apis",
"--rpc-http-cors-origins",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-authentication-enabled",
"--rpc-http-authentication-credentials-file",
"--rpc-http-authentication-public-key-file",
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
if (isRpcHttpAuthenticationEnabled
&& rpcHttpAuthenticationCredentialsFile() == null
&& rpcHttpAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(isRpcHttpEnabled);
jsonRpcConfiguration.setHost(rpcHttpHost);
jsonRpcConfiguration.setPort(rpcHttpPort);
jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins);
jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList()));
jsonRpcConfiguration.setHostsAllowlist(hostsAllowlist);
jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled);
jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile());
jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile);
jsonRpcConfiguration.setTlsConfiguration(rpcHttpTlsConfiguration());
jsonRpcConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return jsonRpcConfiguration;
}
private void checkRpcTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-enabled",
!isRpcHttpTlsEnabled,
asList(
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsClientAuthOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-client-auth-enabled",
!isRpcHttpTlsClientAuthEnabled,
asList("--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled"));
}
private void checkPrivacyTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-tls-enabled",
!isPrivacyTlsEnabled,
asList(
"--privacy-tls-keystore-file",
"--privacy-tls-keystore-password-file",
"--privacy-tls-known-enclave-file"));
}
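  // Builds the TLS configuration for the JSON-RPC HTTP endpoint. A keystore file
  // and its password file are required; client authentication additionally needs
  // either a known-clients file or CA-signed clients enabled.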
private Optional<TlsConfiguration> rpcHttpTlsConfiguration() {
if (!isRpcTlsConfigurationRequired()) {
return Optional.empty();
}
if (rpcHttpTlsKeyStoreFile == null) {
throw new ParameterException(
commandLine, "Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (rpcHttpTlsKeyStorePasswordFile == null) {
throw new ParameterException(
commandLine,
"File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (isRpcHttpTlsClientAuthEnabled
&& !isRpcHttpTlsCAClientsEnabled
&& rpcHttpTlsKnownClientsFile == null) {
throw new ParameterException(
commandLine,
"Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint");
}
return Optional.of(
TlsConfiguration.Builder.aTlsConfiguration()
.withKeyStorePath(rpcHttpTlsKeyStoreFile)
.withKeyStorePasswordSupplier(
new FileBasedPasswordProvider(rpcHttpTlsKeyStorePasswordFile))
.withClientAuthConfiguration(rpcHttpTlsClientAuthConfiguration())
.build());
}
private TlsClientAuthConfiguration rpcHttpTlsClientAuthConfiguration() {
if (isRpcHttpTlsClientAuthEnabled) {
return TlsClientAuthConfiguration.Builder.aTlsClientAuthConfiguration()
.withKnownClientsFile(rpcHttpTlsKnownClientsFile)
.withCaClientsEnabled(isRpcHttpTlsCAClientsEnabled)
.build();
}
return null;
}
private boolean isRpcTlsConfigurationRequired() {
return isRpcHttpEnabled && isRpcHttpTlsEnabled;
}
private WebSocketConfiguration webSocketConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file"));
if (isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile() == null
&& rpcWsAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsAllowlist(hostsAllowlist);
webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile);
webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec());
return webSocketConfiguration;
}
private ApiConfiguration apiConfiguration() {
return ImmutableApiConfiguration.builder()
.gasPriceBlocks(apiGasPriceBlocks)
.gasPricePercentile(apiGasPricePercentile)
.gasPriceMin(minTransactionGasPrice.toLong())
.gasPriceMax(apiGasPriceMax)
.build();
}
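  // Builds the metrics configuration. Pull mode (--metrics-enabled) and push mode
  // (--metrics-push-enabled) are mutually exclusive, and each mode's companion
  // options are only meaningful when that mode is enabled.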
public MetricsConfiguration metricsConfiguration() {
if (isMetricsEnabled && isMetricsPushEnabled) {
throw new ParameterException(
this.commandLine,
"--metrics-enabled option and --metrics-push-enabled option can't be used at the same "
+ "time. Please refer to CLI reference for more details about this constraint.");
}
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-enabled",
!isMetricsEnabled,
asList("--metrics-host", "--metrics-port"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-push-enabled",
!isMetricsPushEnabled,
asList(
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job"));
return unstableMetricsCLIOptions
.toDomainObject()
.enabled(isMetricsEnabled)
.host(metricsHost)
.port(metricsPort)
.protocol(metricsProtocol)
.metricCategories(metricCategories)
.pushEnabled(isMetricsPushEnabled)
.pushHost(metricsPushHost)
.pushPort(metricsPushPort)
.pushInterval(metricsPushInterval)
.hostsAllowlist(hostsAllowlist)
.prometheusJob(metricsPrometheusJob)
.build();
}
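  // Builds the permissioning configuration, combining optional file-based (local)
  // node/account permissioning, smart-contract-based permissioning, and GoQuorum
  // permissioning when running in GoQuorum compatibility mode.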
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
return Optional.empty();
}
final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional;
if (localPermissionsEnabled()) {
final Optional<String> nodePermissioningConfigFile =
Optional.ofNullable(nodePermissionsConfigFile);
final Optional<String> accountPermissioningConfigFile =
Optional.ofNullable(accountPermissionsConfigFile);
final LocalPermissioningConfiguration localPermissioningConfiguration =
PermissioningConfigurationBuilder.permissioningConfiguration(
permissionsNodesEnabled,
getEnodeDnsConfiguration(),
nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()),
permissionsAccountsEnabled,
accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath()));
localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration);
} else {
if (nodePermissionsConfigFile != null && !permissionsNodesEnabled) {
logger.warn(
"Node permissioning config file set {} but no permissions enabled",
nodePermissionsConfigFile);
}
if (accountPermissionsConfigFile != null && !permissionsAccountsEnabled) {
logger.warn(
"Account permissioning config file set {} but no permissions enabled",
accountPermissionsConfigFile);
}
localPermissioningConfigurationOptional = Optional.empty();
}
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
if (permissionsNodesContractEnabled) {
if (permissionsNodesContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No node permissioning contract address specified. Cannot enable smart contract based node permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled(
permissionsNodesContractEnabled);
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
permissionsNodesContractAddress);
smartContractPermissioningConfiguration.setNodeSmartContractInterfaceVersion(
permissionsNodesContractVersion);
}
} else if (permissionsNodesContractAddress != null) {
logger.warn(
"Node permissioning smart contract address set {} but smart contract node permissioning is disabled.",
permissionsNodesContractAddress);
}
if (permissionsAccountsContractEnabled) {
if (permissionsAccountsContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No account permissioning contract address specified. Cannot enable smart contract based account permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled(
permissionsAccountsContractEnabled);
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
permissionsAccountsContractAddress);
}
} else if (permissionsAccountsContractAddress != null) {
logger.warn(
"Account permissioning smart contract address set {} but smart contract account permissioning is disabled.",
permissionsAccountsContractAddress);
}
final PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
localPermissioningConfigurationOptional,
Optional.of(smartContractPermissioningConfiguration),
quorumPermissioningConfig());
return Optional.of(permissioningConfiguration);
}
private Optional<GoQuorumPermissioningConfiguration> quorumPermissioningConfig() {
if (!isGoQuorumCompatibilityMode) {
return Optional.empty();
}
try {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
final OptionalLong qip714BlockNumber = genesisConfigOptions.getQip714BlockNumber();
return Optional.of(
GoQuorumPermissioningConfiguration.enabled(
qip714BlockNumber.orElse(QIP714_DEFAULT_BLOCK)));
} catch (final Exception e) {
throw new IllegalStateException("Error reading GoQuorum permissioning options", e);
}
}
private boolean localPermissionsEnabled() {
return permissionsAccountsEnabled || permissionsNodesEnabled;
}
private boolean contractPermissionsEnabled() {
return permissionsNodesContractEnabled || permissionsAccountsContractEnabled;
}
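  // Builds the privacy parameters. Privacy is incompatible with fast sync and
  // pruning, multi-tenancy requires RPC authentication, and on a paid-gas network
  // a funded signing key must be provided for privacy marker transactions.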
private PrivacyParameters privacyParameters(final KeyValueStorageProvider storageProvider) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-enabled",
!isPrivacyEnabled,
asList(
"--privacy-url",
"--privacy-public-key-file",
"--privacy-multi-tenancy-enabled",
"--privacy-tls-enabled"));
checkPrivacyTlsOptionsDependencies();
final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder();
if (isPrivacyEnabled) {
final String errorSuffix = "cannot be enabled with privacy.";
if (syncMode == SyncMode.FAST) {
throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix));
}
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
if (isPrivacyMultiTenancyEnabled
&& !jsonRpcConfiguration.isAuthenticationEnabled()
&& !webSocketConfiguration.isAuthenticationEnabled()) {
throw new ParameterException(
commandLine,
"Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled");
}
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
privacyParametersBuilder.setMultiTenancyEnabled(isPrivacyMultiTenancyEnabled);
privacyParametersBuilder.setOnchainPrivacyGroupsEnabled(isFlexiblePrivacyGroupsEnabled);
final boolean hasPrivacyPublicKey = privacyPublicKeyFile != null;
if (hasPrivacyPublicKey && !isPrivacyMultiTenancyEnabled) {
try {
privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile);
} catch (final IOException e) {
throw new ParameterException(
commandLine, "Problem with privacy-public-key-file: " + e.getMessage(), e);
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e);
}
} else if (hasPrivacyPublicKey) {
throw new ParameterException(
commandLine, "Privacy multi-tenancy and privacy public key cannot be used together");
} else if (!isPrivacyMultiTenancyEnabled) {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy");
}
if (Wei.ZERO.compareTo(minTransactionGasPrice) < 0) {
      // If gas is required, random keys cannot be used to sign private transactions;
      // i.e. --privacy-marker-transaction-signing-key-file must be set.
if (privacyMarkerTransactionSigningKeyPath == null) {
throw new ParameterException(
commandLine,
"Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified and must be a funded account. Private transactions cannot be signed by random (non-funded) accounts in paid gas networks");
}
}
if (!Address.PRIVACY.equals(privacyPrecompiledAddress)) {
logger.warn(
"--privacy-precompiled-address option is deprecated. This address is derived, based on --privacy-onchain-groups-enabled.");
}
privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath);
privacyParametersBuilder.setStorageProvider(
privacyKeyStorageProvider(keyValueStorageName + "-privacy"));
if (isPrivacyTlsEnabled) {
privacyParametersBuilder.setPrivacyKeyStoreFile(privacyKeyStoreFile);
privacyParametersBuilder.setPrivacyKeyStorePasswordFile(privacyKeyStorePasswordFile);
privacyParametersBuilder.setPrivacyTlsKnownEnclaveFile(privacyTlsKnownEnclaveFile);
}
privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx));
} else if (isGoQuorumCompatibilityMode) {
privacyParametersBuilder.setGoQuorumPrivacyParameters(
Optional.of(configureGoQuorumPrivacy(storageProvider)));
}
if (!isPrivacyEnabled && anyPrivacyApiEnabled()) {
logger.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
}
if (!isGoQuorumCompatibilityMode
&& (rpcHttpApis.contains(RpcApis.GOQUORUM) || rpcWsApis.contains(RpcApis.GOQUORUM))) {
logger.warn("Cannot use GOQUORUM API methods when not in GoQuorum mode.");
}
final PrivacyParameters privacyParameters = privacyParametersBuilder.build();
if (isPrivacyEnabled) {
preSynchronizationTaskRunner.addTask(
new PrivateDatabaseMigrationPreSyncTask(privacyParameters, migratePrivateDatabase));
}
return privacyParameters;
}
public WorldStateArchive createPrivateWorldStateArchive(final StorageProvider storageProvider) {
final WorldStateStorage privateWorldStateStorage =
storageProvider.createPrivateWorldStateStorage();
final WorldStatePreimageStorage preimageStorage =
storageProvider.createPrivateWorldStatePreimageStorage();
return new DefaultWorldStateArchive(privateWorldStateStorage, preimageStorage);
}
private boolean anyPrivacyApiEnabled() {
return rpcHttpApis.contains(RpcApis.EEA)
|| rpcWsApis.contains(RpcApis.EEA)
|| rpcHttpApis.contains(RpcApis.PRIV)
|| rpcWsApis.contains(RpcApis.PRIV);
}
private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) {
return new PrivacyKeyValueStorageProviderBuilder()
.withStorageFactory(privacyKeyValueStorageFactory(name))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private PrivacyKeyValueStorageFactory privacyKeyValueStorageFactory(final String name) {
return (PrivacyKeyValueStorageFactory)
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name));
}
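  // Lazily builds and caches the key-value storage provider backed by the named
  // factory registered with the storage service.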
private KeyValueStorageProvider keyValueStorageProvider(final String name) {
if (this.keyValueStorageProvider == null) {
this.keyValueStorageProvider =
new KeyValueStorageProviderBuilder()
.withStorageFactory(
storageService
.getByName(name)
.orElseThrow(
() ->
new StorageException(
"No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
return this.keyValueStorageProvider;
}
private SynchronizerConfiguration buildSyncConfig() {
return unstableSynchronizerOptions
.toDomainObject()
.syncMode(syncMode)
.fastSyncMinimumPeerCount(fastSyncMinPeerCount)
.build();
}
private TransactionPoolConfiguration buildTransactionPoolConfiguration() {
return unstableTransactionPoolOptions
.toDomainObject()
.txPoolMaxSize(txPoolMaxSize)
.pooledTransactionHashesSize(pooledTransactionHashesSize)
.pendingTxRetentionPeriod(pendingTxRetentionPeriod)
.priceBump(Percentage.fromInt(priceBump))
.txFeeCap(txFeeCap)
.build();
}
private boolean isPruningEnabled() {
return pruningEnabled;
}
// Blockchain synchronisation from peers.
private void synchronize(
final BesuController controller,
final boolean p2pEnabled,
final boolean peerDiscoveryEnabled,
final EthNetworkConfig ethNetworkConfig,
final int maxPeers,
final String p2pAdvertisedHost,
final String p2pListenInterface,
final int p2pListenPort,
final GraphQLConfiguration graphQLConfiguration,
final JsonRpcConfiguration jsonRpcConfiguration,
final WebSocketConfiguration webSocketConfiguration,
final ApiConfiguration apiConfiguration,
final MetricsConfiguration metricsConfiguration,
final Optional<PermissioningConfiguration> permissioningConfiguration,
final Collection<EnodeURL> staticNodes,
final Path pidPath) {
checkNotNull(runnerBuilder);
permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration);
final ObservableMetricsSystem metricsSystem = this.metricsSystem.get();
final Runner runner =
runnerBuilder
.vertx(vertx)
.besuController(controller)
.p2pEnabled(p2pEnabled)
.natMethod(natMethod)
.natManagerServiceName(unstableNatOptions.getNatManagerServiceName())
.natMethodFallbackEnabled(unstableNatOptions.getNatMethodFallbackEnabled())
.discovery(peerDiscoveryEnabled)
.ethNetworkConfig(ethNetworkConfig)
.p2pAdvertisedHost(p2pAdvertisedHost)
.p2pListenInterface(p2pListenInterface)
.p2pListenPort(p2pListenPort)
.maxPeers(maxPeers)
.limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled)
.fractionRemoteConnectionsAllowed(
Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue())
.randomPeerPriority(randomPeerPriority)
.networkingConfiguration(unstableNetworkingOptions.toDomainObject())
.graphQLConfiguration(graphQLConfiguration)
.jsonRpcConfiguration(jsonRpcConfiguration)
.webSocketConfiguration(webSocketConfiguration)
.apiConfiguration(apiConfiguration)
.pidPath(pidPath)
.dataDir(dataDir())
.bannedNodeIds(bannedNodeIds)
.metricsSystem(metricsSystem)
.metricsConfiguration(metricsConfiguration)
.staticNodes(staticNodes)
.identityString(identityString)
.besuPluginContext(besuPluginContext)
.autoLogBloomCaching(autoLogBloomCachingEnabled)
.ethstatsUrl(unstableEthstatsOptions.getEthstatsUrl())
.ethstatsContact(unstableEthstatsOptions.getEthstatsContact())
.storageProvider(keyValueStorageProvider(keyValueStorageName))
.build();
addShutdownHook(runner);
runner.start();
runner.awaitStop();
}
protected Vertx createVertx(final VertxOptions vertxOptions) {
return Vertx.vertx(vertxOptions);
}
private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) {
return new VertxOptions()
.setMetricsOptions(
new MetricsOptions()
.setEnabled(true)
.setFactory(new VertxMetricsAdapterFactory(metricsSystem)));
}
private void addShutdownHook(final Runner runner) {
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
try {
besuPluginContext.stopPlugins();
runner.close();
LogManager.shutdown();
} catch (final Exception e) {
logger.error("Failed to stop Besu");
}
}));
}
// Used to discover the default IP of the client.
  // The loopback IP is used by default because that is how the smokeTests require
  // it to be, and defaulting only to localhost is good security behaviour.
private InetAddress autoDiscoverDefaultIP() {
if (autoDiscoveredDefaultIP != null) {
return autoDiscoveredDefaultIP;
}
autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress();
return autoDiscoveredDefaultIP;
}
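  // Derives the effective network configuration: a custom genesis file replaces
  // the built-in network defaults (and conflicts with --network), while explicit
  // --network-id and --bootnodes values take precedence over those defaults.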
private EthNetworkConfig updateNetworkConfig(final NetworkName network) {
final EthNetworkConfig.Builder builder =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network));
    // A custom genesis file comes with specific default values, not only for the
    // genesis file itself but also for the network id and the bootnodes list.
if (genesisFile != null) {
      // noinspection ConstantConditions -- network is not always null: PicoCLI
      // injects it when the option is used
if (this.network != null) {
        // Check whether the network option was actually provided by the user rather
        // than merely holding its default value. If the user provided it together
        // with the genesis file option, raise a conflict error.
throw new ParameterException(
this.commandLine,
"--network option and --genesis-file option can't be used at the same time. Please "
+ "refer to CLI reference for more details about this constraint.");
}
builder.setGenesisConfig(genesisConfig());
if (networkId == null) {
        // If no network id option is defined on the CLI, we have to derive a default
        // value from the genesis file. We only parse the genesis in this case, since
        // we already have network id constants for known networks to speed up the
        // process, and no parsed version exists at this stage. If the genesis
        // contains no chain id (it is optional), we use the mainnet network id.
try {
builder.setNetworkId(
getGenesisConfigFile()
.getConfigOptions(genesisConfigOverrides)
.getChainId()
.orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId()));
} catch (final DecodeException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e);
} catch (final ArithmeticException e) {
throw new ParameterException(
this.commandLine,
"No networkId specified and chainId in "
+ "genesis file is too large to be used as a networkId");
}
}
if (bootNodes == null) {
        // Default to an empty bootnodes list when the option is not provided on the
        // CLI, because the mainnet bootnodes won't work as the default for a custom
        // genesis. An empty default forces the user to create a custom list, rather
        // than a useless one that may suggest it works when it can't.
builder.setBootNodes(new ArrayList<>());
}
builder.setDnsDiscoveryUrl(null);
}
if (networkId != null) {
builder.setNetworkId(networkId);
}
if (bootNodes != null) {
if (!peerDiscoveryEnabled) {
logger.warn("Discovery disabled: bootnodes will be ignored.");
}
try {
final List<EnodeURL> listBootNodes =
bootNodes.stream()
.filter(value -> !value.isEmpty())
.map(url -> EnodeURL.fromString(url, getEnodeDnsConfiguration()))
.collect(Collectors.toList());
DiscoveryConfiguration.assertValidBootnodes(listBootNodes);
builder.setBootNodes(listBootNodes);
} catch (final IllegalArgumentException e) {
throw new ParameterException(commandLine, e.getMessage());
}
}
return builder.build();
}
private GenesisConfigFile getGenesisConfigFile() {
return GenesisConfigFile.fromConfig(genesisConfig());
}
private String genesisConfig() {
try {
return Resources.toString(genesisFile.toURI().toURL(), UTF_8);
} catch (final IOException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to load genesis file %s.", genesisFile), e);
}
}
// dataDir() is public because it is accessed by subcommands
public Path dataDir() {
return dataPath.toAbsolutePath();
}
private Path pluginsDir() {
final String pluginsDir = System.getProperty("besu.plugins.dir");
if (pluginsDir == null) {
return new File(System.getProperty("besu.home", "."), "plugins").toPath();
} else {
return new File(pluginsDir).toPath();
}
}
@VisibleForTesting
NodeKey buildNodeKey() {
return new NodeKey(securityModule());
}
private SecurityModule securityModule() {
return securityModuleService
.getByName(securityModuleName)
.orElseThrow(() -> new RuntimeException("Security Module not found: " + securityModuleName))
.get();
}
private File nodePrivateKeyFile() {
return Optional.ofNullable(nodePrivateKeyFile)
.orElseGet(() -> KeyPairUtil.getDefaultKeyFile(dataDir()));
}
private String rpcHttpAuthenticationCredentialsFile() {
final String filename = rpcHttpAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "HTTP");
}
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
final String filename = rpcWsAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir()
+ System.getProperty("file.separator")
+ DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION;
}
public MetricsSystem getMetricsSystem() {
return metricsSystem.get();
}
private Set<EnodeURL> loadStaticNodes() throws IOException {
final Path staticNodesPath;
if (staticNodesFile != null) {
staticNodesPath = staticNodesFile.toAbsolutePath();
if (!staticNodesPath.toFile().exists()) {
throw new ParameterException(
commandLine, String.format("Static nodes file %s does not exist", staticNodesPath));
}
} else {
final String staticNodesFilename = "static-nodes.json";
staticNodesPath = dataDir().resolve(staticNodesFilename);
}
logger.info("Static Nodes file = {}", staticNodesPath);
return StaticNodesParser.fromPath(staticNodesPath, getEnodeDnsConfiguration());
}
public BesuExceptionHandler exceptionHandler() {
return new BesuExceptionHandler(this::getLogLevel);
}
public EnodeDnsConfiguration getEnodeDnsConfiguration() {
if (enodeDnsConfiguration == null) {
enodeDnsConfiguration = unstableDnsOptions.toDomainObject();
}
return enodeDnsConfiguration;
}
private void checkPortClash() {
getEffectivePorts().stream()
.filter(Objects::nonNull)
.filter(port -> port > 0)
.forEach(
port -> {
if (!allocatedPorts.add(port)) {
throw new ParameterException(
commandLine,
"Port number '"
+ port
+ "' has been specified multiple times. Please review the supplied configuration.");
}
});
}
/**
   * Gets the list of effective ports (ports that are enabled).
*
* @return The list of effective ports
*/
private List<Integer> getEffectivePorts() {
final List<Integer> effectivePorts = new ArrayList<>();
addPortIfEnabled(effectivePorts, p2pPort, p2pEnabled);
addPortIfEnabled(effectivePorts, graphQLHttpPort, isGraphQLHttpEnabled);
addPortIfEnabled(effectivePorts, rpcHttpPort, isRpcHttpEnabled);
addPortIfEnabled(effectivePorts, rpcWsPort, isRpcWsEnabled);
addPortIfEnabled(effectivePorts, metricsPort, isMetricsEnabled);
addPortIfEnabled(effectivePorts, metricsPushPort, isMetricsPushEnabled);
addPortIfEnabled(effectivePorts, stratumPort, iStratumMiningEnabled);
return effectivePorts;
}
/**
* Adds port in the passed list only if enabled.
*
* @param ports The list of ports
* @param port The port value
* @param enabled true if enabled, false otherwise
*/
private void addPortIfEnabled(
final List<Integer> ports, final Integer port, final boolean enabled) {
if (enabled) {
ports.add(port);
}
}
private void checkGoQuorumCompatibilityConfig(final EthNetworkConfig ethNetworkConfig) {
if (isGoQuorumCompatibilityMode) {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
// this static flag is read by the RLP decoder
GoQuorumOptions.goQuorumCompatibilityMode = true;
if (!genesisConfigOptions.isQuorum()) {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) can only be used if genesis file has 'isQuorum' flag set to true.");
}
genesisConfigOptions
.getChainId()
.ifPresent(
chainId ->
ensureGoQuorumCompatibilityModeNotUsedOnMainnet(
chainId, isGoQuorumCompatibilityMode));
if (genesisFile != null
&& getGenesisConfigFile().getConfigOptions().isQuorum()
&& !minTransactionGasPrice.isZero()) {
throw new ParameterException(
this.commandLine,
"--min-gas-price must be set to zero if GoQuorum compatibility is enabled in the genesis config.");
}
if (ethNetworkConfig.getNetworkId().equals(EthNetworkConfig.MAINNET_NETWORK_ID)) {
throw new ParameterException(
this.commandLine, "GoQuorum compatibility mode (enabled) cannot be used on Mainnet.");
}
}
}
private void ensureGoQuorumCompatibilityModeNotUsedOnMainnet(
final BigInteger chainId, final boolean isGoQuorumCompatibilityMode) {
if (isGoQuorumCompatibilityMode && chainId.equals(EthNetworkConfig.MAINNET_NETWORK_ID)) {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) cannot be used on Mainnet.");
}
}
@VisibleForTesting
Level getLogLevel() {
return logLevel;
}
private class BesuCommandConfigurationService implements BesuConfiguration {
@Override
public Path getStoragePath() {
return dataDir().resolve(DATABASE_PATH);
}
@Override
public Path getDataPath() {
return dataDir();
}
@Override
public int getDatabaseVersion() {
return unstableDataStorageOptions
.toDomainObject()
.getDataStorageFormat()
.getDatabaseVersion();
}
}
}
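
The review note on this record suggests the --network/--genesis-file conflict could fail earlier than inside the network-config builder shown above. A minimal Go sketch of that fail-fast idea (all names here are illustrative; Besu itself is Java and raises picocli's ParameterException):

package main

import (
	"errors"
	"fmt"
)

// failFastOnConflict rejects mutually exclusive options before any expensive
// work (such as genesis parsing) happens. Nothing here is part of Besu.
func failFastOnConflict(network, genesisFile string) error {
	if network != "" && genesisFile != "" {
		return errors.New("--network and --genesis-file can't be used at the same time")
	}
	return nil
}

func main() {
	// Reported immediately after option parsing, before building any config.
	if err := failFastOnConflict("mainnet", "custom-genesis.json"); err != nil {
		fmt.Println(err)
	}
}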
| 1 | 24,491 | this works and is the smallest possible change but we could definitely fail earlier | hyperledger-besu | java |
@@ -1,3 +1,4 @@
+//go:build !arm64
// +build !arm64
 package nodeps
| 1 |
// +build !arm64
package nodeps
// MariaDBDefaultVersion is the default MariaDB version
const MariaDBDefaultVersion = MariaDB103
// ValidMariaDBVersions is the versions of MariaDB that are valid
var ValidMariaDBVersions = map[string]bool{
MariaDB55: true,
MariaDB100: true,
MariaDB101: true,
MariaDB102: true,
MariaDB103: true,
MariaDB104: true,
MariaDB105: true,
MariaDB106: true,
}
// MariaDB Versions
const (
MariaDB55 = "5.5"
MariaDB100 = "10.0"
MariaDB101 = "10.1"
MariaDB102 = "10.2"
MariaDB103 = "10.3"
MariaDB104 = "10.4"
MariaDB105 = "10.5"
MariaDB106 = "10.6"
)
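
The review note on this record flags the //go:build line as Go 1.17+ syntax. While older toolchains are still supported, both constraint forms are normally kept in sync, as in this illustrative file (not part of ddev):

//go:build !arm64
// +build !arm64

// Both constraint lines must agree and sit above the package clause,
// separated from it by a blank line; gofmt keeps them in sync on Go 1.17+.
package nodeps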
| 1 | 15,789 | Should not have snuck in here right? This is a golang 1.17 feature, wii definitely want to update these | drud-ddev | go |
@@ -25,6 +25,8 @@ class NotesScreenComponent extends BaseScreenComponent {
constructor() {
super();
+ this.prevFolderId_ = null;
+
this.onAppStateChange_ = async () => {
// Force an update to the notes list when app state changes
      const newProps = Object.assign({}, this.props);
| 1 |
const React = require('react');
const { AppState, View, StyleSheet } = require('react-native');
const { stateUtils } = require('lib/reducer.js');
const { connect } = require('react-redux');
const { NoteList } = require('lib/components/note-list.js');
const Folder = require('lib/models/Folder.js');
const Tag = require('lib/models/Tag.js');
const Note = require('lib/models/Note.js');
const Setting = require('lib/models/Setting.js');
const { themeStyle } = require('lib/components/global-style.js');
const { ScreenHeader } = require('lib/components/screen-header.js');
const { _ } = require('lib/locale.js');
const { ActionButton } = require('lib/components/action-button.js');
const { dialogs } = require('lib/dialogs.js');
const DialogBox = require('react-native-dialogbox').default;
const { BaseScreenComponent } = require('lib/components/base-screen.js');
const { BackButtonService } = require('lib/services/back-button.js');
class NotesScreenComponent extends BaseScreenComponent {
static navigationOptions() {
return { header: null };
}
constructor() {
super();
this.onAppStateChange_ = async () => {
// Force an update to the notes list when app state changes
const newProps = Object.assign({}, this.props);
newProps.notesSource = '';
await this.refreshNotes(newProps);
};
this.sortButton_press = async () => {
const buttons = [];
const sortNoteOptions = Setting.enumOptions('notes.sortOrder.field');
const makeCheckboxText = function(selected, sign, label) {
const s = sign === 'tick' ? '✓' : '⬤';
return (selected ? `${s} ` : '') + label;
};
for (const field in sortNoteOptions) {
if (!sortNoteOptions.hasOwnProperty(field)) continue;
buttons.push({
text: makeCheckboxText(Setting.value('notes.sortOrder.field') === field, 'bullet', sortNoteOptions[field]),
id: { name: 'notes.sortOrder.field', value: field },
});
}
buttons.push({
text: makeCheckboxText(Setting.value('notes.sortOrder.reverse'), 'tick', `[ ${Setting.settingMetadata('notes.sortOrder.reverse').label()} ]`),
id: { name: 'notes.sortOrder.reverse', value: !Setting.value('notes.sortOrder.reverse') },
});
buttons.push({
text: makeCheckboxText(Setting.value('uncompletedTodosOnTop'), 'tick', `[ ${Setting.settingMetadata('uncompletedTodosOnTop').label()} ]`),
id: { name: 'uncompletedTodosOnTop', value: !Setting.value('uncompletedTodosOnTop') },
});
buttons.push({
text: makeCheckboxText(Setting.value('showCompletedTodos'), 'tick', `[ ${Setting.settingMetadata('showCompletedTodos').label()} ]`),
id: { name: 'showCompletedTodos', value: !Setting.value('showCompletedTodos') },
});
const r = await dialogs.pop(this, Setting.settingMetadata('notes.sortOrder.field').label(), buttons);
if (!r) return;
Setting.setValue(r.name, r.value);
};
this.backHandler = () => {
if (this.dialogbox.state.isVisible) {
this.dialogbox.close();
return true;
}
return false;
};
}
styles() {
if (!this.styles_) this.styles_ = {};
const themeId = this.props.theme;
const cacheKey = themeId;
if (this.styles_[cacheKey]) return this.styles_[cacheKey];
this.styles_ = {};
const styles = {
noteList: {
flex: 1,
},
};
this.styles_[cacheKey] = StyleSheet.create(styles);
return this.styles_[cacheKey];
}
async componentDidMount() {
await this.refreshNotes();
AppState.addEventListener('change', this.onAppStateChange_);
BackButtonService.addHandler(this.backHandler);
}
async componentWillUnmount() {
AppState.removeEventListener('change', this.onAppStateChange_);
BackButtonService.removeHandler(this.backHandler);
}
async componentDidUpdate(prevProps) {
if (prevProps.notesOrder !== this.props.notesOrder || prevProps.selectedFolderId != this.props.selectedFolderId || prevProps.selectedTagId != this.props.selectedTagId || prevProps.selectedSmartFilterId != this.props.selectedSmartFilterId || prevProps.notesParentType != this.props.notesParentType) {
await this.refreshNotes(this.props);
}
}
async refreshNotes(props = null) {
if (props === null) props = this.props;
const options = {
order: props.notesOrder,
uncompletedTodosOnTop: props.uncompletedTodosOnTop,
showCompletedTodos: props.showCompletedTodos,
caseInsensitive: true,
};
const parent = this.parentItem(props);
if (!parent) return;
const source = JSON.stringify({
options: options,
parentId: parent.id,
});
if (source == props.notesSource) return;
let notes = [];
if (props.notesParentType === 'Folder') {
notes = await Note.previews(props.selectedFolderId, options);
} else if (props.notesParentType === 'Tag') {
notes = await Tag.notes(props.selectedTagId, options);
} else if (props.notesParentType === 'SmartFilter') {
notes = await Note.previews(null, options);
}
this.props.dispatch({
type: 'NOTE_UPDATE_ALL',
notes: notes,
notesSource: source,
});
}
deleteFolder_onPress(folderId) {
dialogs.confirm(this, _('Delete notebook? All notes and sub-notebooks within this notebook will also be deleted.')).then(ok => {
if (!ok) return;
Folder.delete(folderId)
.then(() => {
this.props.dispatch({
type: 'NAV_GO',
routeName: 'Notes',
smartFilterId: 'c3176726992c11e9ac940492261af972',
});
})
.catch(error => {
alert(error.message);
});
});
}
editFolder_onPress(folderId) {
this.props.dispatch({
type: 'NAV_GO',
routeName: 'Folder',
folderId: folderId,
});
}
parentItem(props = null) {
if (!props) props = this.props;
let output = null;
if (props.notesParentType == 'Folder') {
output = Folder.byId(props.folders, props.selectedFolderId);
} else if (props.notesParentType == 'Tag') {
output = Tag.byId(props.tags, props.selectedTagId);
} else if (props.notesParentType == 'SmartFilter') {
output = { id: this.props.selectedSmartFilterId, title: _('All notes') };
} else {
return null;
// throw new Error('Invalid parent type: ' + props.notesParentType);
}
return output;
}
folderPickerOptions() {
const options = {
enabled: this.props.noteSelectionEnabled,
mustSelect: true,
};
if (this.folderPickerOptions_ && options.enabled === this.folderPickerOptions_.enabled) return this.folderPickerOptions_;
this.folderPickerOptions_ = options;
return this.folderPickerOptions_;
}
render() {
const parent = this.parentItem();
const theme = themeStyle(this.props.theme);
const rootStyle = {
flex: 1,
backgroundColor: theme.backgroundColor,
};
if (!this.props.visible) {
rootStyle.flex = 0.001; // This is a bit of a hack but it seems to work fine - it makes the component invisible but without unmounting it
}
const title = parent ? parent.title : null;
if (!parent) {
return (
<View style={rootStyle}>
<ScreenHeader title={title} showSideMenuButton={true} showBackButton={false} />
</View>
);
}
const addFolderNoteButtons = this.props.selectedFolderId && this.props.selectedFolderId != Folder.conflictFolderId();
const thisComp = this;
const actionButtonComp = this.props.noteSelectionEnabled || !this.props.visible ? null : <ActionButton addFolderNoteButtons={addFolderNoteButtons} parentFolderId={this.props.selectedFolderId}></ActionButton>;
return (
<View style={rootStyle}>
<ScreenHeader title={title} showBackButton={false} parentComponent={thisComp} sortButton_press={this.sortButton_press} folderPickerOptions={this.folderPickerOptions()} showSearchButton={true} showSideMenuButton={true} />
<NoteList style={this.styles().noteList} />
{actionButtonComp}
<DialogBox
ref={dialogbox => {
this.dialogbox = dialogbox;
}}
/>
</View>
);
}
}
const NotesScreen = connect(state => {
return {
folders: state.folders,
tags: state.tags,
selectedFolderId: state.selectedFolderId,
selectedNoteIds: state.selectedNoteIds,
selectedTagId: state.selectedTagId,
selectedSmartFilterId: state.selectedSmartFilterId,
notesParentType: state.notesParentType,
notes: state.notes,
notesSource: state.notesSource,
uncompletedTodosOnTop: state.settings.uncompletedTodosOnTop,
showCompletedTodos: state.settings.showCompletedTodos,
theme: state.settings.theme,
noteSelectionEnabled: state.noteSelectionEnabled,
notesOrder: stateUtils.notesOrder(state.settings),
};
})(NotesScreenComponent);
module.exports = { NotesScreen };
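
The review note on this record asks what prevFolderId_ is for. The patch itself doesn't show the field's use, but such a field usually supports a "refresh only on change" pattern; a hedged sketch of that pattern in Go (the actual component is JavaScript, and the names below are invented):

package main

import "fmt"

// screen remembers the previously seen folder id so the expensive refresh
// runs only when the selection actually changed. Illustrative only.
type screen struct{ prevFolderID string }

func (s *screen) onUpdate(folderID string) {
	if folderID == s.prevFolderID {
		return // nothing changed, skip the refresh
	}
	s.prevFolderID = folderID
	fmt.Println("refreshing notes for folder", folderID)
}

func main() {
	s := &screen{}
	s.onUpdate("a") // refreshes
	s.onUpdate("a") // skipped
	s.onUpdate("b") // refreshes
}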
| 1 | 14,575 | Could you explain the logic with prevFolderId? | laurent22-joplin | js |
@@ -0,0 +1,13 @@
+package testutil
+
+import "github.com/facebookgo/clock"
+
+// TimestampNow get now timestamp from new clock
+func TimestampNow() uint64 {
+ return TimestampNowFromClock(clock.New())
+}
+
+// TimestampNowFromClock get now timestamp from specific clock
+func TimestampNowFromClock(c clock.Clock) uint64 {
+ return uint64(c.Now().Unix())
+}
| 1 |
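
The review note on this record asks for a license header. A typical Go header sits above the package clause like this (the wording below is generic boilerplate, not the project's actual notice):

// Copyright (c) <year> <copyright holder>
// Use of this source code is governed by the license found in the
// LICENSE file at the repository root.

package testutil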
|
@@ -47,12 +47,13 @@
namespace lbann {
-lbann_comm* initialize(int& argc, char**& argv, int seed) {
+lbann_comm_ptr initialize(int& argc, char**& argv, int seed) {
// Initialize Elemental.
El::Initialize(argc, argv);
// Create a new comm object.
// Initial creation with every process in one model.
- auto* comm = new lbann_comm(0);
+ auto comm = lbann_comm_ptr{new lbann_comm(0), &lbann::finalize };
+
#if defined(LBANN_TOPO_AWARE)
// Determine the number of NUMA nodes present.
  hwloc_topology_t topo;
| 1 |
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// lbann_base .cpp - Basic definitions, functions
////////////////////////////////////////////////////////////////////////////////
#include "lbann/base.hpp"
#include <omp.h>
#if defined(LBANN_TOPO_AWARE)
#include <hwloc.h>
#if defined(HWLOC_API_VERSION) && (HWLOC_API_VERSION < 0x00010b00)
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#endif
#endif
#include "lbann/comm.hpp"
#include "lbann/utils/random.hpp"
#include "lbann/utils/omp_diagnostics.hpp"
#include "lbann/utils/stack_trace.hpp"
#ifdef LBANN_HAS_CUDNN
#include "lbann/utils/cudnn.hpp"
#endif
namespace lbann {
lbann_comm* initialize(int& argc, char**& argv, int seed) {
// Initialize Elemental.
El::Initialize(argc, argv);
// Create a new comm object.
// Initial creation with every process in one model.
auto* comm = new lbann_comm(0);
#if defined(LBANN_TOPO_AWARE)
// Determine the number of NUMA nodes present.
hwloc_topology_t topo;
hwloc_topology_init(&topo);
hwloc_topology_load(topo);
int numa_depth = hwloc_get_type_depth(topo, HWLOC_OBJ_NUMANODE);
if (numa_depth == HWLOC_TYPE_DEPTH_UNKNOWN) {
std::cout << comm->get_rank_in_world() <<
": cannot determine hwloc NUMA-node depth" << std::endl;
}
int num_numa_nodes = hwloc_get_nbobjs_by_depth(topo, numa_depth);
// Warn if there are more NUMA nodes than processes per node.
// It's probably fine if there are more processes than NUMA nodes for now.
// We can adjust that later when we better understand the threaded perf.
int ppn = comm->get_procs_per_node();
if (num_numa_nodes > ppn) {
if (comm->get_rank_in_node() == 0) {
std::cout << comm->get_rank_in_world() <<
": WARNING: node has " << num_numa_nodes <<
" NUMA nodes but you have " << ppn << " processes per node" <<
std::endl;
}
}
hwloc_topology_destroy(topo);
#endif
// Initialize local random number generators.
init_random(seed);
init_data_seq_random(seed);
return comm;
}
void finalize(lbann_comm* comm) {
#ifdef LBANN_HAS_CUDNN
cudnn::destroy();
#endif
if (comm != nullptr) {
delete comm;
}
El::Finalize();
}
/** hack to avoid long switch/case statement; users should ignore; of interest to developers */
static std::vector<std::string> pool_mode_names = { "invalid", "max", "average", "average_no_pad" };
/** returns a string representation of the pool_mode */
std::string get_pool_mode_name(pool_mode m) {
if ((int)m < 1 or (int)m >= (int)pool_mode_names.size()) {
throw(std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: "
+ " Invalid pool_mode");
}
return pool_mode_names[(int)m];
}
} // namespace lbann
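
The review note on this record worries that a caller of initialize must remember to call finalize; the patch addresses it by binding the cleanup into the smart pointer's deleter. The same pairing expressed in Go returns the cleanup function alongside the object (a sketch under invented names, not LBANN code):

package main

import "fmt"

// Comm stands in for lbann_comm. Initialize hands back the object together
// with the function that must run when it goes out of use, so callers can't
// obtain one without also receiving its finalizer.
type Comm struct{ models int }

func Initialize(models int) (*Comm, func()) {
	c := &Comm{models: models}
	cleanup := func() { fmt.Println("finalize: released comm") }
	return c, cleanup
}

func main() {
	comm, cleanup := Initialize(0)
	defer cleanup() // paired with Initialize at the call site
	fmt.Println("using comm with", comm.models, "models")
}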
| 1 | 13,625 | It bothers me that the user calls `initialize` without also calling `finalize`. It seems to me that we're essentially making `lbann_comm` a singleton object. Going further down this path, we would put `initialize` inside `lbann_comm`'s constructor and `finalize` in the destructor. This has it's own weirdness - the user must make sure it's lifespan matches the beginning and end of the program. We may also run into the case where someone might want multiple instances of `lbann_comm`, but this is not too important. | LLNL-lbann | cpp |
@@ -353,6 +353,7 @@ void test4() {
smi = MolToSmiles(*m);
CHECK_INVARIANT(smi == "c1cc[cH-]c1", smi);
TEST_ASSERT(m->getConformer().is3D() == false);
+ delete m;
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
| 1 |
//
// Copyright (C) 2002-2018 Greg Landrum and Rational Discovery LLC
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include <RDGeneral/test.h>
#include <RDGeneral/RDLog.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/Canon.h>
#include <GraphMol/MonomerInfo.h>
#include "FileParsers.h"
#include "SequenceParsers.h"
#include "SequenceWriters.h"
#include "MolFileStereochem.h"
#include <GraphMol/SmilesParse/SmilesParse.h>
#include <GraphMol/SmilesParse/SmilesWrite.h>
#include <GraphMol/SmilesParse/SmartsWrite.h>
#include <GraphMol/Substruct/SubstructMatch.h>
#include <GraphMol/FileParsers/ProximityBonds.h>
#include <RDGeneral/FileParseException.h>
#include <RDGeneral/BadFileException.h>
#include <RDGeneral/LocaleSwitcher.h>
#include <clocale>
#include <cstdlib>
#include <string>
#include <fstream>
#include <boost/lexical_cast.hpp>
using namespace RDKit;
void test1() {
BOOST_LOG(rdInfoLog) << "testing atom query parsing" << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/list-query.mol";
RWMol *m = MolFileToMol(fName, false);
// MolOps::sanitizeMol(*m);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 6);
std::string smi = MolToSmiles(*m);
TEST_ASSERT(smi == "C1=CC=CC=C1");
m->updatePropertyCache();
smi = MolToSmarts(*m);
TEST_ASSERT(smi == "[#6]1=[#6]-[#6]=[#6]-[#6]=[#6,#7,#15]-1");
smi = "C1=CC=CC=C1";
RWMol *m2 = SmilesToMol(smi, false, false);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 6);
// sanitize it, which will aromatize the bonds... we will not match:
MolOps::sanitizeMol(*m2);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "N1=CC=CC=C1";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 6);
delete m2;
smi = "S1=CC=CC=C1";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "P1=CC=CC=C1";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 6);
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/not-list-query.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC(=C)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(=O)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CC(=N)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CC(=O)C(=C)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "C(=C)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
// make sure new-style atom lists override old-style atom lists:
delete m;
fName = rdbase +
"/Code/GraphMol/FileParsers/test_data/conflicting-list-query.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC(=C)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(=O)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
// longer list queries, this was issue 2413431:
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/list-query-long.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(14)->hasQuery());
smi = "C1COC2=CC3=CC4=C(C=CC=C4)C=C3C=C2C1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C[Se]C2=CC3=CC4=C(C=CC=C4)C=C3C=C2C1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C[Te]C2=CC3=CC4=C(C=CC=C4)C=C3C=C2C1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C[As]C2=CC3=CC4=C(C=CC=C4)C=C3C=C2C1";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test2() {
BOOST_LOG(rdInfoLog) << "testing bond query parsing" << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/bond-query.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getNumAtoms() == 5);
std::string smi = MolToSmiles(*m);
TEST_ASSERT(smi == "C=CC~CC");
smi = "C1=CC=CC=C1";
RWMol *m2 = SmilesToMol(smi, false, false);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
// sanitize it (making bonds aromatic) ... we will not match:
MolOps::sanitizeMol(*m2);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C=CC=CC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "C=CCCC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/RingBondQuery.mol";
delete m;
m = MolFileToMol(fName);
TEST_ASSERT(m->getNumAtoms() == 5);
delete m2;
smi = "C1CCC1C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1CC2C1C2";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ChainBondQuery.mol";
delete m;
m = MolFileToMol(fName);
TEST_ASSERT(m->getNumAtoms() == 5);
delete m2;
smi = "C1CCC1C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "C1CC2C1C2";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
// - - - - - - - - - - - - - - - - - - - - - - - -
// this was github issue #269
// - - - - - - - - - - - - - - - - - - - - - - - -
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/bond-query4.mol";
delete m;
m = MolFileToMol(fName);
TEST_ASSERT(m->getNumAtoms() == 5);
delete m2;
smi = "C1CCC1C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1CCC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "C1C=CC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "C1C#CC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CCCC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CC=CC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CC#CC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/bond-query5.mol";
delete m;
m = MolFileToMol(fName);
TEST_ASSERT(m->getNumAtoms() == 5);
delete m2;
smi = "C1CCC1C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1CCC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C=CC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C#CC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CCCC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "CC=CC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "CC#CC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/bond-query6.mol";
delete m;
m = MolFileToMol(fName);
TEST_ASSERT(m->getNumAtoms() == 5);
delete m2;
smi = "C1CCC1C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1CCC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C=CC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C#CC1=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CCCC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "CC=CC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "CC#CC=C";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test4() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test4 " << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
std::string fName = rdbase + "test_data/mol1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
std::string smi = MolToSmiles(*m);
CHECK_INVARIANT(smi == "c1cc[cH-]c1", smi);
TEST_ASSERT(m->getConformer().is3D() == false);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
smi = MolToSmiles(*m);
CHECK_INVARIANT(smi == "c1cc[cH-]c1", smi);
TEST_ASSERT(m->getConformer().is3D() == false);
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void test5() {
// formerly problematic molecules
BOOST_LOG(rdInfoLog) << " ----------> Test5 " << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
std::string fName = rdbase + "test_data/issue123.mol";
RWMol *m = MolFileToMol(fName);
CHECK_INVARIANT(m, "");
TEST_ASSERT(m->getNumAtoms() == 23);
TEST_ASSERT(m->getConformer().is3D() == true);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 23);
TEST_ASSERT(m->getConformer().is3D() == true);
delete m;
// now try without removing the Hs:
m = MolFileToMol(fName, true, false);
CHECK_INVARIANT(m, "");
TEST_ASSERT(m->getNumAtoms() == 39);
}
void test6() {
BOOST_LOG(rdInfoLog) << "testing chirality parsing" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
std::string fName = rdbase + "test_data/chiral1.mol";
RWMol *m;
std::string smi, cip;
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@](F)(Cl)Br");
delete m;
fName = rdbase + "test_data/chiral1a.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@](F)(Cl)Br");
delete m;
fName = rdbase + "test_data/chiral2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@@](F)(Cl)Br");
delete m;
fName = rdbase + "test_data/chiral2a.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@@](F)(Cl)Br");
delete m;
fName = rdbase + "test_data/chiral3.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
#if 1
smi = MolToSmiles(*m, true);
// BOOST_LOG(rdInfoLog) << " smi: " << smi << std::endl;
TEST_ASSERT(smi == "C[C@H](F)Cl");
#endif
delete m;
fName = rdbase + "test_data/chiral3a.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
#if 1
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@H](F)Cl");
#endif
delete m;
fName = rdbase + "test_data/chiral4.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
#if 1
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@@H](F)Cl");
#endif
delete m;
fName = rdbase + "test_data/chiral4a.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
#if 1
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@@H](F)Cl");
#endif
delete m;
fName = rdbase + "test_data/chiral5.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(!m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
#if 1
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "CC(C)(Cl)Br");
#endif
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test7() {
BOOST_LOG(rdInfoLog) << "testing roundtrip chirality parsing" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
RWMol *m, *m2;
std::string fName;
std::string smi, molBlock, smi2, cip;
fName = rdbase + "test_data/chiral1.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@](F)(Cl)Br");
molBlock = MolToMolBlock(*m);
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
smi2 = MolToSmiles(*m2, true);
TEST_ASSERT(smi == smi2);
delete m;
delete m2;
fName = rdbase + "test_data/chiral2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@@](F)(Cl)Br");
molBlock = MolToMolBlock(*m);
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
smi2 = MolToSmiles(*m2, true);
TEST_ASSERT(smi == smi2);
delete m;
delete m2;
fName = rdbase + "test_data/chiral3.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
#if 1
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@H](F)Cl");
molBlock = MolToMolBlock(*m);
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
MolOps::assignStereochemistry(*m2);
TEST_ASSERT(m2->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m2->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
smi2 = MolToSmiles(*m2, true);
TEST_ASSERT(smi == smi2);
delete m2;
#endif
delete m;
fName = rdbase + "test_data/chiral4.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
#if 1
smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C[C@@H](F)Cl");
molBlock = MolToMolBlock(*m);
// BOOST_LOG(rdInfoLog) << molBlock << std::endl;
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
MolOps::assignStereochemistry(*m2);
TEST_ASSERT(m2->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m2->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
// smi2 = MolToSmiles(*m2,true);
// TEST_ASSERT(smi==smi2);
delete m2;
#endif
delete m;
fName = rdbase + "test_data/Issue142d.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
#if 1
smi = MolToSmiles(*m, true);
m2 = SmilesToMol(smi);
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m2;
BOOST_LOG(rdInfoLog) << "SMI: " << smi << std::endl;
std::cout << "***************************************" << std::endl;
molBlock = MolToMolBlock(*m);
std::cout << "***************************************" << std::endl;
BOOST_LOG(rdInfoLog) << molBlock << std::endl;
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m2;
#endif
delete m;
fName = rdbase + "test_data/Issue142b.mol";
m = MolFileToMol(fName);
// BOOST_LOG(rdInfoLog) << m->getNumAtoms() << "\n";
// BOOST_LOG(rdInfoLog) << "-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-" <<
// std::endl;
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 9);
MolOps::assignStereochemistry(*m);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(0)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
TEST_ASSERT(m->getAtomWithIdx(3)->hasProp(common_properties::_CIPCode));
m->getAtomWithIdx(3)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
#if 1
smi = MolToSmiles(*m, true);
m2 = SmilesToMol(smi);
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m2;
// BOOST_LOG(rdInfoLog) << "SMI: "<< smi << std::endl;
BOOST_LOG(rdInfoLog) << m->getNumAtoms() << " "
<< m->getConformer().getNumAtoms() << "\n";
molBlock = MolToMolBlock(*m);
// BOOST_LOG(rdInfoLog) << molBlock << std::endl;
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m2;
#endif
delete m;
fName = rdbase + "test_data/issue142a.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 28);
#if 1
smi = MolToSmiles(*m, true);
m2 = SmilesToMol(smi);
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m2;
molBlock = MolToMolBlock(*m);
// BOOST_LOG(rdInfoLog) << molBlock << std::endl;
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m2;
#endif
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test8() {
BOOST_LOG(rdInfoLog) << "testing reading without sanitization" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
RWMol *m;
std::string fName;
std::string smi, molBlock, smi2;
// in this case the test means to not remove Hs:
fName = rdbase + "test_data/unsanitary.mol";
m = MolFileToMol(fName, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 6);
delete m;
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
delete m;
fName = rdbase + "test_data/unsanitary2.mol";
m = MolFileToMol(fName, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 9);
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue145() {
BOOST_LOG(rdInfoLog) << "testing Issue145:\n Mol parsing: molecule yields "
"non-canonical smiles from mol block"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
RWMol *m, *m2;
std::string fName;
std::string smi, molBlock, smi2;
fName = rdbase + "test_data/issue145.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 19);
smi = MolToSmiles(*m, true);
m2 = SmilesToMol(smi);
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m2;
molBlock = MolToMolBlock(*m);
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2)
smi2 = MolToSmiles(*m2, true);
if (smi != smi2) {
BOOST_LOG(rdInfoLog) << "\n " << smi << "\n !=\n " << smi2 << std::endl;
}
TEST_ASSERT(smi == smi2);
delete m;
delete m2;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue148() {
BOOST_LOG(rdInfoLog) << "testing Issue148:\n Mol files containing mis-drawn "
"nitro groups not properly parsed"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
RWMol *m;
std::string fName;
std::string smi, molBlock, smi2;
fName = rdbase + "test_data/issue148.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 9);
TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == 1);
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue180() {
BOOST_LOG(rdInfoLog) << "testing Issue180: bad Z/E assignments" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
RWMol *m;
std::string fName;
std::string code;
Bond *bond;
fName = rdbase + "test_data/Issue180.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
bond = m->getBondWithIdx(2);
TEST_ASSERT(bond->getBondType() == Bond::DOUBLE);
TEST_ASSERT(bond->getStereo() == Bond::STEREOZ);
bond = m->getBondWithIdx(5);
TEST_ASSERT(bond->getBondType() == Bond::DOUBLE);
TEST_ASSERT(bond->getStereo() == Bond::STEREOE);
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue264() {
BOOST_LOG(rdInfoLog) << "testing Issue264: bad stereochemistry from mol files"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
RWMol *m1, *m2;
std::string smi1, smi2;
std::string fName;
fName = rdbase + "test_data/Issue264-1.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
fName = rdbase + "test_data/Issue264-2.mol";
m2 = MolFileToMol(fName);
TEST_ASSERT(m2);
smi1 = MolToSmiles(*m1, false);
smi2 = MolToSmiles(*m2, false);
TEST_ASSERT(smi1 == smi2);
smi1 = MolToSmiles(*m1, true);
smi2 = MolToSmiles(*m2, true);
BOOST_LOG(rdInfoLog) << smi1 << std::endl;
BOOST_LOG(rdInfoLog) << smi2 << std::endl;
TEST_ASSERT(smi1 != smi2);
delete m1;
delete m2;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue399() {
BOOST_LOG(rdInfoLog) << "testing Issue399: bond wedging cleanup" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
RWMol *m1;
std::string smi1, smi2;
std::string fName;
fName = rdbase + "Issue399a.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
#if 1
smi1 = MolToSmiles(*m1, true);
TEST_ASSERT(smi1 == "C[C@H]1CO1");
#endif
MolOps::assignStereochemistry(*m1);
TEST_ASSERT(m1->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
m1->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, smi2);
TEST_ASSERT(smi2 == "S");
#if 1
WedgeMolBonds(*m1, &m1->getConformer());
TEST_ASSERT(m1->getBondWithIdx(0)->getBondDir() == Bond::BEGINWEDGE);
TEST_ASSERT(m1->getBondWithIdx(1)->getBondDir() == Bond::NONE);
TEST_ASSERT(m1->getBondWithIdx(2)->getBondDir() == Bond::NONE);
TEST_ASSERT(m1->getBondWithIdx(3)->getBondDir() == Bond::NONE);
#endif
delete m1;
// make sure we prefer wedging bonds to Hs:
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
MolOps::addHs(*m1, false, true);
TEST_ASSERT(m1->getAtomWithIdx(7)->getAtomicNum() == 1);
TEST_ASSERT(m1->getBondBetweenAtoms(1, 7));
TEST_ASSERT(m1->getBondBetweenAtoms(1, 7)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m1->getBondBetweenAtoms(1, 7)->getBondDir() == Bond::NONE);
WedgeMolBonds(*m1, &m1->getConformer());
TEST_ASSERT(m1->getBondBetweenAtoms(1, 7)->getBondDir() == Bond::BEGINDASH);
delete m1;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileChgLines() {
BOOST_LOG(rdInfoLog) << "testing handling of charge lines" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
// SF.Net Issue1603923: problems with multiple chg lines
{
RWMol *m1;
std::string fName;
fName = rdbase + "MolFileChgBug.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getAtomWithIdx(24)->getFormalCharge() == -1);
TEST_ASSERT(m1->getAtomWithIdx(25)->getFormalCharge() == -1);
delete m1;
}
// many charges in one molecule:
{
RWMol *m1;
std::string fName;
fName = rdbase + "manycharges.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getFormalCharge() == -1);
TEST_ASSERT(m1->getAtomWithIdx(13)->getFormalCharge() == -1);
std::string molBlock = MolToMolBlock(*m1);
// std::cerr<<molBlock<<std::endl;
delete m1;
m1 = MolBlockToMol(molBlock);
// m1->debugMol(std::cerr);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getFormalCharge() == -1);
TEST_ASSERT(m1->getAtomWithIdx(13)->getFormalCharge() == -1);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testDblBondStereochem() {
BOOST_LOG(rdInfoLog) << "testing basic double bond stereochemistry"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
RWMol *m1;
std::string fName = rdbase + "simple_z.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOZ);
delete m1;
}
{
RWMol *m1;
std::string fName = rdbase + "simple_e.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOE);
delete m1;
}
{
RWMol *m1;
std::string fName = rdbase + "simple_either.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOANY);
TEST_ASSERT(m1->getBondWithIdx(0)->getBondDir() == Bond::EITHERDOUBLE);
delete m1;
}
// the next group for sf.net issue 3009836
BOOST_LOG(rdInfoLog) << " sub-test for issue 3099836" << std::endl;
{
RWMol *m1;
std::string fName = rdbase + "Issue3009836.1.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondBetweenAtoms(3, 4)->getStereo() == Bond::STEREOZ);
delete m1;
}
{
RWMol *m1;
std::string fName = rdbase + "Issue3009836.2.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondBetweenAtoms(3, 4)->getStereo() == Bond::STEREOZ);
delete m1;
}
{
RWMol *m1;
std::string fName = rdbase + "Issue3009836.3.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondBetweenAtoms(6, 7)->getStereo() == Bond::STEREOE);
TEST_ASSERT(m1->getBondBetweenAtoms(10, 11)->getStereo() == Bond::STEREOZ);
delete m1;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testSymmetricDblBondStereochem() {
// this was sf.net issue 1718794:
// http://sourceforge.net/tracker/index.php?func=detail&aid=1718794&group_id=160139&atid=814650)
BOOST_LOG(rdInfoLog) << "testing double bonds with symmetric substituents"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
RWMol *m1;
std::string fName, smi;
fName = rdbase + "cistrans.1a.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOE);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi == "C/C=C/Cl");
fName = rdbase + "cistrans.2a.mol";
delete m1;
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOZ);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi == "C/C=C\\Cl");
fName = rdbase + "cistrans.1.mol";
delete m1;
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOE);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi == "C/C=C/C");
fName = rdbase + "cistrans.2.mol";
delete m1;
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOZ);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi == "C/C=C\\C");
fName = rdbase + "cistrans.3.mol";
delete m1;
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getBondWithIdx(0)->getStereo() == Bond::STEREOANY);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi == "CC=CC");
delete m1;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testRingDblBondStereochem() {
// this was sf.net issue 1725068:
// http://sourceforge.net/tracker/index.php?func=detail&aid=1725068&group_id=160139&atid=814650
BOOST_LOG(rdInfoLog)
<< "testing double bonds in rings with stereochem specifications"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
RWMol *m1;
std::string fName, smi;
fName = rdbase + "badringstereochem3.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi.find("/", 0) == std::string::npos);
TEST_ASSERT(smi.find("\\", 0) == std::string::npos);
delete m1;
fName = rdbase + "badringstereochem2.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi.find("/", 0) == std::string::npos);
TEST_ASSERT(smi.find("\\", 0) == std::string::npos);
delete m1;
fName = rdbase + "badringstereochem.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
smi = MolToSmiles(*m1, true);
TEST_ASSERT(smi.find("/", 0) == std::string::npos);
TEST_ASSERT(smi.find("\\", 0) == std::string::npos);
delete m1;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileRGroups() {
BOOST_LOG(rdInfoLog) << "testing mol file R-group parsing" << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/rgroups1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
unsigned int idx;
std::string label;
TEST_ASSERT(m->getAtomWithIdx(3)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(3)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 2);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(3)->getIsotope(), 2));
TEST_ASSERT(m->getAtomWithIdx(4)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(4)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 1);
TEST_ASSERT(m->getAtomWithIdx(4)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(4)->getIsotope(), 1));
// test sf.net issue 3316600:
TEST_ASSERT(m->getAtomWithIdx(3)->hasProp(common_properties::dummyLabel));
m->getAtomWithIdx(3)->getProp(common_properties::dummyLabel, label);
TEST_ASSERT(label == "R2");
TEST_ASSERT(m->getAtomWithIdx(3)->getSymbol() == "R2");
TEST_ASSERT(m->getAtomWithIdx(4)->hasProp(common_properties::dummyLabel));
m->getAtomWithIdx(4)->getProp(common_properties::dummyLabel, label);
TEST_ASSERT(label == "R1");
TEST_ASSERT(m->getAtomWithIdx(4)->getSymbol() == "R1");
RWMol *m2;
MatchVectType mv;
std::string smi;
smi = "C1C(O)C1C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "C1CC(O)C1C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C(CO)C1CC";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "CC(=O)CC";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/rgroups2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(3)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(3)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 1);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(3)->getIsotope(), 1));
TEST_ASSERT(m->getAtomWithIdx(4)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(4)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 1);
TEST_ASSERT(m->getAtomWithIdx(4)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(4)->getIsotope(), 1));
smi = "C1C(O)C1C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "C1CC(O)C1C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "C1C(CO)C1CC";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 5);
delete m2;
smi = "CC(=O)CC";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/rgroups3.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(3)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(3)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 11);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(3)->getIsotope(), 11));
TEST_ASSERT(m->getAtomWithIdx(4)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(4)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 503);
TEST_ASSERT(m->getAtomWithIdx(4)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(4)->getIsotope(), 503));
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileDegreeQueries() {
BOOST_LOG(rdInfoLog) << "testing mol file degree queries" << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/subst1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
RWMol *m2;
MatchVectType mv;
std::string smi;
smi = "CC(=O)O";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(=O)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(=O)";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/subst2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC(=O)O";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(=O)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(O)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(O)";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CC(O)(C)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/subst3.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC(=O)O";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 3);
delete m2;
smi = "CC(=O)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 3);
delete m2;
smi = "CC(O)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
smi = "CC(O)";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CC(O)(C)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/subst4.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC(=O)O";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
{
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/combined.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC(=O)[CH-]C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "CC(=O)[C-](C)C";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "CC(=O)CC";
m2 = SmilesToMol(smi, false, false);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
}
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileRBCQueries() {
BOOST_LOG(rdInfoLog) << "testing mol file ring-bond count queries"
<< std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName;
RWMol *m;
RWMol *m2;
MatchVectType mv;
std::string smi;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C1CCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
smi = "C12C3C4C1C5C2C3C45";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_3.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C1CCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C12C3C4C1C5C2C3C45";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_0.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
smi = "C1CCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_4.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C1CCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C12C3C4C1C5C2C3C45";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C1CS234C5CC2CC13CC4C5";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
smi = "C1C2CC3CC4CC1S234";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
  delete m2;
  delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_star.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
smi = "C1CCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_star2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C1CCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "C1CC2C1CC2";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "C12C3C4C1C5C2C3C45";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_star3.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CC";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C1CCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 4);
delete m2;
smi = "C1CC2C1CC2";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C12C3C4C1C5C2C3C45";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
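// The MDL "unsaturated atom" flag produces a query matching atoms with at
// least one multiple bond: below, C=O, C=CO, and C#CO all match the query
// while CO and CCO do not.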
void testMolFileUnsaturationQueries() {
BOOST_LOG(rdInfoLog) << "testing mol file unsaturation queries" << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName;
RWMol *m;
RWMol *m2;
MatchVectType mv;
std::string smi;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/unsaturation.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
smi = "CO";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C=O";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
smi = "CCO";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 0);
delete m2;
smi = "C=CO";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
smi = "C#CO";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 2);
delete m2;
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
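// Conversion of mol file queries to SMARTS: ring-bond-count queries map to
// the SMARTS ring-connectivity primitive "x<n>", and the unsaturation flag
// maps to a recursive SMARTS requiring a double, aromatic, or triple bond.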
void testMolFileQueryToSmarts() {
BOOST_LOG(rdInfoLog) << "testing mol file queries -> SMARTS " << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName;
RWMol *m;
std::string sma;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
sma = MolToSmarts(*m, true);
  TEST_ASSERT(sma == "[#6&x2]-[#6]");
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_3.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
sma = MolToSmarts(*m, true);
  TEST_ASSERT(sma == "[#6&x3]-[#6]");
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_0.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
sma = MolToSmarts(*m, true);
  TEST_ASSERT(sma == "[#6&x0]-[#6]");
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_4.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
sma = MolToSmarts(*m, true);
  TEST_ASSERT(sma == "[#16&x4]-[#6]");
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_star.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
sma = MolToSmarts(*m, true);
  TEST_ASSERT(sma == "[#6&x0]-[#6]");
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/ringcount_star2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
sma = MolToSmarts(*m, true);
TEST_ASSERT(sma.find("[#6&x2]") != std::string::npos);
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/unsaturation.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
sma = MolToSmarts(*m, true);
  TEST_ASSERT(sma == "[#6&$(*=,:,#*)]~[#8]");
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMissingFiles() {
BOOST_LOG(rdInfoLog) << "testing handling of missing files" << std::endl;
std::string fName;
bool ok;
RWMol *m;
(void)m;
fName = "bogus_file.mol";
ok = false;
try {
m = MolFileToMol(fName);
  } catch (BadFileException &) {
ok = true;
}
TEST_ASSERT(ok);
ok = false;
try {
m = TPLFileToMol(fName);
  } catch (BadFileException &) {
ok = true;
}
TEST_ASSERT(ok);
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue1965035() {
BOOST_LOG(rdInfoLog)
<< "testing issue Issue1965035: problems with WedgeMolBonds "
<< std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName;
RWMol *m;
std::string sma;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/Issue1965035.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
// the mol file parser removes bond wedging info:
TEST_ASSERT(m->getBondWithIdx(4)->getBondDir() == Bond::NONE);
// but a chiral tag is assigned:
TEST_ASSERT(m->getAtomWithIdx(2)->getChiralTag() == Atom::CHI_TETRAHEDRAL_CW);
WedgeMolBonds(*m, &m->getConformer());
TEST_ASSERT(m->getBondWithIdx(4)->getBondDir() == Bond::BEGINDASH);
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testRadicals() {
BOOST_LOG(rdInfoLog) << "testing handling of radicals " << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName;
RWMol *m;
std::string smiles;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/radical.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 0);
TEST_ASSERT(m->getAtomWithIdx(1)->getNumRadicalElectrons() == 1);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 0);
TEST_ASSERT(m->getAtomWithIdx(1)->getNumRadicalElectrons() == 1);
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
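// Out-of-range CTAB bond orders: an order of 0 is read as an UNSPECIFIED,
// non-query bond, while 9 yields a null query bond (description
// "BondNull") that matches any bond.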
void testBadBondOrders() {
BOOST_LOG(rdInfoLog)
<< "testing handling of bogus bond orders (issue 2337369)" << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName;
RWMol *m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/bondorder0.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondBetweenAtoms(0, 1)->getBondType() == Bond::UNSPECIFIED);
TEST_ASSERT(!m->getBondBetweenAtoms(0, 1)->hasQuery());
delete m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/bondorder9.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondBetweenAtoms(0, 1)->hasQuery());
TEST_ASSERT(m->getBondBetweenAtoms(0, 1)->getQuery()->getDescription() ==
"BondNull");
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
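// CTAB atom stereo parity flags (1 = odd, 2 = even neighbor permutation)
// are stored on parsing as the molParity atom property. On writing, parity
// is only emitted once stereochemistry has actually been perceived, hence
// the assignChiralTypesFrom3D() calls before the round trips below.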
void testAtomParity() {
BOOST_LOG(rdInfoLog) << "testing handling of atom stereo parity flags"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
int parity;
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/parity.simple1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 1);
// if we don't perceive the stereochem first, no parity
// flags end up in the output:
std::string molBlock = MolToMolBlock(*m);
RWMol *m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(!m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
delete m2;
// now perceive stereochem, then look for the parity
// flags:
MolOps::assignChiralTypesFrom3D(*m);
molBlock = MolToMolBlock(*m);
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m2->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 1);
delete m2;
delete m;
}
{
int parity;
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/parity.simple2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 2);
MolOps::assignChiralTypesFrom3D(*m);
std::string molBlock = MolToMolBlock(*m);
RWMol *m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m2->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 2);
delete m2;
delete m;
}
{
// a case with an H on the chiral center:
int parity;
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/parity.simpleH1.mol";
RWMol *m = MolFileToMol(fName, true, false);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 1);
MolOps::assignChiralTypesFrom3D(*m);
std::string molBlock = MolToMolBlock(*m);
RWMol *m2 = MolBlockToMol(molBlock, true, false);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m2->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 1);
delete m2;
// if we remove the H and write things out, we should
// still get the right answer back:
m2 = (RWMol *)MolOps::removeHs(*((ROMol *)m));
molBlock = MolToMolBlock(*m2);
delete m2;
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m2->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 1);
delete m2;
delete m;
}
{
// a case with an H on the chiral center:
int parity;
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/parity.simpleH2.mol";
RWMol *m = MolFileToMol(fName, true, false);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 2);
MolOps::assignChiralTypesFrom3D(*m);
std::string molBlock = MolToMolBlock(*m);
RWMol *m2 = MolBlockToMol(molBlock, true, false);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m2->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 2);
delete m2;
m2 = (RWMol *)MolOps::removeHs(*((ROMol *)m));
molBlock = MolToMolBlock(*m2);
delete m2;
m2 = MolBlockToMol(molBlock);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m2->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 2);
delete m2;
delete m;
}
{
// a case with an N as the "chiral" center
int parity;
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/parity.nitrogen.mol";
RWMol *m = MolFileToMol(fName, true, false);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molParity));
m->getAtomWithIdx(1)->getProp(common_properties::molParity, parity);
TEST_ASSERT(parity == 1);
MolOps::assignChiralTypesFrom3D(*m);
std::string molBlock = MolToMolBlock(*m);
RWMol *m2 = MolBlockToMol(molBlock, true, false);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(!m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
delete m2;
delete m;
}
{
// a case with two Hs on the chiral center:
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/parity.twoHs.mol";
RWMol *m = MolFileToMol(fName, true, false);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(!m->getAtomWithIdx(1)->hasProp(common_properties::molParity));
// add a bogus chiral spec:
m->getAtomWithIdx(0)->setChiralTag(Atom::CHI_TETRAHEDRAL_CW);
std::string molBlock = MolToMolBlock(*m);
RWMol *m2 = (RWMol *)MolOps::removeHs(*((ROMol *)m));
molBlock = MolToMolBlock(*m2);
delete m2;
m2 = MolBlockToMol(molBlock, true, false);
TEST_ASSERT(m2);
TEST_ASSERT(!m2->getAtomWithIdx(0)->hasProp(common_properties::molParity));
TEST_ASSERT(!m2->getAtomWithIdx(1)->hasProp(common_properties::molParity));
delete m2;
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue2692246() {
// basic writing test
BOOST_LOG(rdInfoLog) << " Testing issue 2692246 " << std::endl;
std::string smiles(120, 'C');
smiles += "[CH3+]";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 121);
TEST_ASSERT(m->getAtomWithIdx(120)->getFormalCharge() == 1);
delete m;
BOOST_LOG(rdInfoLog) << " done" << std::endl;
}
void testKekulizationSkip() {
// basic writing test
BOOST_LOG(rdInfoLog) << " Testing mol blocks without kekulization "
<< std::endl;
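  // In a V2000 bond block the third field is the bond order, and 4 means
  // aromatic. Skipping kekulization should leave "<a1> <a2> 4" lines in
  // the output; the default (kekulized) output should contain none.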
std::string smiles("c1ccccc1");
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
std::string molBlock = MolToMolBlock(*m, true, -1, false);
TEST_ASSERT(molBlock.find("1 2 4") != std::string::npos);
TEST_ASSERT(molBlock.find("2 3 4") != std::string::npos);
TEST_ASSERT(molBlock.find("3 4 4") != std::string::npos);
molBlock = MolToMolBlock(*m);
TEST_ASSERT(molBlock.find("1 2 4") == std::string::npos);
TEST_ASSERT(molBlock.find("2 3 4") == std::string::npos);
TEST_ASSERT(molBlock.find("3 4 4") == std::string::npos);
delete m;
BOOST_LOG(rdInfoLog) << " done" << std::endl;
}
void testMolFileAtomValues() {
BOOST_LOG(rdInfoLog) << "testing atom values in mol files" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/";
{
RWMol *m;
std::string fName, val;
fName = rdbase + "test_data/AtomProps1.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(
!m->getAtomWithIdx(0)->hasProp(common_properties::molFileValue));
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molFileValue));
m->getAtomWithIdx(1)->getProp(common_properties::molFileValue, val);
TEST_ASSERT(val == "acidchloride");
    TEST_ASSERT(getAtomValue(m->getAtomWithIdx(1)) == "acidchloride");
TEST_ASSERT(
m->getAtomWithIdx(0)->hasProp(common_properties::molAtomMapNumber));
TEST_ASSERT(
m->getAtomWithIdx(1)->hasProp(common_properties::molAtomMapNumber));
TEST_ASSERT(
m->getAtomWithIdx(2)->hasProp(common_properties::molAtomMapNumber));
TEST_ASSERT(
!m->getAtomWithIdx(3)->hasProp(common_properties::molAtomMapNumber));
TEST_ASSERT(m->getAtomWithIdx(0)->getAtomMapNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(1)->getAtomMapNum() == 2);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomMapNum() == 3);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomMapNum() == 0);
// round trip
m->getAtomWithIdx(0)->setAtomMapNum(4);
setAtomRLabel(m->getAtomWithIdx(3), 1);
setAtomAlias(m->getAtomWithIdx(0), "acidchloride");
setAtomValue(m->getAtomWithIdx(0), "foobar");
RWMol *m2 = MolBlockToMol(MolToMolBlock(*m));
TEST_ASSERT(m2);
TEST_ASSERT(m->getAtomWithIdx(0)->getAtomMapNum() == 4);
TEST_ASSERT(m->getAtomWithIdx(1)->getAtomMapNum() == 2);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomMapNum() == 3);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomMapNum() == 0);
TEST_ASSERT(getAtomRLabel(m->getAtomWithIdx(3)) == 1);
TEST_ASSERT(getAtomAlias(m->getAtomWithIdx(0)) == "acidchloride");
TEST_ASSERT(getAtomValue(m->getAtomWithIdx(0)) == "foobar");
delete m;
delete m2;
}
{
RWMol *m;
std::string fName, val;
fName = rdbase + "test_data/AtomProps2.mol";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(
!m->getAtomWithIdx(0)->hasProp(common_properties::molFileValue));
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molFileValue));
m->getAtomWithIdx(1)->getProp(common_properties::molFileValue, val);
TEST_ASSERT(val == "acidchloride");
TEST_ASSERT(m->getAtomWithIdx(2)->hasProp(common_properties::molFileValue));
m->getAtomWithIdx(2)->getProp(common_properties::molFileValue, val);
TEST_ASSERT(val == "testing");
TEST_ASSERT(m->getAtomWithIdx(3)->getFormalCharge() == -1);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
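// Generic atoms in mol files: "A" matches any atom except hydrogen, "Q"
// matches any atom except carbon and hydrogen, and "*" is treated here as
// an unqueried dummy that matches anything, explicit hydrogens included.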
void testMolFileAtomQueries() {
BOOST_LOG(rdInfoLog) << "testing handling of A, Q, and * in mol files"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/query_star.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
RWMol *m2;
MatchVectType mv;
std::string smi;
smi = "[H]c1ccccc1";
m2 = SmilesToMol(smi, false, false);
MolOps::sanitizeMol(*m2);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 7);
delete m2;
smi = "Cc1ccccc1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 7);
delete m2;
smi = "Clc1ccccc1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 7);
delete m2;
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/query_A.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
RWMol *m2;
MatchVectType mv;
std::string smi;
smi = "[H]c1ccccc1";
m2 = SmilesToMol(smi, false, false);
MolOps::sanitizeMol(*m2);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "Cc1ccccc1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 7);
delete m2;
smi = "Clc1ccccc1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 7);
delete m2;
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/query_Q.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
RWMol *m2;
MatchVectType mv;
std::string smi;
smi = "[H]c1ccccc1";
m2 = SmilesToMol(smi, false, false);
MolOps::sanitizeMol(*m2);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "Cc1ccccc1";
m2 = SmilesToMol(smi);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smi = "Clc1ccccc1";
m2 = SmilesToMol(smi);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 7);
delete m2;
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testListsAndValues() {
BOOST_LOG(rdInfoLog)
<< "testing handling of mol files with atom lists and values"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/lists_plus_values.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
std::string value;
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::molFileValue));
m->getAtomWithIdx(1)->getProp(common_properties::molFileValue, value);
TEST_ASSERT(value == "halogen");
TEST_ASSERT(m->getAtomWithIdx(1)->hasQuery());
TEST_ASSERT(m->getAtomWithIdx(1)->getQuery()->getDescription() == "AtomOr");
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test1V3K() {
BOOST_LOG(rdInfoLog) << "testing basic handling of v3000 mol files"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 8);
TEST_ASSERT(m->getNumBonds() == 8);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.3.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 9);
TEST_ASSERT(m->getNumBonds() == 9);
TEST_ASSERT(m->getAtomWithIdx(4)->getFormalCharge() == -1);
TEST_ASSERT(m->getAtomWithIdx(4)->getIsotope() == 17);
// m->debugMol(std::cerr);
// TEST_ASSERT(m->getBondWithIdx(8)->getBondDir()==Bond::BEGINWEDGE);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.5a.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
TEST_ASSERT(m->getNumBonds() == 4);
TEST_ASSERT(m->getAtomWithIdx(0)->getChiralTag() != Atom::CHI_UNSPECIFIED);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.5b.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
TEST_ASSERT(m->getNumBonds() == 4);
TEST_ASSERT(m->getAtomWithIdx(0)->getChiralTag() == Atom::CHI_UNSPECIFIED);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.6a.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
TEST_ASSERT(m->getNumBonds() == 3);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREOE);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.6b.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
TEST_ASSERT(m->getNumBonds() == 3);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREOANY);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.crash1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 7);
TEST_ASSERT(m->getNumBonds() == 7);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void test2V3K() {
BOOST_LOG(rdInfoLog) << "testing more queries from v3000 mol files"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 8);
TEST_ASSERT(m->getNumBonds() == 8);
std::string smiles = "O=C(O)C1OCCC1";
RWMol *m2 = SmilesToMol(smiles);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "O=C(O)C1OCCC1";
m2 = SmilesToMol(smiles);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "O=C(O)C1SCCS1";
m2 = SmilesToMol(smiles);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "O=C(O)C1OCCN1";
m2 = SmilesToMol(smiles);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "O=C(O)C1OCCO1";
m2 = SmilesToMol(smiles);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.4a.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 2);
TEST_ASSERT(m->getNumBonds() == 1);
std::string smiles = "OC1OCC1";
RWMol *m2 = SmilesToMol(smiles);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "C1OCC1";
m2 = SmilesToMol(smiles);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "COCC";
m2 = SmilesToMol(smiles);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.4b.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 2);
TEST_ASSERT(m->getNumBonds() == 1);
std::string smiles = "OC1OCC1";
RWMol *m2 = SmilesToMol(smiles);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "C1OCC1";
m2 = SmilesToMol(smiles);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "COCC";
m2 = SmilesToMol(smiles);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/v3k.rbc.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getNumBonds() == 2);
std::string smiles = "C1CC1";
RWMol *m2 = SmilesToMol(smiles);
TEST_ASSERT(m2);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "CCC";
m2 = SmilesToMol(smiles);
TEST_ASSERT(m2);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
smiles = "N1NC2NNC12";
m2 = SmilesToMol(smiles);
TEST_ASSERT(m2);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/chebi_15469.v3k.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 53);
TEST_ASSERT(m->getNumBonds() == 55);
TEST_ASSERT(m->getAtomWithIdx(52)->getAtomicNum() == 0);
TEST_ASSERT(!m->getAtomWithIdx(52)->hasQuery());
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/chebi_57262.v3k.2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 22);
TEST_ASSERT(m->getNumBonds() == 21);
TEST_ASSERT(m->getAtomWithIdx(18)->getAtomicNum() == 0);
TEST_ASSERT(!m->getAtomWithIdx(18)->hasQuery());
TEST_ASSERT(m->getAtomWithIdx(18)->getIsotope() == 1);
TEST_ASSERT(m->getAtomWithIdx(21)->getAtomicNum() == 0);
TEST_ASSERT(!m->getAtomWithIdx(21)->hasQuery());
TEST_ASSERT(m->getAtomWithIdx(21)->getIsotope() == 2);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/chebi_57262.v3k.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 22);
TEST_ASSERT(m->getNumBonds() == 21);
TEST_ASSERT(m->getAtomWithIdx(18)->getAtomicNum() == 0);
TEST_ASSERT(!m->getAtomWithIdx(18)->hasQuery());
TEST_ASSERT(m->getAtomWithIdx(18)->getIsotope() == 1);
TEST_ASSERT(m->getAtomWithIdx(21)->getAtomicNum() == 0);
TEST_ASSERT(!m->getAtomWithIdx(21)->hasQuery());
TEST_ASSERT(m->getAtomWithIdx(21)->getIsotope() == 2);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
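// V3000 writing: the trailing argument in the MolToMolBlock(*m, true, -1,
// true, true) calls below requests V3000 output. The writer also switches
// to V3000 on its own once a molecule exceeds what the fixed-width V2000
// counts line can hold (999 atoms); the 1024-atom chain below checks that
// cutover.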
void test3V3K() {
BOOST_LOG(rdInfoLog) << "testing basic writing of v3000 mol files"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
std::string fName;
{
// charges
fName = rdbase + "issue148.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 9);
TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == 1);
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 9);
TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == 1);
delete m;
}
{
// multiple charge lines
fName = rdbase + "MolFileChgBug.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(24)->getFormalCharge() == -1);
TEST_ASSERT(m->getAtomWithIdx(25)->getFormalCharge() == -1);
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(24)->getFormalCharge() == -1);
TEST_ASSERT(m->getAtomWithIdx(25)->getFormalCharge() == -1);
delete m;
}
{
// radicals
fName = rdbase + "radical.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 0);
TEST_ASSERT(m->getAtomWithIdx(1)->getNumRadicalElectrons() == 1);
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 0);
TEST_ASSERT(m->getAtomWithIdx(1)->getNumRadicalElectrons() == 1);
delete m;
}
{
// radical and valence
fName = rdbase + "CH.v3k.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 1);
TEST_ASSERT(m->getAtomWithIdx(0)->getNoImplicit());
TEST_ASSERT(m->getAtomWithIdx(0)->getNumExplicitHs() == 1);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 1);
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
// no bonds in this one, make sure there's no bond block:
TEST_ASSERT(mb.find("BEGIN ATOM") != std::string::npos);
TEST_ASSERT(mb.find("BEGIN BOND") == std::string::npos);
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 1);
TEST_ASSERT(m->getAtomWithIdx(0)->getNoImplicit());
TEST_ASSERT(m->getAtomWithIdx(0)->getNumExplicitHs() == 1);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 1);
delete m;
}
{
// R Groups
fName = rdbase + "rgroups1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
unsigned int idx;
std::string label;
TEST_ASSERT(
m->getAtomWithIdx(3)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(3)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 2);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(3)->getIsotope(), 2));
TEST_ASSERT(
m->getAtomWithIdx(4)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(4)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 1);
TEST_ASSERT(m->getAtomWithIdx(4)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(4)->getIsotope(), 1));
TEST_ASSERT(m->getAtomWithIdx(3)->hasProp(common_properties::dummyLabel));
m->getAtomWithIdx(3)->getProp(common_properties::dummyLabel, label);
TEST_ASSERT(label == "R2");
TEST_ASSERT(m->getAtomWithIdx(3)->getSymbol() == "R2");
TEST_ASSERT(m->getAtomWithIdx(4)->hasProp(common_properties::dummyLabel));
m->getAtomWithIdx(4)->getProp(common_properties::dummyLabel, label);
TEST_ASSERT(label == "R1");
TEST_ASSERT(m->getAtomWithIdx(4)->getSymbol() == "R1");
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(
m->getAtomWithIdx(3)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(3)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 2);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(3)->getIsotope(), 2));
TEST_ASSERT(
m->getAtomWithIdx(4)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(4)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 1);
TEST_ASSERT(m->getAtomWithIdx(4)->getAtomicNum() == 0);
TEST_ASSERT(feq(m->getAtomWithIdx(4)->getIsotope(), 1));
TEST_ASSERT(m->getAtomWithIdx(3)->hasProp(common_properties::dummyLabel));
m->getAtomWithIdx(3)->getProp(common_properties::dummyLabel, label);
TEST_ASSERT(label == "R2");
TEST_ASSERT(m->getAtomWithIdx(3)->getSymbol() == "R2");
TEST_ASSERT(m->getAtomWithIdx(4)->hasProp(common_properties::dummyLabel));
m->getAtomWithIdx(4)->getProp(common_properties::dummyLabel, label);
TEST_ASSERT(label == "R1");
TEST_ASSERT(m->getAtomWithIdx(4)->getSymbol() == "R1");
delete m;
}
{
// automatic cut over to v3k
std::string smiles(1024, 'C');
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 1024);
Conformer *conf = new Conformer(m->getNumAtoms());
m->addConformer(conf, true);
std::string mb = MolToMolBlock(*m);
TEST_ASSERT(mb.find("V2000") == std::string::npos);
TEST_ASSERT(mb.find("V3000") != std::string::npos);
delete m;
}
{
// D in CTAB
fName = rdbase + "D_in_CTAB.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(2)->getIsotope() == 2);
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(2)->getIsotope() == 2);
delete m;
}
{
// T in CTAB
fName = rdbase + "T_in_CTAB.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(2)->getIsotope() == 3);
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(2)->getIsotope() == 3);
delete m;
}
{
// atom list
fName = rdbase + "list-query.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 6);
std::string sma = MolToSmarts(*m);
TEST_ASSERT(sma == "[#6]1:[#6]:[#6]:[#6]:[#6]:[#6,#7,#15]:1");
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 6);
sma = MolToSmarts(*m);
TEST_ASSERT(sma == "[#6]1:[#6]:[#6]:[#6]:[#6]:[#6,#7,#15]:1");
    delete m;
  }
{
// not atom list
fName = rdbase + "not-list-query.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
std::string sma = MolToSmarts(*m);
TEST_ASSERT(sma == "[#6]-[#6](-[#6])=[!#7&!#8]");
std::string mb = MolToMolBlock(*m, true, -1, true, true);
delete m;
m = MolBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
sma = MolToSmarts(*m);
TEST_ASSERT(sma == "[#6]-[#6](-[#6])=[!#7&!#8]");
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
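// Round-tripping double-bond stereochemistry through mol blocks: with 2D
// coordinates attached, an unspecified C=C comes back as STEREOANY (a
// crossed bond), Z/E assignments survive intact, and double bonds to
// terminal atoms (the issue 3009756 cases) stay STEREONONE.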
void testIssue2963522() {
BOOST_LOG(rdInfoLog) << " Testing issue 2963522 " << std::endl;
{
std::string smiles = "CC=CC";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREONONE);
auto *conf = new Conformer(m->getNumAtoms());
conf->setAtomPos(0, RDGeom::Point3D(-1, 1, 0));
conf->setAtomPos(1, RDGeom::Point3D(0, 1, 0));
conf->setAtomPos(2, RDGeom::Point3D(0, -1, 0));
conf->setAtomPos(3, RDGeom::Point3D(1, -1, 0));
m->addConformer(conf, true);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREOANY);
delete m;
}
{
std::string smiles = "C/C=C\\C";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREOZ);
auto *conf = new Conformer(m->getNumAtoms());
conf->setAtomPos(0, RDGeom::Point3D(-1, 1, 0));
conf->setAtomPos(1, RDGeom::Point3D(0, 1, 0));
conf->setAtomPos(2, RDGeom::Point3D(0, -1, 0));
conf->setAtomPos(3, RDGeom::Point3D(-1, -1, 0));
m->addConformer(conf, true);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREOZ);
delete m;
}
{
std::string smiles = "C/C=C/C";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREOE);
auto *conf = new Conformer(m->getNumAtoms());
conf->setAtomPos(0, RDGeom::Point3D(-1, 1, 0));
conf->setAtomPos(1, RDGeom::Point3D(0, 1, 0));
conf->setAtomPos(2, RDGeom::Point3D(0, -1, 0));
conf->setAtomPos(3, RDGeom::Point3D(1, -1, 0));
m->addConformer(conf, true);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREOE);
delete m;
}
{
std::string smiles = "C1C=CC1";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREONONE);
auto *conf = new Conformer(m->getNumAtoms());
conf->setAtomPos(0, RDGeom::Point3D(-1, 1, 0));
conf->setAtomPos(1, RDGeom::Point3D(0, 1, 0));
conf->setAtomPos(2, RDGeom::Point3D(0, -1, 0));
conf->setAtomPos(3, RDGeom::Point3D(-1, -1, 0));
m->addConformer(conf, true);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREONONE);
delete m;
}
{
// this was issue 3009756:
std::string smiles = "CC(=O)C";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREONONE);
auto *conf = new Conformer(m->getNumAtoms());
conf->setAtomPos(0, RDGeom::Point3D(-1, 0, 0));
conf->setAtomPos(1, RDGeom::Point3D(0, 0, 0));
conf->setAtomPos(2, RDGeom::Point3D(0, 1, 0));
conf->setAtomPos(3, RDGeom::Point3D(1, 0, 0));
m->addConformer(conf, true);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREONONE);
delete m;
}
{
// this was issue 3009756:
std::string smiles = "CC(=C)Cl";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREONONE);
auto *conf = new Conformer(m->getNumAtoms());
conf->setAtomPos(0, RDGeom::Point3D(-1, 0, 0));
conf->setAtomPos(1, RDGeom::Point3D(0, 0, 0));
conf->setAtomPos(2, RDGeom::Point3D(0, 1, 0));
conf->setAtomPos(3, RDGeom::Point3D(1, 0, 0));
m->addConformer(conf, true);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(1)->getStereo() == Bond::STEREONONE);
delete m;
}
BOOST_LOG(rdInfoLog) << " done" << std::endl;
}
void testIssue3073163() {
BOOST_LOG(rdInfoLog) << " Testing issue 3073163 " << std::endl;
{
std::string smiles = "C[2H]";
RWMol *m = SmilesToMol(smiles);
TEST_ASSERT(m);
smiles = "[2#1]";
RWMol *p = SmartsToMol(smiles);
TEST_ASSERT(p);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*m, *p, mv));
std::string mb = MolToMolBlock(*m);
// std::cerr<<"mb:\n"<<mb<<"----\n";
RWMol *m2 = MolBlockToMol(mb);
TEST_ASSERT(m2);
// std::cerr<<" mol: "<<MolToSmiles(*m,true)<<std::endl;
// std::cerr<<" mol2: "<<MolToSmiles(*m2,true)<<std::endl;
TEST_ASSERT(SubstructMatch(*m2, *p, mv));
delete m2;
delete m;
delete p;
}
BOOST_LOG(rdInfoLog) << " done" << std::endl;
}
void testIssue3154208() {
BOOST_LOG(rdInfoLog) << " Testing Issue3154208 (a large mol failure)"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/largemol.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 476);
TEST_ASSERT(m->getNumBonds() == 531);
std::cerr << "generating smiles" << std::endl;
std::string smiles = MolToSmiles(*m, false, false, -1, false);
std::cerr << "smiles: " << smiles << std::endl;
std::cerr << "converting back" << std::endl;
RWMol *m2 = SmilesToMol(smiles);
TEST_ASSERT(m2);
TEST_ASSERT(m2->getNumAtoms() == 476);
TEST_ASSERT(m2->getNumBonds() == 531);
MatchVectType mv;
std::cerr << "check isomorphism" << std::endl;
TEST_ASSERT(SubstructMatch(*m, *m2, mv));
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
#if 1
BOOST_LOG(rdInfoLog) << "Large molecule canonical smiles test" << std::endl;
std::string csmiles = MolToSmiles(*m);
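    // Re-root the output SMILES at a different atom on each iteration; the
    // canonical SMILES generated afterwards must be invariant under the
    // choice of root.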
for (unsigned int i = 0; i < 50; ++i) {
if (!(i % 10)) {
BOOST_LOG(rdInfoLog) << "Iteration: " << i + 1 << " of 50" << std::endl;
}
std::string nsmiles = MolToSmiles(*m, false, false, 2 * i, false);
RWMol *nm = SmilesToMol(nsmiles);
TEST_ASSERT(nm);
TEST_ASSERT(nm->getNumAtoms() == 476);
TEST_ASSERT(nm->getNumBonds() == 531);
nsmiles = MolToSmiles(*m);
if (nsmiles != csmiles) {
std::cerr << "MISMATCH:\n" << nsmiles << "\n" << csmiles << "\n";
}
TEST_ASSERT(nsmiles == csmiles);
delete nm;
}
#endif
delete m;
}
BOOST_LOG(rdInfoLog) << " done" << std::endl;
}
void testIssue3228150() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Issue 3228150: round-trip stereochemistry failure"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName;
RWMol *m;
fName = rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3228150.sdf";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(m->getBondWithIdx(2)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(2)->getStereo() == Bond::STEREOZ);
std::string smi1 = MolToSmiles(*m, true);
BOOST_LOG(rdInfoLog) << " : " << smi1 << std::endl;
m->clearComputedProps();
m->updatePropertyCache();
std::string smi2 = MolToSmiles(*m, true);
BOOST_LOG(rdInfoLog) << " : " << smi2 << std::endl;
TEST_ASSERT(smi1 == smi2);
delete m;
}
{
std::string fName;
RWMol *m;
fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3228150.full.sdf";
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(2)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(2)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(m->getBondWithIdx(4)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(4)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(m->getBondWithIdx(6)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(6)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(m->getBondWithIdx(8)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(8)->getStereo() == Bond::STEREOZ);
std::string smi1 = MolToSmiles(*m, true);
BOOST_LOG(rdInfoLog) << " : " << smi1 << std::endl;
MolOps::assignStereochemistry(*m, true, true);
TEST_ASSERT(m->getBondWithIdx(2)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(2)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(m->getBondWithIdx(4)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(4)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(m->getBondWithIdx(6)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(6)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(m->getBondWithIdx(8)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(8)->getStereo() == Bond::STEREOZ);
    std::string smi2 = MolToSmiles(*m, true);
BOOST_LOG(rdInfoLog) << " : " << smi2 << std::endl;
TEST_ASSERT(smi1 == smi2);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue3313540() {
BOOST_LOG(rdInfoLog) << "testing writing mol file R-groups (issue 3313540)"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/rgroups1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
unsigned int idx;
TEST_ASSERT(
m->getAtomWithIdx(3)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(3)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 2);
TEST_ASSERT(
m->getAtomWithIdx(4)->hasProp(common_properties::_MolFileRLabel));
m->getAtomWithIdx(4)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 1);
std::string mb = MolToMolBlock(*m);
RWMol *m2 = MolBlockToMol(mb);
TEST_ASSERT(m2);
TEST_ASSERT(
m2->getAtomWithIdx(3)->hasProp(common_properties::_MolFileRLabel));
m2->getAtomWithIdx(3)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 2);
TEST_ASSERT(
m2->getAtomWithIdx(4)->hasProp(common_properties::_MolFileRLabel));
m2->getAtomWithIdx(4)->getProp(common_properties::_MolFileRLabel, idx);
TEST_ASSERT(idx == 1);
delete m;
delete m2;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue3359739() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3359739 " << std::endl;
std::string smi = "[C]C";
RWMol *m = SmilesToMol(smi);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 3);
std::string molBlock = MolToMolBlock(*m);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
// NOTE: the following is correct according to the current
// state of the code and what the CTAB format supports,
// but it's definitely not chemically correct
TEST_ASSERT(m->getAtomWithIdx(0)->getNumRadicalElectrons() == 1);
delete m;
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3374639() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3374639 " << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3374639.2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3374639.1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3374639.full.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getAtomWithIdx(16)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(16)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testThreeCoordinateChirality() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test three-coordinate chirality "
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/three_coordinate_chirality.1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/three_coordinate_chirality.2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/three_coordinate_chirality.3.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(!m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/three_coordinate_chirality.4.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(!m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/three_coordinate_chirality.5.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/three_coordinate_chirality.6.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(1)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3375647() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3375647 " << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3375647.1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getBondBetweenAtoms(2, 11)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondBetweenAtoms(2, 11)->getBondDir() !=
Bond::EITHERDOUBLE);
TEST_ASSERT(m->getBondBetweenAtoms(2, 11)->getStereo() == Bond::STEREOZ);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3375647.2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getBondBetweenAtoms(2, 11)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondBetweenAtoms(2, 11)->getBondDir() !=
Bond::EITHERDOUBLE);
TEST_ASSERT(m->getBondBetweenAtoms(2, 11)->getStereo() == Bond::STEREOE);
delete m;
}
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3375684() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3375684 " << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3375684.1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getBondBetweenAtoms(6, 7)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondBetweenAtoms(6, 7)->getBondDir() ==
Bond::EITHERDOUBLE);
delete m;
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3375684.2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m->getBondBetweenAtoms(3, 9)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondBetweenAtoms(3, 9)->getBondDir() !=
Bond::EITHERDOUBLE);
TEST_ASSERT(m->getBondBetweenAtoms(3, 9)->getStereo() == Bond::STEREOE);
delete m;
}
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testChiralPhosphorous() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test handling of chiral phosphorous "
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/chiral_phosphorous.1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(5)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(5)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "R");
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/chiral_phosphorous.2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(5)->hasProp(common_properties::_CIPCode));
std::string cip;
m->getAtomWithIdx(5)->getProp(common_properties::_CIPCode, cip);
TEST_ASSERT(cip == "S");
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/chiral_phosphorous.3.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(5)->hasProp(common_properties::_CIPCode));
delete m;
}
{
std::string fName =
rdbase +
"/Code/GraphMol/FileParsers/test_data/chiral_phosphorous.4.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(!m->getAtomWithIdx(5)->hasProp(common_properties::_CIPCode));
delete m;
}
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3392107() {
// basic writing test
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3392107 " << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3392107.1.mol";
RWMol *m = MolFileToMol(fName);
std::string smi;
MatchVectType mv;
RWMol *m2;
smi = "C1CCCCC1";
m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 6);
delete m2;
smi = "C1CCCCN1";
m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 6);
delete m2;
smi = "C1CCNCN1";
m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
TEST_ASSERT(SubstructMatch(*m2, *m, mv));
TEST_ASSERT(mv.size() == 6);
delete m2;
smi = "C1NCNCN1";
m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
TEST_ASSERT(!SubstructMatch(*m2, *m, mv));
delete m2;
delete m;
}
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3432136() {
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3432136 " << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3432136_1.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(!m);
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3432136_1.v3k.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(!m);
}
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3432136_2.v3k.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
    delete m;
  }
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3432136_2.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
    delete m;
  }
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3477283() {
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3477283 " << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3477283.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
    delete m;
  }
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3484552() {
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3484552 " << std::endl;
{
std::string smi = "C[13CH3]";
RWMol *m = SmilesToMol(smi);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(1)->getMass() > 12.999);
std::string molBlock = MolToMolBlock(*m);
TEST_ASSERT(molBlock.find("M ISO") != std::string::npos);
delete m;
m = MolBlockToMol(molBlock);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(1)->getMass() > 12.999);
delete m;
}
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3514824() {
BOOST_LOG(rdInfoLog) << " ----------> Test issue 3514824 " << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3514824.2.mol";
RWMol *m = MolFileToMol(fName, false);
TEST_ASSERT(m);
m->updatePropertyCache();
MolOps::findSSSR(*m);
TEST_ASSERT(m->getRingInfo());
TEST_ASSERT(m->getRingInfo()->isInitialized());
TEST_ASSERT(m->getRingInfo()->numRings() == 6);
    delete m;
  }
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3514824.mol";
RWMol *m = MolFileToMol(fName, false);
TEST_ASSERT(m);
m->updatePropertyCache();
MolOps::findSSSR(*m);
TEST_ASSERT(m->getRingInfo());
TEST_ASSERT(m->getRingInfo()->isInitialized());
TEST_ASSERT(m->getRingInfo()->numRings() == 8);
    delete m;
  }
BOOST_LOG(rdInfoLog) << " Finished <---------- " << std::endl;
}
void testIssue3525799() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Issue 3525799: bad smiles for r groups" << std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue3525799.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
std::string smiles = MolToSmiles(*m, true);
std::cerr << "smiles: " << smiles << std::endl;
TEST_ASSERT(smiles ==
"[1*]c1c([2*])c([3*])c([4*])c(-c2c([9*])oc3c([8*])c([7*])c([6*]"
")c([5*])c3c2=O)c1[10*]");
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue3557675() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Bad issue 3557676: handling of D and T in CTABs"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/D_in_CTAB.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(2)->getIsotope() == 2);
    delete m;
  }
{
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/T_in_CTAB.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(2)->getIsotope() == 3);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testSkipLines() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing skip lines in CTABs" << std::endl;
std::string rdbase = getenv("RDBASE");
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/SkipLines.sdf";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 1);
delete m;
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testIssue269() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Bad issue 269: handling of bad atom symbols in CTABs"
<< std::endl;
std::string rdbase = getenv("RDBASE");
{
// since the new elements were added, the original version of this no longer
// fails. The test input file has been updated to still have an atomic
// symbol that is not recognized. We'll be ok until Mv is an element. :-)
std::string fName =
rdbase + "/Code/GraphMol/FileParsers/test_data/Issue269.mol";
RWMol *m = nullptr;
try {
m = MolFileToMol(fName);
} catch (...) {
}
TEST_ASSERT(!m);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileChiralFlag() {
BOOST_LOG(rdInfoLog) << "testing handling of chiral flags" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
// SF.Net Issue1603923: problems with multiple chg lines
{
RWMol *m1;
std::string fName;
fName = rdbase + "chiral_flag.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->hasProp(common_properties::_MolFileChiralFlag));
unsigned int cflag;
m1->getProp(common_properties::_MolFileChiralFlag, cflag);
TEST_ASSERT(cflag == 1);
delete m1;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileTotalValence() {
BOOST_LOG(rdInfoLog) << "testing handling of mol file valence flags"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
RWMol *m1;
std::string fName;
fName = rdbase + "Na.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getNumAtoms() == 1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNoImplicit());
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumExplicitHs() == 0);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumRadicalElectrons() == 1);
delete m1;
}
{
RWMol *m1;
std::string fName;
fName = rdbase + "CH.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getNumAtoms() == 1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNoImplicit());
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumExplicitHs() == 1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumRadicalElectrons() == 1);
delete m1;
}
{
RWMol *m1;
std::string fName;
fName = rdbase + "CH2.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getNumAtoms() == 1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNoImplicit());
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumExplicitHs() == 2);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumRadicalElectrons() == 2);
delete m1;
}
{
RWMol *m1;
std::string fName;
fName = rdbase + "CH3.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getNumAtoms() == 1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNoImplicit());
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumExplicitHs() == 3);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumRadicalElectrons() == 1);
delete m1;
}
{
// make sure we get it for v3k mol blocks too:
RWMol *m1;
std::string fName;
fName = rdbase + "CH.v3k.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getNumAtoms() == 1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNoImplicit());
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumExplicitHs() == 1);
TEST_ASSERT(m1->getAtomWithIdx(0)->getNumRadicalElectrons() == 1);
delete m1;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub88() {
BOOST_LOG(rdInfoLog)
<< "testing github issue 88: M END not being read from V3K ctabs"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "github88.v3k.mol";
bool ok = false;
try {
MolFileToMol(fName);
} catch (FileParseException &e) {
ok = true;
}
TEST_ASSERT(ok);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub82() {
BOOST_LOG(rdInfoLog) << "testing github issue 82: stereochemistry only "
"perceived if sanitization is done"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "github82.1.mol";
ROMol *m;
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(2)->getChiralTag() != Atom::CHI_UNSPECIFIED);
TEST_ASSERT(m->getAtomWithIdx(3)->getChiralTag() != Atom::CHI_UNSPECIFIED);
TEST_ASSERT(m->getAtomWithIdx(4)->getChiralTag() == Atom::CHI_UNSPECIFIED);
delete m;
m = MolFileToMol(fName, true, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(2)->getChiralTag() != Atom::CHI_UNSPECIFIED);
TEST_ASSERT(m->getAtomWithIdx(3)->getChiralTag() != Atom::CHI_UNSPECIFIED);
TEST_ASSERT(m->getAtomWithIdx(4)->getChiralTag() == Atom::CHI_UNSPECIFIED);
delete m;
m = MolFileToMol(fName, false, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(2)->getChiralTag() != Atom::CHI_UNSPECIFIED);
TEST_ASSERT(m->getAtomWithIdx(3)->getChiralTag() != Atom::CHI_UNSPECIFIED);
TEST_ASSERT(m->getAtomWithIdx(4)->getChiralTag() == Atom::CHI_UNSPECIFIED);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileWithHs() {
BOOST_LOG(rdInfoLog) << "testing impact of Hs in mol files on stereochemistry"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "chiral_3h.mol";
ROMol *m;
m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(3)->getChiralTag() != Atom::CHI_UNSPECIFIED);
delete m;
m = MolFileToMol(fName, true, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(3)->getChiralTag() != Atom::CHI_UNSPECIFIED);
delete m;
m = MolFileToMol(fName, false, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(3)->getChiralTag() != Atom::CHI_UNSPECIFIED);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileWithRxn() {
BOOST_LOG(rdInfoLog) << "testing reading reactions in mol files" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "rxn1.mol";
ROMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 18);
TEST_ASSERT(m->getNumBonds() == 16);
TEST_ASSERT(m->getAtomWithIdx(0)->hasProp(common_properties::molRxnRole));
TEST_ASSERT(
m->getAtomWithIdx(0)->getProp<int>(common_properties::molRxnRole) == 1);
TEST_ASSERT(
m->getAtomWithIdx(0)->hasProp(common_properties::molRxnComponent));
TEST_ASSERT(m->getAtomWithIdx(0)->getProp<int>(
common_properties::molRxnComponent) == 1);
TEST_ASSERT(m->getAtomWithIdx(17)->hasProp(common_properties::molRxnRole));
TEST_ASSERT(m->getAtomWithIdx(17)->getProp<int>(
common_properties::molRxnRole) == 2);
TEST_ASSERT(
m->getAtomWithIdx(17)->hasProp(common_properties::molRxnComponent));
TEST_ASSERT(m->getAtomWithIdx(17)->getProp<int>(
common_properties::molRxnComponent) == 3);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testPDBFile() {
BOOST_LOG(rdInfoLog) << "testing reading pdb files" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "1CRN.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 327);
TEST_ASSERT(m->getNumBonds() == 337);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getResidueNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(9)->getMonomerInfo())
->getSerialNumber() == 10);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(9)->getMonomerInfo())
->getResidueNumber() == 2);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getName() == " N ");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getResidueName() == "THR");
TEST_ASSERT(feq(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getTempFactor(),
13.79));
TEST_ASSERT(m->getNumConformers() == 1);
TEST_ASSERT(feq(m->getConformer().getAtomPos(0).x, 17.047));
TEST_ASSERT(feq(m->getConformer().getAtomPos(0).y, 14.099));
TEST_ASSERT(feq(m->getConformer().getAtomPos(0).z, 3.625));
std::string mb = MolToPDBBlock(*m);
delete m;
m = PDBBlockToMol(mb);
TEST_ASSERT(m->getNumAtoms() == 327);
TEST_ASSERT(m->getNumBonds() == 337);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getResidueNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(9)->getMonomerInfo())
->getSerialNumber() == 10);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(9)->getMonomerInfo())
->getResidueNumber() == 2);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getName() == " N ");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getResidueName() == "THR");
TEST_ASSERT(feq(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getTempFactor(),
13.79));
TEST_ASSERT(m->getNumConformers() == 1);
TEST_ASSERT(feq(m->getConformer().getAtomPos(0).x, 17.047));
TEST_ASSERT(feq(m->getConformer().getAtomPos(0).y, 14.099));
TEST_ASSERT(feq(m->getConformer().getAtomPos(0).z, 3.625));
    // test adding hydrogens; the final argument (addResidueInfo=true) asks
    // for PDB residue records on the new Hs as well
    ROMol *nm = MolOps::addHs(*m, false, false, nullptr, true);
AtomPDBResidueInfo *info =
(AtomPDBResidueInfo *)(nm->getAtomWithIdx(nm->getNumAtoms() - 1)
->getMonomerInfo());
TEST_ASSERT(info->getMonomerType() == AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(info->getName() == " H7 ");
    TEST_ASSERT(info->getResidueName() == "ASN");
    delete nm;
    delete m;
  }
{
std::string fName;
fName = rdbase + "2FVD.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 2501);
TEST_ASSERT(m->getNumBonds() == 2383);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getResidueNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getIsHeteroAtom() == 0);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getSerialNumber() == 2294);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getResidueNumber() == 299);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getIsHeteroAtom() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getChainId() == "A");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getName() == " CA ");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == "MET");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getName() == " N1 ");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getResidueName() == "LIA");
std::string mb = MolToPDBBlock(*m, -1, 32);
delete m;
m = PDBBlockToMol(mb);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 2501);
TEST_ASSERT(m->getNumBonds() == 2383);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getResidueNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getIsHeteroAtom() == 0);
// FIX:
// TEST_ASSERT(static_cast<AtomPDBResidueInfo
// *>(m->getAtomWithIdx(2292)->getMonomerInfo())->getSerialNumber()==2294);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getResidueNumber() == 299);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getIsHeteroAtom() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getChainId() == "A");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getName() == " CA ");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == "MET");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getName() == " N1 ");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2292)->getMonomerInfo())
->getResidueName() == "LIA");
    delete m;
  }
{ // DNA
std::string fName;
fName = rdbase + "4BNA.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumHeavyAtoms() == 602);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == " DC");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(57)->getMonomerInfo())
->getResidueName() == " DG");
std::string mb = MolToPDBBlock(*m);
delete m;
m = PDBBlockToMol(mb);
TEST_ASSERT(m->getNumHeavyAtoms() == 602);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == " DC");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(57)->getMonomerInfo())
->getResidueName() == " DG");
delete m;
}
{ // RNA
std::string fName;
fName = rdbase + "4TNA.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumHeavyAtoms() == 1656);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == " G");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(90)->getMonomerInfo())
->getResidueName() == " A");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(197)->getMonomerInfo())
->getResidueName() == "2MG");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(197)->getMonomerInfo())
->getIsHeteroAtom());
std::string mb = MolToPDBBlock(*m);
delete m;
m = PDBBlockToMol(mb);
TEST_ASSERT(m->getNumHeavyAtoms() == 1656);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == " G");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(90)->getMonomerInfo())
->getResidueName() == " A");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(197)->getMonomerInfo())
->getResidueName() == "2MG");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(197)->getMonomerInfo())
->getIsHeteroAtom());
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testSequences() {
BOOST_LOG(rdInfoLog) << "testing reading sequences" << std::endl;
{
std::string seq = "CGCGAATTACCGCG"; // made up
int flavor = 6; // DNA
ROMol *m = SequenceToMol(seq, true, flavor);
TEST_ASSERT(m);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == " DC");
seq = MolToSequence(*m);
TEST_ASSERT(seq == "CGCGAATTACCGCG");
seq = MolToHELM(*m);
// std::cerr << seq << std::endl;
TEST_ASSERT(seq ==
"RNA1{[dR](C)P.[dR](G)P.[dR](C)P.[dR](G)P.[dR](A)P.[dR](A)P.["
"dR](T)P.[dR](T)P.[dR](A)P.[dR](C)P.[dR](C)P.[dR](G)P.[dR](C)P."
"[dR](G)}$$$$");
{
std::string lseq = MolToHELM(*m);
TEST_ASSERT(lseq == seq);
ROMol *m2 = HELMToMol(seq);
      TEST_ASSERT(m2);
lseq = MolToSequence(*m2);
TEST_ASSERT(lseq == "CGCGAATTACCGCG");
lseq = MolToHELM(*m2);
TEST_ASSERT(lseq == seq);
delete m2;
}
{
ROMol *nm = MolOps::addHs(*m);
TEST_ASSERT(nm);
std::string pdb = MolToPDBBlock(*nm);
delete nm;
nm = PDBBlockToMol(pdb);
TEST_ASSERT(nm);
std::string lseq = MolToSequence(*nm);
TEST_ASSERT(lseq == "CGCGAATTACCGCG");
delete nm;
}
delete m;
}
{
std::string seq = "CGCGAAUUACCGCG"; // made up
int flavor = 2; // RNA
ROMol *m = SequenceToMol(seq, true, flavor);
TEST_ASSERT(m);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo())
->getSerialNumber() == 1);
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())
->getResidueName() == " C");
seq = MolToSequence(*m);
TEST_ASSERT(seq == "CGCGAAUUACCGCG");
seq = MolToHELM(*m);
TEST_ASSERT(seq ==
"RNA1{R(C)P.R(G)P.R(C)P.R(G)P.R(A)P.R(A)P.R(U)P.R(U)P.R(A)P.R("
"C)P.R(C)P.R(G)P.R(C)P.R(G)}$$$$");
{
std::string lseq = MolToHELM(*m);
TEST_ASSERT(lseq == seq);
ROMol *m2 = HELMToMol(seq);
      TEST_ASSERT(m2);
lseq = MolToSequence(*m2);
TEST_ASSERT(lseq == "CGCGAAUUACCGCG");
lseq = MolToHELM(*m2);
TEST_ASSERT(lseq == seq);
delete m2;
}
{
ROMol *nm = MolOps::addHs(*m);
TEST_ASSERT(nm);
std::string pdb = MolToPDBBlock(*nm);
delete nm;
nm = PDBBlockToMol(pdb);
TEST_ASSERT(nm);
std::string lseq = MolToSequence(*nm);
TEST_ASSERT(lseq == "CGCGAAUUACCGCG");
delete nm;
}
delete m;
}
  BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub1023() {
BOOST_LOG(rdInfoLog) << "GetSSSR interrupted by segmentation fault"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "github1023.pdb";
bool sanitize = false;
ROMol *m = PDBFileToMol(fName, sanitize);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 119);
TEST_ASSERT(m->getNumBonds() == 399);
MolOps::findSSSR(*m); // this was seg faulting
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub166() {
BOOST_LOG(rdInfoLog)
<< "testing Github 166: skipping sanitization on reading pdb files"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "1CRN.pdb";
ROMol *m = PDBFileToMol(fName, false, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 327);
TEST_ASSERT(m->getNumBonds() == 337);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testZBO() {
BOOST_LOG(rdInfoLog) << "testing ZBO parsing" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
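  // ZBO ("zero-order bond") property lines are a CTAB extension; bonds
  // listed on them should come back as Bond::ZERO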
{
std::string fName;
fName = rdbase + "FeCO5.mol";
ROMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 11);
TEST_ASSERT(m->getNumBonds() == 10);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(2)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(6)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(7)->getBondType() == Bond::ZERO);
    delete m;
  }
{
std::string fName;
fName = rdbase + "CrBz.mol";
ROMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 7);
TEST_ASSERT(m->getNumBonds() == 12);
TEST_ASSERT(m->getBondWithIdx(6)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(7)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(8)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(9)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(10)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(11)->getBondType() == Bond::ZERO);
// make sure we don't screw up aromaticity:
TEST_ASSERT(m->getBondWithIdx(0)->getIsAromatic());
    delete m;
  }
{
std::string fName;
fName = rdbase + "CrBz2.mol";
ROMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 13);
TEST_ASSERT(m->getNumBonds() == 24);
TEST_ASSERT(m->getBondWithIdx(6)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(7)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(8)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(9)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(10)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(11)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(18)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(19)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(20)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(21)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(22)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getBondWithIdx(23)->getBondType() == Bond::ZERO);
    delete m;
  }
{
std::string fName;
fName = rdbase + "H3BNH3.mol";
ROMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 2);
TEST_ASSERT(m->getNumBonds() == 1);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::ZERO);
TEST_ASSERT(m->getAtomWithIdx(0)->getFormalCharge() == 0);
TEST_ASSERT(m->getAtomWithIdx(1)->getFormalCharge() == 0);
TEST_ASSERT(m->getAtomWithIdx(0)->getNumExplicitHs() == 3);
TEST_ASSERT(m->getAtomWithIdx(1)->getNumExplicitHs() == 0);
TEST_ASSERT(m->getAtomWithIdx(0)->getTotalNumHs() == 3);
TEST_ASSERT(m->getAtomWithIdx(1)->getTotalNumHs() == 3);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub164() {
BOOST_LOG(rdInfoLog) << "testing Github 164: problems with Xe from mol files"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "Github164.mol";
ROMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3);
TEST_ASSERT(m->getNumBonds() == 2);
TEST_ASSERT(m->getAtomWithIdx(0)->getExplicitValence() == 2);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub194() {
BOOST_LOG(rdInfoLog) << "testing github issue 194: bad bond types from pdb"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "1CRN.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 327);
TEST_ASSERT(m->getNumBonds() == 337);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo());
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
// the root cause: problems in SamePDBResidue:
TEST_ASSERT(SamePDBResidue(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo()),
static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(1)->getMonomerInfo())));
TEST_ASSERT(SamePDBResidue(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo()),
static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2)->getMonomerInfo())));
TEST_ASSERT(!SamePDBResidue(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(0)->getMonomerInfo()),
static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(9)->getMonomerInfo())));
// the symptom, bond orders:
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(2)->getMonomerInfo())
->getName() == " C ");
TEST_ASSERT(static_cast<AtomPDBResidueInfo *>(
m->getAtomWithIdx(3)->getMonomerInfo())
->getName() == " O ");
TEST_ASSERT(m->getBondBetweenAtoms(2, 3));
TEST_ASSERT(m->getBondBetweenAtoms(2, 3)->getBondType() == Bond::DOUBLE);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub196() {
BOOST_LOG(rdInfoLog)
<< "testing github issue 196: left justitified bond topology"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "github196.mol";
ROMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 19);
TEST_ASSERT(m->getNumBonds() == 20);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub191() {
BOOST_LOG(rdInfoLog) << "-----------------------\n Testing github issue 191: "
"wavy bonds to Hs should affect attached double bond "
"stereochemistry."
<< std::endl;
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *m = MolFileToMol(pathName + "github191.1.mol");
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREOE);
std::string smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "C/C=C/C");
delete m;
}
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *m = MolFileToMol(pathName + "github191.2.mol");
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREOANY);
std::string smi = MolToSmiles(*m, true);
TEST_ASSERT(smi == "CC=CC");
delete m;
}
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testGithub210() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing Github 210: flag possible stereocenters "
"when calling assignStereochemistry()"
<< std::endl;
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *m = MolFileToMol(pathName + "github210.mol");
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
TEST_ASSERT(
m->getAtomWithIdx(4)->hasProp(common_properties::_ChiralityPossible));
delete m;
}
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
namespace {
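// query function handed to MolOps::getMolFragsWithQuery() below: buckets
// atoms by their PDB residue name (non-PDB monomer info maps to "")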
std::string getResidue(const ROMol &, const Atom *at) {
if (at->getMonomerInfo()->getMonomerType() != AtomMonomerInfo::PDBRESIDUE)
return "";
return static_cast<const AtomPDBResidueInfo *>(at->getMonomerInfo())
->getResidueName();
}
} // namespace
void testPDBResidues() {
BOOST_LOG(rdInfoLog) << "testing splitting on PDB residues" << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName;
fName = rdbase + "2NW4.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
std::map<std::string, boost::shared_ptr<ROMol>> res =
MolOps::getMolFragsWithQuery(*m, getResidue, false);
TEST_ASSERT(res.size() == 22);
TEST_ASSERT(res.find(std::string("8NH")) != res.end());
TEST_ASSERT(res.find(std::string("ALA")) != res.end());
TEST_ASSERT(res[std::string("8NH")]->getNumAtoms() == 21);
const ROMol *lig = res[std::string("8NH")].get();
TEST_ASSERT(lig->getNumConformers() == 1);
TEST_ASSERT(feq(lig->getConformer().getAtomPos(0).x, 23.517));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(0).y, 5.263));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(0).z, 4.399));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(11).x, 27.589));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(11).y, -0.311));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(11).z, 3.743));
    delete m;
  }
{
std::string fName;
fName = rdbase + "2NW4.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
std::vector<std::string> keep;
keep.push_back("8NH");
std::map<std::string, boost::shared_ptr<ROMol>> res =
MolOps::getMolFragsWithQuery(*m, getResidue, false, &keep);
TEST_ASSERT(res.size() == 1);
TEST_ASSERT(res.find(std::string("8NH")) != res.end());
TEST_ASSERT(res[std::string("8NH")]->getNumAtoms() == 21);
const ROMol *lig = res[std::string("8NH")].get();
TEST_ASSERT(lig->getNumConformers() == 1);
TEST_ASSERT(feq(lig->getConformer().getAtomPos(0).x, 23.517));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(0).y, 5.263));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(0).z, 4.399));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(11).x, 27.589));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(11).y, -0.311));
TEST_ASSERT(feq(lig->getConformer().getAtomPos(11).z, 3.743));
    delete m;
  }
{
std::string fName;
fName = rdbase + "2NW4.pdb";
ROMol *m = PDBFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getAtomWithIdx(0)->getMonomerInfo()->getMonomerType() ==
AtomMonomerInfo::PDBRESIDUE);
std::vector<std::string> keep;
keep.push_back("8NH");
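    // the final "true" negates the keep list: every residue *except* 8NH
    // should come back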
std::map<std::string, boost::shared_ptr<ROMol>> res =
MolOps::getMolFragsWithQuery(*m, getResidue, false, &keep, true);
TEST_ASSERT(res.size() == 21);
TEST_ASSERT(res.find(std::string("8NH")) == res.end());
TEST_ASSERT(res.find(std::string("ALA")) != res.end());
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub337() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing Github 337: No double bond stereo "
"perception from CTABs when sanitization is turned "
"off"
<< std::endl;
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *m = MolFileToMol(pathName + "unsanitized_stereo.mol", false);
TEST_ASSERT(m);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREONONE);
std::string molBlock = MolToMolBlock(*m);
TEST_ASSERT(molBlock.find(" 1 2 2 0") != std::string::npos);
delete m;
}
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testGithub360() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing Github 360: Computed props on non-sanitized "
"molecule interfering with substructure matching"
<< std::endl;
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *dbm = SmilesToMol("C1Cc2ccccc2CN1");
TEST_ASSERT(dbm);
RWMol *tmpl = MolFileToMol(pathName + "github360.mol", false);
TEST_ASSERT(tmpl);
MatchVectType mv;
TEST_ASSERT(SubstructMatch(*dbm, *tmpl, mv));
delete dbm;
delete tmpl;
}
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testGithub741() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing Github 741: Support CTABs where the second "
"letter in atom symbols is capitalized"
<< std::endl;
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *mol = MolFileToMol(pathName + "github741.mol");
TEST_ASSERT(mol);
TEST_ASSERT(mol->getAtomWithIdx(1)->getSymbol() == "Br");
TEST_ASSERT(mol->getAtomWithIdx(2)->getSymbol() == "Br");
delete mol;
}
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *mol = MolFileToMol(pathName + "github741.v3k.mol");
TEST_ASSERT(mol);
TEST_ASSERT(mol->getAtomWithIdx(1)->getSymbol() == "Br");
TEST_ASSERT(mol->getAtomWithIdx(2)->getSymbol() == "Br");
delete mol;
}
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testGithub188() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing Github 188: Bad E/Z assignment from CTAB"
<< std::endl;
{
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *mol = MolFileToMol(pathName + "github188.mol");
TEST_ASSERT(mol);
TEST_ASSERT(mol->getBondBetweenAtoms(16, 17));
TEST_ASSERT(mol->getBondBetweenAtoms(16, 17)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(mol->getBondBetweenAtoms(13, 14));
TEST_ASSERT(mol->getBondBetweenAtoms(13, 14)->getStereo() == Bond::STEREOZ);
delete mol;
}
{
std::cerr << "----------------------------------" << std::endl;
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *mol = MolFileToMol(pathName + "github188_2.mol");
TEST_ASSERT(mol);
TEST_ASSERT(mol->getBondBetweenAtoms(16, 17));
TEST_ASSERT(mol->getBondBetweenAtoms(16, 17)->getStereo() == Bond::STEREOZ);
TEST_ASSERT(mol->getBondBetweenAtoms(13, 14));
TEST_ASSERT(mol->getBondBetweenAtoms(13, 14)->getStereo() == Bond::STEREOZ);
delete mol;
}
{
std::cerr << "----------------------------------" << std::endl;
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *mol = MolFileToMol(pathName + "github192.mol");
TEST_ASSERT(mol);
for (unsigned int i = 0; i < mol->getNumBonds(); ++i) {
const Bond *bnd = mol->getBondWithIdx(i);
if (bnd->getBondType() == Bond::DOUBLE &&
i != mol->getBondBetweenAtoms(26, 27)->getIdx() &&
i != mol->getBondBetweenAtoms(6, 8)->getIdx()) {
TEST_ASSERT(bnd->getStereo() == Bond::STEREOE);
}
}
delete mol;
}
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testRCSBSdf() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing right-aligned elements in RCSB SDF files"
<< std::endl;
std::string pathName = getenv("RDBASE");
pathName += "/Code/GraphMol/FileParsers/test_data/";
RWMol *mol = MolFileToMol(pathName + "s58_rcsb.mol");
TEST_ASSERT(mol);
delete mol;
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testParseCHG() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing PDB charge parsing" << std::endl;
  // a bad PDB ligand with a CHG line holding too many entries (>8) and
  // right- and mid-justified atom symbols
const std::string molblock_chg =
"2D1G_DVT_B_2001\n"
" RCSB PDB01151500373D\n"
"Coordinates from PDB:2D1G:B:2001 Model:1 without hydrogens\n"
" 38 60 0 0 0 0 999 V2000\n"
" 19.0320 93.5880 16.2640 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 19.6400 94.8240 15.4350 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 18.1700 95.2990 14.2790 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 19.6000 96.2720 16.5920 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 20.3140 97.8870 15.9210 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 20.1880 98.9370 17.1150 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 21.5140 94.4720 15.5850 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 22.5590 95.9150 15.0470 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 20.6050 96.4040 14.1390 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 22.1700 97.4330 15.9940 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 23.1150 97.3160 13.7160 O 0 3 0 0 0 0 0 0 0 0 0 0\n"
" 23.9960 95.5550 15.6610 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 22.6470 94.8660 13.3320 O 0 3 0 0 0 0 0 0 0 0 0 0\n"
" 23.7710 96.1210 12.1910 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 24.0950 97.6940 11.3470 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 25.2350 95.8110 12.7760 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 23.5040 94.7850 10.9890 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 22.0160 95.1170 9.8340 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 20.1580 95.5900 9.6670 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 22.2300 94.0400 8.6680 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 22.6710 96.7240 9.1340 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 20.8370 94.9790 12.6510 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 21.1160 93.9980 11.2840 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 20.1410 93.8410 13.6860 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 21.6740 96.5770 11.6070 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 22.5990 98.1610 10.2810 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 23.1590 99.4050 9.4060 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 22.1350 99.1410 12.0500 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 21.4440 98.0120 13.0890 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 21.1330 99.0140 14.4480 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 19.1670 95.7180 12.0250 O 0 3 0 0 0 0 0 0 0 0 0 0\n"
" 19.7240 97.0880 10.6480 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 20.7360 98.5460 10.1630 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 18.2530 97.4600 10.0390 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 19.6430 98.1340 12.3760 O 0 3 0 0 0 0 0 0 0 0 0 0\n"
" 18.5370 96.9390 13.5530 V 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 18.8160 98.2640 14.7770 O 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 17.0530 97.2880 13.0100 O 0 5 0 0 0 0 0 0 0 0 0 0\n"
" 1 2 1 0 0 0 0\n"
" 2 3 1 0 0 0 0\n"
" 2 4 1 0 0 0 0\n"
" 2 7 1 0 0 0 0\n"
" 2 9 1 0 0 0 0\n"
" 2 24 1 0 0 0 0\n"
" 3 36 1 0 0 0 0\n"
" 4 5 1 0 0 0 0\n"
" 5 6 1 0 0 0 0\n"
" 5 9 1 0 0 0 0\n"
" 5 10 1 0 0 0 0\n"
" 5 30 1 0 0 0 0\n"
" 5 37 1 0 0 0 0\n"
" 7 8 1 0 0 0 0\n"
" 8 9 1 0 0 0 0\n"
" 8 10 1 0 0 0 0\n"
" 8 11 1 0 0 0 0\n"
" 8 12 1 0 0 0 0\n"
" 8 13 1 0 0 0 0\n"
" 9 22 1 0 0 0 0\n"
" 9 29 1 0 0 0 0\n"
" 9 36 1 0 0 0 0\n"
" 11 14 1 0 0 0 0\n"
" 11 29 1 0 0 0 0\n"
" 13 14 1 0 0 0 0\n"
" 13 22 1 0 0 0 0\n"
" 14 15 1 0 0 0 0\n"
" 14 16 1 0 0 0 0\n"
" 14 17 1 0 0 0 0\n"
" 14 28 1 0 0 0 0\n"
" 15 26 1 0 0 0 0\n"
" 17 18 1 0 0 0 0\n"
" 18 19 1 0 0 0 0\n"
" 18 20 1 0 0 0 0\n"
" 18 21 1 0 0 0 0\n"
" 18 23 1 0 0 0 0\n"
" 18 28 1 0 0 0 0\n"
" 19 32 1 0 0 0 0\n"
" 21 26 1 0 0 0 0\n"
" 22 23 1 0 0 0 0\n"
" 22 24 1 0 0 0 0\n"
" 22 28 1 0 0 0 0\n"
" 22 31 1 0 0 0 0\n"
" 25 26 1 0 0 0 0\n"
" 25 29 1 0 0 0 0\n"
" 26 27 1 0 0 0 0\n"
" 26 28 1 0 0 0 0\n"
" 26 33 1 0 0 0 0\n"
" 28 29 1 0 0 0 0\n"
" 28 32 1 0 0 0 0\n"
" 29 30 1 0 0 0 0\n"
" 29 35 1 0 0 0 0\n"
" 31 32 1 0 0 0 0\n"
" 31 36 1 0 0 0 0\n"
" 32 33 1 0 0 0 0\n"
" 32 34 1 0 0 0 0\n"
" 32 35 1 0 0 0 0\n"
" 35 36 1 0 0 0 0\n"
" 36 37 1 0 0 0 0\n"
" 36 38 1 0 0 0 0\n"
"M CHG 24 1 -1 2 -1 5 -1 6 -1 8 -1 9 4 11 1 12 "
"-1 13 1 14 -1 16 -1 18 -1 20 -1 22 -1 26 -1 27 -1 28 "
" 4 29 -1 31 1 32 -1 34 -1 35 1 36 -1 38 -1\n"
"M END\n";
const int charges[] = {1, -1, 2, -1, 5, -1, 6, -1, 8, -1, 9, 4, 11,
1, 12, -1, 13, 1, 14, -1, 16, -1, 18, -1, 20, -1,
22, -1, 26, -1, 27, -1, 28, 4, 29, -1, 31, 1, 32,
-1, 34, -1, 35, 1, 36, -1, 38, -1, 0, 0};
  // Shouldn't seg fault, throw an exception, or return a null mol
  RWMol *m = MolBlockToMol(molblock_chg);
  TEST_ASSERT(m);
  size_t i = 0;
  while (charges[i] != 0) {
    TEST_ASSERT(
        m->getAtomWithIdx((unsigned int)charges[i] - 1)->getFormalCharge() ==
        charges[i + 1]);
    i += 2;
  }
std::string out = MolToMolBlock(*m);
const std::string sub = "M CHG";
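  // the CTAB format allows at most 8 entries per "M  CHG" line, so writing
  // the 24 charges back out should produce three such lines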
std::vector<size_t> positions;
size_t pos = out.find(sub, 0);
while (pos != std::string::npos) {
positions.push_back(pos);
size_t num_entries =
strtol(out.substr(pos + sub.size(), 3).c_str(), nullptr, 10);
TEST_ASSERT(num_entries == 8);
pos = out.find(sub, pos + 1);
}
TEST_ASSERT(positions.size() == 3); // 24/3 == 8
delete m;
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testMDLAtomProps() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing MDL atom properties" << std::endl;
std::string smi = "CC";
ROMOL_SPTR mol(SmilesToMol(smi, false, false));
setAtomAlias(mol->getAtomWithIdx(0), "foo");
setAtomValue(mol->getAtomWithIdx(0), "bar");
setAtomRLabel(mol->getAtomWithIdx(0), 1);
mol.reset(MolBlockToMol(MolToMolBlock(*mol.get())));
TEST_ASSERT(getAtomAlias(mol->getAtomWithIdx(0)) == "foo");
TEST_ASSERT(getAtomValue(mol->getAtomWithIdx(0)) == "bar");
TEST_ASSERT(getAtomRLabel(mol->getAtomWithIdx(0)) == 1);
try {
setAtomRLabel(mol->getAtomWithIdx(0), 100);
TEST_ASSERT(0);
} catch (...) {
}
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testSupplementalSmilesLabel() {
BOOST_LOG(rdInfoLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << "Testing supplemental SMILES labels" << std::endl;
std::string smi = "C";
ROMOL_SPTR mol(SmilesToMol(smi, false, false));
setSupplementalSmilesLabel(mol->getAtomWithIdx(0), "xxx");
smi = MolToSmiles(*mol.get());
TEST_ASSERT(smi == "Cxxx");
TEST_ASSERT(getSupplementalSmilesLabel(mol->getAtomWithIdx(0)) == "xxx");
BOOST_LOG(rdInfoLog) << "Finished" << std::endl;
}
void testGithub1034() {
BOOST_LOG(rdInfoLog)
<< "Test github 1034: Squiggle bonds from CTABs lost post-parsing"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{ // double bond
std::string fName;
fName = rdbase + "github1034.1.mol";
bool sanitize = true;
RWMol *m = MolFileToMol(fName, sanitize);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREOANY);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(2)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(2)->getBondDir() != Bond::UNKNOWN);
TEST_ASSERT(
m->getBondWithIdx(2)->hasProp(common_properties::_UnknownStereo));
    delete m;
  }
{ // double bond
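    // same file, unsanitized: the squiggle is recorded via the
    // _UnknownStereo property and the double bond stays STEREONONE until
    // assignStereochemistry() runs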
std::string fName;
fName = rdbase + "github1034.1.mol";
bool sanitize = false;
RWMol *m = MolFileToMol(fName, sanitize);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
TEST_ASSERT(m->getBondWithIdx(2)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(2)->getBondDir() != Bond::UNKNOWN);
int explicit_unknown_stereo;
TEST_ASSERT(
m->getBondWithIdx(2)->getPropIfPresent<int>(
common_properties::_UnknownStereo, explicit_unknown_stereo) &&
        explicit_unknown_stereo);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREONONE);
MolOps::sanitizeMol(*m);
TEST_ASSERT(m->getBondWithIdx(0)->getBondType() == Bond::DOUBLE);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREONONE);
TEST_ASSERT(m->getBondWithIdx(1)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(1)->getBondDir() == Bond::NONE);
TEST_ASSERT(m->getBondWithIdx(2)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(2)->getBondDir() != Bond::UNKNOWN);
MolOps::assignStereochemistry(*m, true, true);
TEST_ASSERT(m->getBondWithIdx(0)->getStereo() == Bond::STEREOANY);
    delete m;
  }
{ // chiral center
std::string fName;
fName = rdbase + "github1034.2.mol";
bool sanitize = true;
RWMol *m = MolFileToMol(fName, sanitize);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
TEST_ASSERT(m->getAtomWithIdx(0)->getChiralTag() == Atom::CHI_UNSPECIFIED);
TEST_ASSERT(m->getBondWithIdx(3)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(3)->getBondDir() == Bond::NONE);
TEST_ASSERT(
m->getBondWithIdx(3)->hasProp(common_properties::_UnknownStereo));
    delete m;
  }
{ // chiral center
std::string fName;
fName = rdbase + "github1034.2.mol";
bool sanitize = false;
RWMol *m = MolFileToMol(fName, sanitize);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 5);
TEST_ASSERT(m->getBondWithIdx(3)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(3)->getBondDir() != Bond::UNKNOWN);
int explicit_unknown_stereo;
TEST_ASSERT(
m->getBondWithIdx(3)->getPropIfPresent<int>(
common_properties::_UnknownStereo, explicit_unknown_stereo) &&
        explicit_unknown_stereo);
MolOps::sanitizeMol(*m);
TEST_ASSERT(m->getBondWithIdx(3)->getBondType() == Bond::SINGLE);
TEST_ASSERT(m->getBondWithIdx(3)->getBondDir() != Bond::UNKNOWN);
TEST_ASSERT(m->getAtomWithIdx(0)->getChiralTag() == Atom::CHI_UNSPECIFIED);
    delete m;
  }
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub1049() {
BOOST_LOG(rdInfoLog) << "Test github 1049: MolOps::cleanUp() being called by "
"CTAB parser even when sanitization isn't on"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{ // no stereo (this one worked before)
std::string fName;
fName = rdbase + "github1049.1.mol";
bool sanitize = false;
RWMol *m = MolFileToMol(fName, sanitize);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 7);
TEST_ASSERT(m->getAtomWithIdx(1)->getAtomicNum() == 7);
TEST_ASSERT(m->getAtomWithIdx(1)->getFormalCharge() == 0);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 8);
TEST_ASSERT(m->getAtomWithIdx(2)->getFormalCharge() == 0);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomicNum() == 8);
TEST_ASSERT(m->getAtomWithIdx(3)->getFormalCharge() == 0);
MolOps::sanitizeMol(*m);
TEST_ASSERT(m->getAtomWithIdx(1)->getFormalCharge() == 1);
TEST_ASSERT((m->getAtomWithIdx(2)->getFormalCharge() == -1 &&
m->getAtomWithIdx(3)->getFormalCharge() == 0) ||
(m->getAtomWithIdx(2)->getFormalCharge() == 0 &&
m->getAtomWithIdx(3)->getFormalCharge() == -1));
delete m;
}
{ // with stereo (this one did not work)
std::string fName;
fName = rdbase + "github1049.2.mol";
bool sanitize = false;
RWMol *m = MolFileToMol(fName, sanitize);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 7);
TEST_ASSERT(m->getAtomWithIdx(1)->getAtomicNum() == 7);
TEST_ASSERT(m->getAtomWithIdx(1)->getFormalCharge() == 0);
TEST_ASSERT(m->getAtomWithIdx(2)->getAtomicNum() == 8);
TEST_ASSERT(m->getAtomWithIdx(2)->getFormalCharge() == 0);
TEST_ASSERT(m->getAtomWithIdx(3)->getAtomicNum() == 8);
TEST_ASSERT(m->getAtomWithIdx(3)->getFormalCharge() == 0);
MolOps::sanitizeMol(*m);
TEST_ASSERT(m->getAtomWithIdx(1)->getFormalCharge() == 1);
TEST_ASSERT((m->getAtomWithIdx(2)->getFormalCharge() == -1 &&
m->getAtomWithIdx(3)->getFormalCharge() == 0) ||
(m->getAtomWithIdx(2)->getFormalCharge() == 0 &&
m->getAtomWithIdx(3)->getFormalCharge() == -1));
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolFileDativeBonds() {
BOOST_LOG(rdInfoLog) << "Test MDL molfiles with dative bonds (V3000 only)"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
// Read molfiles with dative bonds.
{
std::string fName = rdbase + "dative_bonds_one.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumBonds() == 5);
TEST_ASSERT(m->getBondWithIdx(4)->getBondType() == Bond::DATIVE);
std::string smiles = MolToSmiles(*m);
TEST_ASSERT(smiles == "CCC(=O)O->[Cu]");
delete m;
}
{
std::string fName = rdbase + "dative_bonds_two.mol";
RWMol *m = MolFileToMol(fName);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumBonds() == 10);
TEST_ASSERT(m->getBondWithIdx(8)->getBondType() == Bond::DATIVE);
TEST_ASSERT(m->getBondWithIdx(9)->getBondType() == Bond::DATIVE);
std::string smiles = MolToSmiles(*m);
TEST_ASSERT(smiles == "CCC(=O)O->[Cu]<-OC(O)CC");
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub1251() {
BOOST_LOG(rdInfoLog)
<< "Test github 1251: MolFromMolBlock sanitizing when it should not be"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName = rdbase + "github1251.mol";
RWMol *m = MolFileToMol(fName, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 39);
TEST_ASSERT(m->getNumBonds() == 44);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub1029() {
BOOST_LOG(rdInfoLog)
<< "Test github 1029: PDB reader fails for arginine explicit hydrogens "
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{ // the original bug report
std::string fName = rdbase + "github1029.1.pdb";
bool sanitize = true, removeHs = false;
ROMol *m = PDBFileToMol(fName, sanitize, removeHs);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 3268);
TEST_ASSERT(m->getNumBonds() == 3302);
TEST_ASSERT(m->getAtomWithIdx(121)->getExplicitValence() == 4);
TEST_ASSERT(m->getAtomWithIdx(121)->getFormalCharge() == 1);
delete m;
}
{ // a second report that came in
std::string fName = rdbase + "github1029.1jld_chaina.pdb";
bool sanitize = false, removeHs = false;
ROMol *m = PDBFileToMol(fName, sanitize, removeHs);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 1533);
TEST_ASSERT(m->getNumBonds() == 1545);
TEST_ASSERT(m->getAtomWithIdx(123)->getExplicitValence() == 4);
TEST_ASSERT(m->getAtomWithIdx(123)->getFormalCharge() == 1);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub1340() {
BOOST_LOG(rdInfoLog) << "Test github 1340: PDB parser creating H-H bonds "
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{ // a second report that came in
std::string fName = rdbase + "github1340.1jld_snip.pdb";
bool sanitize = true, removeHs = false;
ROMol *m = PDBFileToMol(fName, sanitize, removeHs);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 37);
TEST_ASSERT(m->getNumBonds() == 35);
TEST_ASSERT(m->getAtomWithIdx(10)->getAtomicNum() == 1);
TEST_ASSERT(m->getAtomWithIdx(34)->getAtomicNum() == 1);
RDGeom::Point3D p10 = m->getConformer().getAtomPos(10);
RDGeom::Point3D p34 = m->getConformer().getAtomPos(34);
TEST_ASSERT((p34 - p10).length() < 1.0);
TEST_ASSERT(!m->getBondBetweenAtoms(10, 34));
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolBlockChirality() {
BOOST_LOG(rdInfoLog)
<< "Test automatic generation of coordinates for mol block chirality "
<< std::endl;
{
std::string smi = "C[C@H](Cl)Br";
auto mol = SmilesToMol(smi);
TEST_ASSERT(mol);
auto mb = MolToMolBlock(*mol, true);
auto mol2 = MolBlockToMol(mb);
TEST_ASSERT(mol2);
auto csmi1 = MolToSmiles(*mol, true);
auto csmi2 = MolToSmiles(*mol2, true);
TEST_ASSERT(csmi1.find("@") != std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete mol;
delete mol2;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMolBlock3DStereochem() {
BOOST_LOG(rdInfoLog)
<< "Test automatic perception of stereochem from 3D structure "
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName = rdbase + "stereo3d_1.mol";
auto m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
std::string smi = "F[C@](Cl)(Br)I";
auto m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
auto csmi1 = MolToSmiles(*m1, true);
auto csmi2 = MolToSmiles(*m2, true);
TEST_ASSERT(csmi1.find("@") != std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete m1;
delete m2;
}
{
std::string fName = rdbase + "stereo3d_2.mol";
auto m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
std::string smi = "F[C@@](Cl)(Br)I";
auto m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
auto csmi1 = MolToSmiles(*m1, true);
auto csmi2 = MolToSmiles(*m2, true);
TEST_ASSERT(csmi1.find("@") != std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete m1;
delete m2;
}
{
std::string fName = rdbase + "stereo3d_conflict.mol";
auto m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
std::string smi = "F[C@](Cl)(Br)I";
auto m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
auto csmi1 = MolToSmiles(*m1, true);
auto csmi2 = MolToSmiles(*m2, true);
TEST_ASSERT(csmi1.find("@") != std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete m1;
delete m2;
}
{
std::string fName = rdbase + "stereo3d_unknown.mol";
auto m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
std::string smi = "F[C](Cl)(Br)I";
auto m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
auto csmi1 = MolToSmiles(*m1, true);
auto csmi2 = MolToSmiles(*m2, true);
TEST_ASSERT(csmi1.find("@") == std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete m1;
delete m2;
}
{
std::string fName = rdbase + "stereo3d_trans.mol";
auto m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
std::string smi = "F/C=C/F";
auto m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
auto csmi1 = MolToSmiles(*m1, true);
auto csmi2 = MolToSmiles(*m2, true);
TEST_ASSERT(csmi1.find("/") != std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete m1;
delete m2;
}
{
std::string fName = rdbase + "stereo3d_cis.mol";
auto m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
std::string smi = "F/C=C\\F";
auto m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
auto csmi1 = MolToSmiles(*m1, true);
auto csmi2 = MolToSmiles(*m2, true);
TEST_ASSERT(csmi1.find("/") != std::string::npos);
TEST_ASSERT(csmi1.find("\\") != std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete m1;
delete m2;
}
{
std::string fName = rdbase + "stereo3d_dblunknown.mol";
auto m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
std::string smi = "FC=CF";
auto m2 = SmilesToMol(smi);
TEST_ASSERT(m2);
auto csmi1 = MolToSmiles(*m1, true);
auto csmi2 = MolToSmiles(*m2, true);
TEST_ASSERT(csmi1.find("/") == std::string::npos);
TEST_ASSERT(csmi1.find("\\") == std::string::npos);
TEST_ASSERT(csmi1 == csmi2);
delete m1;
delete m2;
}
  BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testMarvinSMATag() {
BOOST_LOG(rdInfoLog) << "Test Marvin MRV SMA tag " << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName = rdbase + "mrv-sma.mol";
RWMol *m = MolFileToMol(fName, false);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 4);
TEST_ASSERT(m->getAtomWithIdx(0)->getProp<std::string>(
common_properties::MRV_SMA) == "[#6;r6]");
TEST_ASSERT(m->getAtomWithIdx(1)->getProp<std::string>(
common_properties::MRV_SMA) == "[#16;H1]");
TEST_ASSERT(m->getAtomWithIdx(2)->getProp<std::string>(
common_properties::MRV_SMA) == "[#6;r6]");
TEST_ASSERT(m->getAtomWithIdx(3)->getProp<std::string>(
common_properties::MRV_SMA) == "[#7;H2A]");
// this should be similar to [#7;AH2:4][c;r6:3]:[c;r6:1]-[#16H1:2]
// RDKit makes these recursive smarts, not "ANDED" smarts which are a
// simpler case
std::string sma = MolToSmarts(*m);
TEST_ASSERT(sma ==
"[#6&$([#6&r6]):1](-[#16&$([#16&H1]):2]):[#6&$([#6&r6]):3]-[#7&"
"$([#7&H2&A]):4]");
delete m;
}
{
std::string fName = rdbase + "mrv-sma-bad.mol";
bool ok = false;
try {
MolFileToMol(fName, false);
} catch (FileParseException &e) {
ok = true;
TEST_ASSERT(
std::string("Cannot parse smarts: 'MyDogHasFleas' on line 12") ==
e.message());
}
TEST_ASSERT(ok);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub1689() {
BOOST_LOG(rdInfoLog) << "Test github 1689: Play nice with naughty MOL blocks"
<< std::endl;
std::string molb =
"rdkit_blank_line_before_M_END_test.sdf\n"
" ChemDraw12181709392D\n"
"\n"
" 2 1 0 0 0 0 0 0 0 0999 V2000\n"
" -0.3572 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 0.3572 0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0\n"
" 1 2 1 0 \n"
"\n"
"M END\n";
{
bool sanitize = true, removeHs = true, strictParsing = false;
ROMol *m = MolBlockToMol(molb, sanitize, removeHs, strictParsing);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 2);
TEST_ASSERT(m->getNumBonds() == 1);
delete m;
}
{
bool sanitize = true, removeHs = true, strictParsing = true;
bool ok = false;
try {
MolBlockToMol(molb, sanitize, removeHs, strictParsing);
} catch (FileParseException &e) {
ok = true;
}
TEST_ASSERT(ok);
}
  BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testWedgeBondToDoublebond() {
BOOST_LOG(rdInfoLog) << "Test wedged bonds to double bonds " << std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{ // a second report that came in
std::string fName = rdbase + "wedged_single_to_double_bond.mol";
bool sanitize = false, removeHs = false;
ROMol *m = MolFileToMol(fName, sanitize, removeHs);
TEST_ASSERT(m);
TEST_ASSERT(m->getNumAtoms() == 49);
TEST_ASSERT(m->getNumBonds() == 51);
delete m;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub1615() {
BOOST_LOG(rdInfoLog) << "testing github #1615: add function WedgeBond()"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
RWMol *m1;
std::string fName = rdbase + "Issue399a.mol";
m1 = MolFileToMol(fName);
TEST_ASSERT(m1);
TEST_ASSERT(m1->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
TEST_ASSERT(m1->getAtomWithIdx(1)->getProp<std::string>(
common_properties::_CIPCode) == "S");
TEST_ASSERT(m1->getBondWithIdx(0)->getBondDir() == Bond::NONE);
WedgeBond(m1->getBondWithIdx(0), 1, &m1->getConformer());
TEST_ASSERT(m1->getBondWithIdx(0)->getBondDir() == Bond::BEGINWEDGE);
delete m1;
}
{
std::string mb =
"example\n"
" ChemDraw04050615582D\n"
"\n"
" 4 4 0 0 0 0 0 0 0 0999 V2000\n"
" -0.7697 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 "
"0\n"
" 0.0553 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 "
"0\n"
" 0.7697 0.4125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 "
"0\n"
" 0.7697 -0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 "
"0\n"
" 2 1 1 0\n"
" 2 3 1 0\n"
" 3 4 1 0\n"
" 2 4 1 0\n"
"M END\n";
RWMol *m1 = MolBlockToMol(mb);
TEST_ASSERT(m1);
TEST_ASSERT(!m1->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
TEST_ASSERT(m1->getBondWithIdx(0)->getBondDir() == Bond::NONE);
TEST_ASSERT(m1->getAtomWithIdx(1)->getChiralTag() == Atom::CHI_UNSPECIFIED);
m1->getAtomWithIdx(1)->setChiralTag(Atom::CHI_TETRAHEDRAL_CW);
MolOps::assignStereochemistry(*m1, true, true);
TEST_ASSERT(m1->getAtomWithIdx(1)->hasProp(common_properties::_CIPCode));
TEST_ASSERT(m1->getAtomWithIdx(1)->getProp<std::string>(
common_properties::_CIPCode) == "S");
TEST_ASSERT(m1->getBondWithIdx(0)->getBondDir() == Bond::NONE);
WedgeBond(m1->getBondWithIdx(0), 1, &m1->getConformer());
TEST_ASSERT(m1->getBondWithIdx(0)->getBondDir() == Bond::BEGINWEDGE);
delete m1;
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void testGithub2000() {
BOOST_LOG(rdInfoLog)
<< "testing github #2000: Error while parsing empty atom list"
<< std::endl;
std::string rdbase = getenv("RDBASE");
rdbase += "/Code/GraphMol/FileParsers/test_data/";
{
std::string fName = rdbase + "github2000.sdf";
std::unique_ptr<RWMol> m1(MolFileToMol(fName));
TEST_ASSERT(m1);
TEST_ASSERT(!m1->getAtomWithIdx(0)->hasQuery());
}
{
std::string fName = rdbase + "github2000.2.sdf";
bool ok = false;
try {
std::unique_ptr<RWMol> m1(MolFileToMol(fName));
} catch (FileParseException &e) {
ok = true;
}
TEST_ASSERT(ok);
}
BOOST_LOG(rdInfoLog) << "done" << std::endl;
}
void RunTests() {
#if 1
test1();
test2();
test4();
test5();
test6();
testIssue145();
testIssue148();
test7();
test8();
testIssue180();
testIssue264();
testIssue399();
testMolFileChgLines();
testDblBondStereochem();
testSymmetricDblBondStereochem();
testRingDblBondStereochem();
testMolFileRGroups();
testMolFileDegreeQueries();
testMolFileRBCQueries();
testMolFileUnsaturationQueries();
testMolFileQueryToSmarts();
testMissingFiles();
testIssue1965035();
testRadicals();
testBadBondOrders();
testAtomParity();
testIssue2692246();
testKekulizationSkip();
testMolFileAtomValues();
testMolFileAtomQueries();
testListsAndValues();
// testCrash();
test1V3K();
testIssue2963522();
testIssue3073163();
testIssue3154208();
testIssue3228150();
testIssue3313540();
testIssue3359739();
testIssue3374639();
testThreeCoordinateChirality();
testIssue3375647();
testIssue3375684();
testChiralPhosphorous();
testIssue3392107();
testIssue3477283();
testIssue3484552();
testIssue3514824();
testIssue3525799();
testSkipLines();
testIssue269();
testMolFileChiralFlag();
testMolFileTotalValence();
testGithub88();
testGithub82();
testMolFileWithHs();
testMolFileWithRxn();
testGithub166();
testZBO();
testGithub164();
testGithub194();
testGithub196();
testIssue3557675();
test3V3K();
test2V3K();
testGithub191();
testGithub210();
testPDBResidues();
testGithub337();
testGithub360();
testGithub741();
testGithub188();
testRCSBSdf();
testParseCHG();
testMDLAtomProps();
testSupplementalSmilesLabel();
testGithub1023();
testGithub1049();
testPDBFile();
testSequences();
// testSequenceReaders();
testMolFileDativeBonds();
testGithub1251();
testMarvinSMATag();
testGithub1029();
testGithub1340();
testGithub1034();
testMolBlockChirality();
testMolBlock3DStereochem();
testGithub1689();
testWedgeBondToDoublebond();
testGithub1615();
#endif
testGithub2000();
}
// must be in German Locale for test...
void testLocaleSwitcher() {
float d = -1.0;
char buffer[1024];
sprintf(buffer, "%0.2f", d);
if (std::string(buffer) != "-1,00") {
BOOST_LOG(rdInfoLog) << " ---- no German locale support (skipping) ---- "
<< std::endl;
return;
}
{
RDKit::Utils::LocaleSwitcher ls;
sprintf(buffer, "%0.2f", d);
CHECK_INVARIANT(std::string(buffer) == "-1.00", "Locale Switcher Fail");
// test locale switcher recursion
{
RDKit::Utils::LocaleSwitcher ls;
sprintf(buffer, "%0.2f", d);
CHECK_INVARIANT(std::string(buffer) == "-1.00", "Locale Switcher Fail");
}
// should still be in the "C" variant
sprintf(buffer, "%0.2f", d);
CHECK_INVARIANT(std::string(buffer) == "-1.00", "Locale Switcher Fail");
}
// Should be back in German Locale
sprintf(buffer, "%0.2f", d);
CHECK_INVARIANT(std::string(buffer) == "-1,00", "Locale Switcher Fail");
}
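// Editor's sketch (not RDKit's actual LocaleSwitcher implementation, which is
// also thread-aware): the behavior exercised by testLocaleSwitcher() above can
// be modeled as an RAII guard that forces the "C" locale on construction and
// restores the previous locale on destruction, which is what makes the nested
// use above safe.
#include <clocale>
#include <string>
class ScopedCLocale {
 public:
  ScopedCLocale() : d_old(std::setlocale(LC_ALL, nullptr)) {
    std::setlocale(LC_ALL, "C");
  }
  ~ScopedCLocale() { std::setlocale(LC_ALL, d_old.c_str()); }

 private:
  std::string d_old;  // copied: the pointer returned by setlocale may be reused
};
// usage sketch: { ScopedCLocale guard; /* "%0.2f" now yields "-1.00" */ }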
#ifdef RDK_TEST_MULTITHREADED
#include <thread>
#include <future>
namespace {
void runblock() {
std::setlocale(LC_ALL, "de_DE.UTF-8");
testLocaleSwitcher();
}
} // namespace
void testMultiThreadedSwitcher() {
BOOST_LOG(rdErrorLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdErrorLog) << " Test multithreading Locale Switching"
<< std::endl;
std::vector<std::future<void>> tg;
unsigned int count = 100;
for (unsigned int i = 0; i < count; ++i) {
tg.emplace_back(std::async(std::launch::async, runblock));
}
for (auto &fut : tg) {
fut.get();
}
BOOST_LOG(rdErrorLog) << " Test multithreading (Done)" << std::endl;
BOOST_LOG(rdErrorLog) << "-------------------------------------" << std::endl;
}
#else
void testMultiThreadedSwitcher() {
BOOST_LOG(rdErrorLog) << "-------------------------------------" << std::endl;
BOOST_LOG(rdInfoLog) << " ---- Multithreaded tests disabled ---- "
<< std::endl;
}
#endif
int main(int argc, char *argv[]) {
(void)argc;
(void)argv;
// std::locale::global(std::locale("de_DE.UTF-8"));
RDLog::InitLogs();
BOOST_LOG(rdInfoLog) << " ---- Running with POSIX locale ----- " << std::endl;
RunTests(); // run with C locale
BOOST_LOG(rdInfoLog) << " ---- Running with German locale ----- "
<< std::endl;
std::setlocale(LC_ALL, "de_DE.UTF-8");
std::cout << std::setlocale(LC_ALL, nullptr) << std::endl;
testLocaleSwitcher();
testMultiThreadedSwitcher();
RunTests();
return 0;
}
| 1 | 18,997 | We should probably make a unique_ptr<ROMol> typedef in ROMol.h and start using that liberally. | rdkit-rdkit | cpp |
@@ -555,9 +555,13 @@ DefaultSettings.prototype = {
* ```
*
* @type {Boolean|String|Object}
- * @default true
+ * @default fillHandle: {
+ autoInsertRow: false,
+ }
*/
- fillHandle: true,
+ fillHandle: {
+ autoInsertRow: false,
+ },
/**
* Allows to specify the number of fixed (or *frozen*) rows at the top of the table. | 1 | import {isDefined} from './helpers/mixed';
import {isObjectEqual} from './helpers/object';
/**
* @alias Options
* @constructor
* @description
* ## Constructor options
*
* Constructor options are applied using an object literal passed as a second argument to the Handsontable constructor.
*
* ```js
* var hot = new Handsontable(document.getElementById('example1'), {
* data: myArray,
* width: 400,
* height: 300
* });
* ```
*
* ---
* ## Cascading configuration
*
* Handsontable 0.9 and newer is using *Cascading Configuration*, which is a fast way to provide configuration options
* for the entire table, including its columns and particular cells.
*
* Consider the following example:
* ```js
* var hot = new Handsontable(document.getElementById('example'), {
* readOnly: true,
* columns: [
* {readOnly: false},
* {},
* {}
* ],
* cells: function (row, col, prop) {
* var cellProperties = {};
*
* if (row === 0 && col === 0) {
* cellProperties.readOnly = true;
* }
*
* return cellProperties;
* }
* });
* ```
*
 * The above notation will result in all TDs being *read only*, except for the first column TDs, which will be *editable*, except for the TD in the top left corner, which will still be *read only*.
*
* ### The Cascading Configuration model
*
* ##### 1. Constructor
*
* Configuration options that are provided using first-level `handsontable(container, {option: "value"})` and `updateSettings` method.
*
* ##### 2. Columns
*
* Configuration options that are provided using second-level object `handsontable(container, {columns: {option: "value"}]})`
*
* ##### 3. Cells
*
* Configuration options that are provided using third-level function `handsontable(container, {cells: function: (row, col, prop){ }})`
*
* ---
* ## Architecture performance
*
 * The Cascading Configuration model is based on prototypical inheritance. It is much faster and more memory efficient compared
* to the previous model that used jQuery extend. See: [http://jsperf.com/extending-settings](http://jsperf.com/extending-settings).
*
* ---
* __Important notice:__ In order for the data separation to work properly, make sure that each instance of Handsontable has a unique `id`.
*/
function DefaultSettings() {}
DefaultSettings.prototype = {
/**
* License key for commercial version of Handsontable.
*
* @pro
* @type {String}
* @default 'trial'
*/
licenseKey: 'trial',
/**
* @description
   * Initial data source that will be bound to the data grid __by reference__ (editing the data grid alters the data source).
* Can be declared as an Array of Arrays, Array of Objects or a Function.
*
* See [Understanding binding as reference](https://docs.handsontable.com/tutorial-data-binding.html#page-reference).
*
* @type {Array|Function}
* @default undefined
*/
data: void 0,
/**
* @description
   * Defines the structure of a new row when the data source is an array of objects.
*
* See [data-schema](https://docs.handsontable.com/tutorial-data-sources.html#page-data-schema) for examples.
*
* @type {Object}
* @default undefined
*/
dataSchema: void 0,
/**
* Width of the grid. Can be a value or a function that returns a value.
*
* @type {Number|Function}
* @default undefined
*/
width: void 0,
/**
* Height of the grid. Can be a number or a function that returns a number.
*
* @type {Number|Function}
* @default undefined
*/
height: void 0,
/**
* @description
* Initial number of rows.
*
   * __Notice:__ This option only has an effect in the Handsontable constructor, and only if the `data` option is not provided
*
* @type {Number}
* @default 5
*/
startRows: 5,
/**
* @description
* Initial number of columns.
*
   * __Notice:__ This option only has an effect in the Handsontable constructor, and only if the `data` option is not provided
*
* @type {Number}
* @default 5
*/
startCols: 5,
/**
* Setting `true` or `false` will enable or disable the default row headers (1, 2, 3).
* You can also define an array `['One', 'Two', 'Three', ...]` or a function to define the headers.
   * If a function is set, the index of the row is passed as a parameter.
*
* @type {Boolean|Array|Function}
* @default null
* @example
* ```js
* ...
* // as boolean
* rowHeaders: true,
* ...
*
* ...
* // as array
* rowHeaders: [1, 2, 3],
* ...
*
* ...
* // as function
* rowHeaders: function(index) {
* return index + ': AB';
* },
* ...
* ```
*/
rowHeaders: void 0,
/**
* Setting `true` or `false` will enable or disable the default column headers (A, B, C).
* You can also define an array `['One', 'Two', 'Three', ...]` or a function to define the headers.
* If a function is set, then the index of the column is passed as a parameter.
*
* @type {Boolean|Array|Function}
* @default null
* @example
* ```js
* ...
* // as boolean
* colHeaders: true,
* ...
*
* ...
* // as array
* colHeaders: ['A', 'B', 'C'],
* ...
*
* ...
* // as function
* colHeaders: function(index) {
* return index + ': AB';
* },
* ...
* ```
*/
colHeaders: null,
/**
* Defines column widths in pixels. Accepts number, string (that will be converted to a number),
* array of numbers (if you want to define column width separately for each column) or a
* function (if you want to set column width dynamically on each render).
*
* @type {Array|Function|Number|String}
* @default undefined
* @example
* ```js
* ...
* // as numeric, for each column.
* colWidths: 100,
* ...
*
   * ...
* // as string, for each column.
* colWidths: '100px',
* ...
*
* ...
* // as array, based on visual indexes. The rest of the columns have a default width.
* colWidths: [100, 120, 90],
* ...
*
* ...
* // as function, based on visual indexes.
* colWidths: function(index) {
* return index * 10;
* },
* ...
* ```
*/
colWidths: void 0,
/**
* Defines row heights in pixels. Accepts numbers, strings (that will be converted into a number),
* array of numbers (if you want to define row height separately for each row) or a
* function (if you want to set row height dynamically on each render).
* If the ManualRowResize or AutoRowSize plugins are enabled, this is also the minimum height that can be set
* via either of those two plugins.
   * Height should be equal to or greater than 23px. The table is rendered incorrectly if the height is less than 23px.
*
* @type {Array|Function|Number|String}
* @default undefined
* @example
* ```js
* ...
* // as numeric, for each row.
* rowHeights: 100,
* ...
*
   * ...
* // as string, for each row.
* rowHeights: '100px',
* ...
*
* ...
* // as array, based on visual indexes. The rest of the rows have a default height.
* rowHeights: [100, 120, 90],
* ...
*
* ...
* // as function, based on visual indexes.
* rowHeights: function(index) {
* return index * 10;
* },
* ...
* ```
*/
rowHeights: void 0,
/**
* @description
* Defines the cell properties and data binding for certain columns.
*
* __Notice:__ Using this option sets a fixed number of columns (options `startCols`, `minCols`, `maxCols` will be ignored).
*
* See [documentation -> datasources.html](https://docs.handsontable.com/tutorial-data-sources.html#page-nested) for examples.
*
* @type {Array|Function}
* @default undefined
* @example
* ```js
* ...
   * // as an array of objects. The order of the objects in the array represents the physical indexes.
* columns: [
* {
* // column options for the first column
* type: 'numeric',
* numericFormat: {
* pattern: '0,0.00 $'
* }
* },
* {
* // column options for the second column
* type: 'text',
* readOnly: true
* }
* ],
* ...
*
* // or as function, based on physical indexes
* ...
* columns: function(index) {
* return {
* type: index > 0 ? 'numeric' : 'text',
* readOnly: index < 1
* }
* }
* ...
* ```
*/
columns: void 0,
/**
* @description
* Defines the cell properties for given `row`, `col`, `prop` coordinates.
* Any constructor or column option may be overwritten for a particular cell (row/column combination)
* using the `cells` property in the Handsontable constructor.
*
   * __Note:__ Parameters `row` and `col` always represent __physical indexes__. The example below shows how to execute
* operations based on the __visual__ representation of Handsontable.
*
* Possible values of `prop`:
* - property name for column's data source object, when dataset is an [array of objects](/tutorial-data-sources.html#page-object)
* - the same number as `col`, when dataset is an [array of arrays](/tutorial-data-sources.html#page-array)
*
* @type {Function}
* @default undefined
* @example
* ```js
* ...
* cells: function (row, col, prop) {
* var cellProperties = {};
* var visualRowIndex = this.instance.toVisualRow(row);
* var visualColIndex = this.instance.toVisualColumn(col);
*
* if (visualRowIndex === 0 && visualColIndex === 0) {
* cellProperties.readOnly = true;
* }
*
* return cellProperties;
* },
* ...
* ```
*/
cells: void 0,
/**
* Any constructor or column option may be overwritten for a particular cell (row/column combination), using `cell`
* array passed to the Handsontable constructor.
*
* @type {Array}
* @default []
* @example
* ```js
* ...
* cell: [
* {row: 0, col: 0, readOnly: true}
* ],
* ...
* ```
*/
cell: [],
/**
* @description
* If `true`, enables the {@link Comments} plugin, which enables an option to apply cell comments through the context menu
* (configurable with context menu keys `commentsAddEdit`, `commentsRemove`).
*
   * To initialize Handsontable with predefined comments, provide cell coordinates and comment text values in the form of an array.
*
* See [Comments](https://docs.handsontable.com/demo-comments_.html) demo for examples.
*
* @since 0.11.0
* @type {Boolean|Array}
* @default false
* @example
* ```js
* ...
* comments: [{row: 1, col: 1, comment: {value: "Test comment"}}],
* ...
* ```
*/
comments: false,
/**
* @description
* If `true`, enables the Custom Borders plugin, which enables an option to apply custom borders through the context menu (configurable with context menu key `borders`).
*
   * To initialize Handsontable with predefined custom borders, provide cell coordinates and border styles in the form of an array.
*
* See [Custom Borders](https://docs.handsontable.com/demo-custom-borders.html) demo for examples.
*
* @since 0.11.0
* @type {Boolean|Array}
* @default false
* @example
* ```js
* ...
* customBorders: [
* {range: {
* from: {row: 1, col: 1},
* to: {row: 3, col: 4}},
* left: {},
* right: {},
* top: {},
* bottom: {}
* }
* ],
* ...
*
* // or
* ...
* customBorders: [
* {row: 2, col: 2, left: {width: 2, color: 'red'},
* right: {width: 1, color: 'green'}, top: '', bottom: ''}
* ],
* ...
* ```
*/
customBorders: false,
/**
* Minimum number of rows. At least that number of rows will be created during initialization.
*
* @type {Number}
* @default 0
*/
minRows: 0,
/**
* Minimum number of columns. At least that number of columns will be created during initialization.
*
* @type {Number}
* @default 0
*/
minCols: 0,
/**
* Maximum number of rows. If set to a value lower than the initial row count, the data will be trimmed to the provided value as the number of rows.
*
* @type {Number}
* @default Infinity
*/
maxRows: Infinity,
/**
* Maximum number of cols. If set to a value lower than the initial col count, the data will be trimmed to the provided value as the number of cols.
*
* @type {Number}
* @default Infinity
*/
maxCols: Infinity,
/**
   * When set to 1 (or more), Handsontable will add a new row at the end of the grid if there are no more empty rows
   * (unless the number of rows exceeds the one set in the `maxRows` property).
*
* @type {Number}
* @default 0
*/
minSpareRows: 0,
/**
   * When set to 1 (or more), Handsontable will add a new column at the end of the grid if there are no more empty columns
   * (unless the number of columns exceeds the one set in the `maxCols` property).
*
* @type {Number}
* @default 0
*/
minSpareCols: 0,
/**
* If set to `false`, there won't be an option to insert new rows in the Context Menu.
*
* @type {Boolean}
* @default true
*/
allowInsertRow: true,
/**
* If set to `false`, there won't be an option to insert new columns in the Context Menu.
*
* @type {Boolean}
* @default true
*/
allowInsertColumn: true,
/**
* If set to `false`, there won't be an option to remove rows in the Context Menu.
*
* @type {Boolean}
* @default true
*/
allowRemoveRow: true,
/**
* If set to `false`, there won't be an option to remove columns in the Context Menu.
*
* @type {Boolean}
* @default true
*/
allowRemoveColumn: true,
/**
* @description
   * Defines how the table selection reacts. The selection supports three different behaviors defined as:
* * `'single'` Only a single cell can be selected.
* * `'range'` Multiple cells within a single range can be selected.
* * `'multiple'` Multiple ranges of cells can be selected.
*
   * To see how to interact with selection by getting selected data or changing styles of the selected cells, go to
* [https://docs.handsontable.com/demo-selecting-ranges.html](https://docs.handsontable.com/demo-selecting-ranges.html).
*
* @since 0.36.0
* @type {String}
* @default 'multiple'
*/
selectionMode: 'multiple',
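  /*
   * Illustrative usage of the three modes described above (editor's sketch,
   * not part of the original docs):
   *
   *   new Handsontable(container, {selectionMode: 'single'});   // one cell at a time
   *   new Handsontable(container, {selectionMode: 'range'});    // one contiguous range
   *   new Handsontable(container, {selectionMode: 'multiple'}); // many ranges (the default)
   */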
/**
   * Enables the fill handle (drag-down and copy-down) functionality, which shows a small rectangle in the bottom
   * right corner of the selected area, that lets you expand values to the adjacent cells.
*
* Possible values: `true` (to enable in all directions), `'vertical'` or `'horizontal'` (to enable in one direction),
* `false` (to disable completely). Setting to `true` enables the fillHandle plugin.
*
   * Since 0.23.0 you can pass an object to the plugin, which allows you to add more options for this functionality. If the `autoInsertRow`
   * option is `true`, the fill handle will create new rows until it reaches the last row. It is enabled by default.
*
* @example
* ```js
* ...
* fillHandle: true // enable plugin in all directions and with autoInsertRow as true
* ...
* // or
* ...
* fillHandle: 'vertical' // enable plugin in vertical direction and with autoInsertRow as true
* ...
* // or
* ...
* fillHandle: { // enable plugin in both directions and with autoInsertRow as false
* autoInsertRow: false,
* }
* // or
* ...
* fillHandle: { // enable plugin in vertical direction and with autoInsertRow as false
* autoInsertRow: false,
* direction: 'vertical' // 'vertical' or 'horizontal'
* }
* ```
*
* @type {Boolean|String|Object}
* @default true
*/
fillHandle: true,
/**
* Allows to specify the number of fixed (or *frozen*) rows at the top of the table.
*
* @type {Number}
* @default 0
* @example
* ```js
* fixedRowsTop: 3 // This would freeze the top 3 rows of the table.
* ```
*/
fixedRowsTop: 0,
/**
* Allows to specify the number of fixed (or *frozen*) rows at the bottom of the table.
*
* @pro
* @type {Number}
* @default 0
* @example
* ```js
   * fixedRowsBottom: 3 // This would freeze the bottom 3 rows of the table.
* ```
*/
fixedRowsBottom: 0,
/**
* Allows to specify the number of fixed (or *frozen*) columns on the left of the table.
*
* @type {Number}
* @default 0
* @example
* ```js
   * fixedColumnsLeft: 3 // This would freeze the 3 leftmost columns of the table.
* ```
*/
fixedColumnsLeft: 0,
/**
* If `true`, mouse click outside the grid will deselect the current selection.
* Can be a function that takes the click event target and returns a boolean.
*
* @type {Boolean|Function}
* @default true
*/
outsideClickDeselects: true,
/**
   * If `true`, <kbd>ENTER</kbd> begins editing mode (like in Google Docs). If `false`, <kbd>ENTER</kbd> moves to the next
   * row (like in Excel) and adds a new row if necessary. <kbd>TAB</kbd> adds a new column if necessary.
*
* @type {Boolean}
* @default true
*/
enterBeginsEditing: true,
/**
* Defines the cursor movement after <kbd>ENTER</kbd> was pressed (<kbd>SHIFT</kbd> + <kbd>ENTER</kbd> uses a negative vector).
* Can be an object or a function that returns an object. The event argument passed to the function
* is a DOM Event object received after the <kbd>ENTER</kbd> key has been pressed. This event object can be used to check
* whether user pressed <kbd>ENTER</kbd> or <kbd>SHIFT</kbd> + <kbd>ENTER</kbd>.
*
* @type {Object|Function}
* @default {row: 1, col: 0}
*/
enterMoves: {row: 1, col: 0},
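  /*
   * Illustrative sketch of the function form described above (editor's
   * addition, not from the original docs; the handler is assumed to receive
   * the keydown event, as the text describes):
   *
   *   enterMoves: function(event) {
   *     // jump two columns right on CTRL + ENTER, otherwise move down;
   *     // SHIFT + ENTER still negates the returned vector, as noted above
   *     return event.ctrlKey ? {row: 0, col: 2} : {row: 1, col: 0};
   *   },
   */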
/**
* Defines the cursor movement after <kbd>TAB</kbd> is pressed (<kbd>SHIFT</kbd> + <kbd>TAB</kbd> uses a negative vector).
* Can be an object or a function that returns an object. The event argument passed to the function
* is a DOM Event object received after the <kbd>TAB</kbd> key has been pressed. This event object can be used to check
* whether user pressed <kbd>TAB</kbd> or <kbd>SHIFT</kbd> + <kbd>TAB</kbd>.
*
* @type {Object}
* @default {row: 0, col: 1}
*/
tabMoves: {row: 0, col: 1},
/**
* If `true`, pressing <kbd>TAB</kbd> or right arrow in the last column will move to first column in next row.
*
* @type {Boolean}
* @default false
*/
autoWrapRow: false,
/**
* If `true`, pressing <kbd>ENTER</kbd> or down arrow in the last row will move to the first row in the next column.
*
* @type {Boolean}
* @default false
*/
autoWrapCol: false,
/**
* @description
* Turns on saving the state of column sorting, column positions and column sizes in local storage.
*
* You can save any sort of data in local storage to preserve table state between page reloads.
* In order to enable data storage mechanism, `persistentState` option must be set to `true` (you can set it
   * either during Handsontable initialization or using the `updateSettings` method). When `persistentState` is enabled, it exposes 3 hooks:
*
* __persistentStateSave__ (key: String, value: Mixed)
*
* * Saves value under given key in browser local storage.
*
* __persistentStateLoad__ (key: String, valuePlaceholder: Object)
*
   * * Loads `value`, saved under the given key, from browser local storage. The loaded `value` will be saved in `valuePlaceholder.value`
   * (this is due to the specific behaviour of the `Hooks.run()` method). If no value has been saved under the given key,
   * `valuePlaceholder.value` will be `undefined`.
*
* __persistentStateReset__ (key: String)
*
* * Clears the value saved under `key`. If no `key` is given, all values associated with table will be cleared.
*
* __Note:__ The main reason behind using `persistentState` hooks rather than regular LocalStorage API is that it
* ensures separation of data stored by multiple Handsontable instances. In other words, if you have two (or more)
* instances of Handsontable on one page, data saved by one instance won't be accessible by the second instance.
* Those two instances can store data under the same key and no data would be overwritten.
*
* __Important:__ In order for the data separation to work properly, make sure that each instance of Handsontable has a unique `id`.
*
* @type {Boolean}
* @default false
*/
persistentState: void 0,
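  /*
   * Editor's sketch of the three hooks described above, run through the
   * instance's `runHooks` method (the hook names come from the docs above;
   * `hot` is assumed to be an instance created with persistentState: true):
   *
   *   hot.runHooks('persistentStateSave', 'columnOrder', [2, 0, 1]);
   *
   *   var holder = {};
   *   hot.runHooks('persistentStateLoad', 'columnOrder', holder);
   *   // holder.value -> [2, 0, 1], per the valuePlaceholder contract above
   *
   *   hot.runHooks('persistentStateReset', 'columnOrder');
   */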
/**
* Class name for all visible rows in the current selection.
*
* @type {String}
* @default undefined
* @example
* ```js
* currentRowClassName: 'currentRow' // This will add a 'currentRow' class name to appropriate table cells.
* ```
*/
currentRowClassName: void 0,
/**
* Class name for all visible columns in the current selection.
*
* @type {String}
* @default undefined
* @example
* ```js
* currentColClassName: 'currentColumn' // This will add a 'currentColumn' class name to appropriate table cells.
* ```
*/
currentColClassName: void 0,
/**
* Class name for all visible headers in current selection.
*
* @type {String}
* @since 0.27.0
* @default 'ht__highlight'
* @example
* ```js
* currentHeaderClassName: 'ht__highlight' // This will add a 'ht__highlight' class name to appropriate table headers.
* ```
*/
currentHeaderClassName: 'ht__highlight',
/**
* Class name for all active headers in selections. The header will be marked with this class name
* only when a whole column or row will be selected.
*
* @type {String}
* @since 0.38.2
* @default 'ht__active_highlight'
* @example
* ```js
* activeHeaderClassName: 'ht__active_highlight' // This will add a 'ht__active_highlight' class name to appropriate table headers.
* ```
*/
activeHeaderClassName: 'ht__active_highlight',
/**
* Class name for the Handsontable container element.
*
* @type {String|Array}
* @default undefined
*/
className: void 0,
/**
* Class name for all tables inside container element.
*
* @since 0.17.0
* @type {String|Array}
* @default undefined
*/
tableClassName: void 0,
/**
* @description
* Defines how the columns react, when the declared table width is different than the calculated sum of all column widths.
   * [See more in the demo](https://docs.handsontable.com/demo-stretching.html). Possible values:
* * `'none'` Disable stretching
* * `'last'` Stretch only the last column
* * `'all'` Stretch all the columns evenly
*
* @type {String}
* @default 'none'
*/
stretchH: 'none',
/**
   * Lets you overwrite the default `isEmptyRow` method, which checks if the row at the provided index is empty.
*
* @type {Function}
* @param {Number} row Visual row index.
* @returns {Boolean}
*/
isEmptyRow(row) {
var col,
colLen,
value,
meta;
for (col = 0, colLen = this.countCols(); col < colLen; col++) {
value = this.getDataAtCell(row, col);
if (value !== '' && value !== null && isDefined(value)) {
if (typeof value === 'object') {
meta = this.getCellMeta(row, col);
return isObjectEqual(this.getSchema()[meta.prop], value);
}
return false;
}
}
return true;
},
/**
   * Lets you overwrite the default `isEmptyCol` method, which checks if the column at the provided index is empty.
*
* @type {Function}
* @param {Number} col Visual column index
* @returns {Boolean}
*/
isEmptyCol(col) {
var row,
rowLen,
value;
for (row = 0, rowLen = this.countRows(); row < rowLen; row++) {
value = this.getDataAtCell(row, col);
if (value !== '' && value !== null && isDefined(value)) {
return false;
}
}
return true;
},
/**
   * When set to `true`, the table is re-rendered when it is detected that it was made visible in the DOM.
*
* @type {Boolean}
* @default true
*/
observeDOMVisibility: true,
/**
* If set to `true`, Handsontable will accept values that were marked as invalid by the cell `validator`.
   * It will result in *invalid* cells being treated as *valid* (the *invalid* value will be saved into the Handsontable data source).
* If set to `false`, Handsontable will *not* accept the invalid values and won't allow the user to close the editor.
* This option will be particularly useful when used with the Autocomplete's `strict` mode.
*
* @type {Boolean}
* @default true
* @since 0.9.5
*/
allowInvalid: true,
/**
* If set to `true`, Handsontable will accept values that are empty (`null`, `undefined` or `''`).
* If set to `false`, Handsontable will *not* accept the empty values and mark cell as invalid.
*
* @example
* ```js
* ...
* allowEmpty: true // allow empty values for all cells (whole table)
* ...
* // or
* ...
* columns: [
* // allow empty values only for 'date' column
* {data: 'date', dateFormat: 'DD/MM/YYYY', allowEmpty: true}
* ]
* ...
* ```
*
* @type {Boolean}
* @default true
* @since 0.23.0
*/
allowEmpty: true,
/**
* CSS class name for cells that did not pass validation.
*
* @type {String}
* @default 'htInvalid'
*/
invalidCellClassName: 'htInvalid',
/**
   * When set to a non-empty string, it is displayed as the cell content for empty cells. If a value of a different type is provided,
* it will be stringified and applied as a string.
*
* @type {Mixed}
* @default false
*/
placeholder: false,
/**
* CSS class name for cells that have a placeholder in use.
*
* @type {String}
* @default 'htPlaceholder'
*/
placeholderCellClassName: 'htPlaceholder',
/**
* CSS class name for read-only cells.
*
* @type {String}
* @default 'htDimmed'
*/
readOnlyCellClassName: 'htDimmed',
/**
* @description
* If a string is provided, it may be one of the following predefined values:
* * `autocomplete`,
* * `checkbox`,
* * `html`,
* * `numeric`,
   * * `password`,
   * * `text`.
*
   * Or you can [register](https://docs.handsontable.com/demo-custom-renderers.html) the custom renderer under a specified name and use
* its name as an alias in your configuration.
*
* If a function is provided, it will receive the following arguments:
* ```js
* function(instance, TD, row, col, prop, value, cellProperties) {}
* ```
*
   * You can read more about custom renderers [in the documentation](https://docs.handsontable.com/demo-custom-renderers.html).
*
* @example
* ```js
* ...
* Handsontable.renderers.registerRenderer('my.renderer', function(instance, TD, row, col, prop, value, cellProperties) {
* TD.innerHTML = value;
* });
* ...
* columns: [
* {
* editor: 'select',
* renderer: 'autocomplete' // as string
* },
* {
* renderer: 'my.renderer' // custom renderer as an alias
* },
* {
* // renderer as custom function
* renderer: function(hotInstance, TD, row, col, prop, value, cellProperties) {
* TD.style.color = 'blue';
* TD.innerHTML = value;
* }
* }
* ]
* ...
* ```
*
* @type {String|Function}
* @default undefined
*/
renderer: void 0,
/**
* CSS class name added to the commented cells.
*
* @type {String}
* @default 'htCommentCell'
*/
commentedCellClassName: 'htCommentCell',
/**
* If set to `true`, it enables the browser's native selection of a fragment of the text within a single cell, between adjacent cells or in a whole table.
* If set to `'cell'`, it enables the possibility of selecting a fragment of the text within a single cell's body.
*
* @type {Boolean|String}
* @default false
*/
fragmentSelection: false,
/**
* @description
* Make cell [read only](https://docs.handsontable.com/demo-read-only.html).
*
* @type {Boolean}
* @default false
*/
readOnly: false,
/**
* @description
* When added to a `column` property, it skips the column on paste and pastes the data on the next column to the right.
*
* @type {Boolean}
* @default false
*/
skipColumnOnPaste: false,
/**
* @description
   * Setting this to `true` enables the search plugin (see [demo](https://docs.handsontable.com/demo-search-for-values.html)).
*
* @type {Boolean}
* @default false
*/
search: false,
/**
* @description
* Shortcut to define the combination of the cell renderer, editor and validator for the column, cell or whole table.
*
* Possible values:
* * [autocomplete](https://docs.handsontable.com/demo-autocomplete.html)
* * [checkbox](https://docs.handsontable.com/demo-checkbox.html)
* * [date](https://docs.handsontable.com/demo-date.html)
* * [dropdown](https://docs.handsontable.com/demo-dropdown.html)
* * [handsontable](https://docs.handsontable.com/demo-handsontable.html)
* * [numeric](https://docs.handsontable.com/demo-numeric.html)
* * [password](https://docs.handsontable.com/demo-password.html)
* * text
* * [time](https://docs.handsontable.com/demo-time.html)
*
   * Or you can register the custom cell type under a specified name and use
* its name as an alias in your configuration.
*
* @example
* ```js
* ...
* Handsontable.cellTypes.registerCellType('my.type', {
* editor: MyEditorClass,
* renderer: function(hot, td, row, col, prop, value, cellProperties) {
* td.innerHTML = value;
* },
* validator: function(value, callback) {
* callback(value === 'foo' ? true : false);
* }
* });
* ...
* columns: [
* {
* type: 'text'
* },
* {
* type: 'my.type' // an alias to custom type
* },
* {
* type: 'checkbox'
* }
* ]
* ...
* ```
*
* @type {String}
* @default 'text'
*/
type: 'text',
/**
* @description
* Make cell copyable (pressing <kbd>CTRL</kbd> + <kbd>C</kbd> on your keyboard moves its value to system clipboard).
*
* __Note:__ this setting is `false` by default for cells with type `password`.
*
* @type {Boolean}
* @default true
* @since 0.10.2
*/
copyable: true,
/**
* Defines the editor for the table/column/cell.
*
* If a string is provided, it may be one of the following predefined values:
* * [autocomplete](https://docs.handsontable.com/demo-autocomplete.html)
* * [checkbox](https://docs.handsontable.com/demo-checkbox.html)
* * [date](https://docs.handsontable.com/demo-date.html)
* * [dropdown](https://docs.handsontable.com/demo-dropdown.html)
* * [handsontable](https://docs.handsontable.com/demo-handsontable.html)
* * [mobile](https://docs.handsontable.com/demo-mobiles-and-tablets.html)
* * [password](https://docs.handsontable.com/demo-password.html)
* * [select](https://docs.handsontable.com/demo-select.html)
* * text
*
   * Or you can [register](https://docs.handsontable.com/tutorial-cell-editor.html#registering-an-editor) the custom editor under a specified name and use
* its name as an alias in your configuration.
*
* To disable cell editing completely set `editor` property to `false`.
*
* @example
* ```js
* ...
* columns: [
* {
* editor: 'select'
* },
* {
* editor: false
* }
* ]
* ...
* ```
*
* @type {String|Function|Boolean}
* @default 'text'
*/
editor: void 0,
/**
* @description
* Autocomplete definitions. See [autocomplete demo](https://docs.handsontable.com/demo-autocomplete.html) for examples and definitions.
*
* @type {Array}
* @default undefined
*/
autoComplete: void 0,
/**
   * Controls the number of choices for the autocomplete (or dropdown) typed cells. After exceeding it, a scrollbar for the dropdown list of choices will appear.
*
* @since 0.18.0
* @type {Number}
* @default 10
*/
visibleRows: 10,
/**
   * Makes the autocomplete or dropdown width the same as the edited cell width. If `false`, the editor will be scaled
* according to its content.
*
* @since 0.17.0
* @type {Boolean}
* @default true
*/
trimDropdown: true,
/**
   * Setting this to `true` enables the debug mode, currently used to test the correctness of the row and column
* header fixed positioning on a layer above the master table.
*
* @type {Boolean}
* @default false
*/
debug: false,
/**
* When set to `true`, the text of the cell content is wrapped if it does not fit in the fixed column width.
*
* @type {Boolean}
* @default true
* @since 0.11.0
*/
wordWrap: true,
/**
* CSS class name added to cells with cell meta `wordWrap: false`.
*
* @type {String}
* @default 'htNoWrap'
* @since 0.11.0
*/
noWordWrapClassName: 'htNoWrap',
/**
* @description
   * Defines if the right-click context menu should be enabled. The context menu allows creating a new row or
   * column at any place in the grid, among [other features](https://docs.handsontable.com/demo-context-menu.html).
* Possible values:
* * `true` (to enable default options),
* * `false` (to disable completely)
* * an array of [predefined options](https://docs.handsontable.com/demo-context-menu.html#page-specific),
* * an object [with defined structure](https://docs.handsontable.com/demo-context-menu.html#page-custom)
*
* See [the context menu demo](https://docs.handsontable.com/demo-context-menu.html) for examples.
*
* @example
* ```js
* ...
* // as a boolean
* contextMenu: true
* ...
* // as an array
* contextMenu: ['row_above', 'row_below', '--------', 'undo', 'redo']
* ...
   *
   * ...
* // as an object (`name` attribute is required in the custom keys)
* contextMenu: {
* items: {
* "option1": {
* name: "option1"
* },
* "option2": {
* name: "option2",
* submenu: {
* items: [
* {
* key: "option2:suboption1",
* name: "option2:suboption1",
* callback: function(key, options) {
* ...
* }
* },
* ...
* ]
* }
* }
* }
* }
* ...
* ```
* @type {Boolean|Array|Object}
* @default undefined
*/
contextMenu: void 0,
/**
* @description
* Disable or enable the copy/paste functionality.
*
* @example
* ```js
* ...
* copyPaste: false,
* ...
* ```
*
* @type {Boolean}
* @default true
*/
copyPaste: true,
/**
* If `true`, undo/redo functionality is enabled.
*
* @type {Boolean}
* @default undefined
*/
undo: void 0,
/**
* @description
* Turns on [Column sorting](https://docs.handsontable.com/demo-sorting-data.html).
* Can be either a boolean (true/false) or an object with a declared sorting options. See the below example:
*
* @example
* ```js
* ...
* // as boolean
* columnSorting: true
* ...
* // as a object with initial order (sort ascending column at index 2)
* columnSorting: {
* column: 2,
* sortOrder: 'asc', // 'asc' = ascending, 'desc' = descending, 'none' = original order
* sortEmptyCells: true // true = the table sorts empty cells, false = the table moves all empty cells to the end of the table
* }
* ...
* ```
*
* @type {Boolean|Object}
* @default undefined
*/
columnSorting: void 0,
/**
* @description
   * Turns on [Manual column move](https://docs.handsontable.com/demo-moving-rows-and-columns.html) if set to a boolean, or defines the initial
   * column order if set to an array of column indexes.
*
* @example
* ```js
* ...
* // as boolean
* manualColumnMove: true
* ...
   * // as an array with initial order (move column index at 0 to 1 and move column index at 1 to 4)
* manualColumnMove: [1, 4]
* ...
* ```
*
* @type {Boolean|Array}
* @default undefined
*/
manualColumnMove: void 0,
/**
* @description
   * Turns on [Manual column resize](https://docs.handsontable.com/demo-resizing.html) if set to a boolean, or defines the initial
   * column widths if set to an array of numbers.
*
* @example
* ```js
* ...
* // as boolean
* manualColumnResize: true
* ...
   * // as an array with initial widths (column at 0 index has 40px and column at 1 index has 50px)
* manualColumnResize: [40, 50]
* ...
* ```
*
* @type {Boolean|Array}
* @default undefined
*/
manualColumnResize: void 0,
/**
* @description
   * Turns on [Manual row move](https://docs.handsontable.com/demo-moving-rows-and-columns.html) if set to a boolean, or defines the initial
   * row order if set to an array of row indexes.
*
* @example
* ```js
* ...
* // as boolean
* manualRowMove: true
* ...
   * // as an array with initial order (move row index at 0 to 1 and move row index at 1 to 4)
* manualRowMove: [1, 4]
* ...
* ```
*
* @type {Boolean|Array}
* @default undefined
* @since 0.11.0
*/
manualRowMove: void 0,
/**
* @description
   * Turns on [Manual row resize](https://docs.handsontable.com/demo-resizing.html) if set to a boolean, or defines the initial
   * row heights if set to an array of numbers.
*
* @example
* ```js
* ...
* // as boolean
* manualRowResize: true
* ...
   * // as an array with initial heights (row at 0 index has 40px and row at 1 index has 50px)
* manualRowResize: [40, 50]
* ...
* ```
*
* @type {Boolean|Array}
* @default undefined
* @since 0.11.0
*/
manualRowResize: void 0,
/**
* @description
   * If set to `true`, it enables the possibility to merge cells. If set to an array of objects, it merges the cells provided in the objects (see the example below).
* [More information on the demo page.](https://docs.handsontable.com/demo-merge-cells.html)
*
* @example
* ```js
* // enables the mergeCells plugin:
   * mergeCells: true
* ...
* // declares a list of merged sections:
* mergeCells: [
* {row: 1, col: 1, rowspan: 3, colspan: 3}, // rowspan and colspan properties declare the width and height of a merged section in cells
* {row: 3, col: 4, rowspan: 2, colspan: 2},
* {row: 5, col: 6, rowspan: 3, colspan: 3}
* ]
* ```
* @type {Boolean|Array}
* @default false
*/
mergeCells: false,
/**
* Number of rows to be rendered outside of the visible part of the table.
   * By default, it's set to `'auto'`, which makes Handsontable attempt to calculate the best offset performance-wise.
*
* You may test out different values to find the best one that works for your specific implementation.
*
* @type {Number|String}
* @default 'auto'
*/
viewportRowRenderingOffset: 'auto',
/**
* Number of columns to be rendered outside of the visible part of the table.
* By default, it's set to `'auto'`, which makes Handsontable try calculating the best offset performance-wise.
*
* You may experiment with the value to find the one that works best for your specific implementation.
*
* @type {Number|String}
* @default 'auto'
*/
viewportColumnRenderingOffset: 'auto',
/**
* A function, regular expression or a string, which will be used in the process of cell validation.
* If a function is used, be sure to execute the callback argument with either `true` (`callback(true)`) if the validation passed
* or with `false` (`callback(false)`), if the validation failed.
   * Note that `this` in the function points to the `cellProperties` object.
*
* If a string is provided, it may be one of the following predefined values:
* * `autocomplete`,
* * `date`,
* * `numeric`,
* * `time`.
*
   * Or you can [register](https://docs.handsontable.com/demo-data-validation.html) the validator function under a specified name and use
* its name as an alias in your configuration.
*
* See more [in the demo](https://docs.handsontable.com/demo-data-validation.html).
*
* @example
* ```js
* // as a function
* columns: [
* {
* validator: function(value, callback) { // validation rules }
* }
* ]
* ...
* // as a regexp
* columns: [
* {
* validator: /^[0-9]$/ // regular expression
* }
* ]
* // as a string
* columns: [
* {
* validator: 'numeric'
* }
* ]
* ```
* @type {Function|RegExp|String}
* @default undefined
* @since 0.9.5
*/
validator: void 0,
/**
* @description
   * Disable visual cell selection.
*
* Possible values:
* * `true` - Disables any type of visual selection (current and area selection),
   * * `false` - Enables any type of visual selection. This is the default value.
* * `'current'` - Disables the selection of a currently selected cell, the area selection is still present.
* * `'area'` - Disables the area selection, the currently selected cell selection is still present.
* * `'header'` - Disables the headers selection, the currently selected cell selection is still present (available since 0.36.0).
*
* @type {Boolean|String|Array}
* @default false
* @since 0.13.2
* @example
* ```js
* ...
* // as boolean
* disableVisualSelection: true,
* ...
*
* ...
* // as string ('current', 'area' or 'header')
* disableVisualSelection: 'current',
* ...
*
* ...
* // as array
* disableVisualSelection: ['current', 'area'],
* ...
* ```
*/
disableVisualSelection: false,
/**
* @description
* Set whether to display the current sorting order indicator (a triangle icon in the column header, specifying the sorting order).
*
* @type {Boolean}
* @default false
* @since 0.15.0-beta3
*/
sortIndicator: void 0,
/**
* Disable or enable ManualColumnFreeze plugin.
*
* @type {Boolean}
* @default false
*/
manualColumnFreeze: void 0,
/**
* @description
* Defines whether Handsontable should trim the whitespace at the beginning and the end of the cell contents.
*
* @type {Boolean}
* @default true
*/
trimWhitespace: true,
settings: void 0,
/**
* @description
* Defines data source for Autocomplete or Dropdown cell types.
*
* @example
* ```js
* ...
* // source as a array
* columns: [{
* type: 'autocomplete',
* source: ['A', 'B', 'C', 'D']
* }]
* ...
* // source as a function
* columns: [{
* type: 'autocomplete',
* source: function(query, callback) {
* fetch('http://example.com/query?q=' + query, function(response) {
* callback(response.items);
* })
* }
* }]
* ...
* ```
*
* @type {Array|Function}
* @default undefined
*/
source: void 0,
/**
* @description
* Defines the column header name.
*
* @example
* ```js
* ...
* columns: [{
* title: 'First name',
* type: 'text',
* },
* {
* title: 'Last name',
* type: 'text',
* }]
* ...
* ```
*
* @type {String}
* @default undefined
*/
title: void 0,
/**
* Data template for `'checkbox'` type when checkbox is checked.
*
* @example
* ```js
* checkedTemplate: 'good'
*
* // if a checkbox-typed cell is checked, then getDataAtCell(x,y), where x and y are the coordinates of the cell
* // will return 'good'.
* ```
* @type {Boolean|String}
* @default true
*/
checkedTemplate: void 0,
/**
* Data template for `'checkbox'` type when checkbox is unchecked.
*
* @example
* ```js
* uncheckedTemplate: 'bad'
*
* // if a checkbox-typed cell is not checked, then getDataAtCell(x,y), where x and y are the coordinates of the cell
* // will return 'bad'.
* ```
* @type {Boolean|String}
* @default false
*/
uncheckedTemplate: void 0,
/**
* @description
   * Object which describes if the renderer should create the checkbox element with a label element as its parent. Option desired for
* [checkbox](https://docs.handsontable.com/demo-checkbox.html)-typed cells.
*
* By default the [checkbox](https://docs.handsontable.com/demo-checkbox.html) renderer renders the checkbox without a label.
*
* Possible object properties:
   * * `property` - Defines the property name of the data object, which will be used as a label.
* (eg. `label: {property: 'name.last'}`). This option works only if data was passed as an array of objects.
* * `position` - String which describes where to place the label text (before or after checkbox element).
   * Valid values are `'before'` and `'after'` (defaults to `'after'`).
* * `value` - String or a Function which will be used as label text.
*
* @example
* ```js
* ...
* columns: [{
* type: 'checkbox',
* label: {position: 'after', value: 'My label: '}
* }]
* ...
* ```
*
* @since 0.19.0
* @type {Object}
* @default undefined
*/
label: void 0,
/**
   * Display format. This option is desired for [numeric-typed](https://docs.handsontable.com/demo-numeric.html) cells. The format is described by two properties:
*
   * - pattern, which is handled by `numbro` for the purpose of formatting numbers to the desired pattern. The list of supported patterns can be found [here](http://numbrojs.com/format.html#numbers).
   * - culture, which is handled by `numbro` for the purpose of formatting currencies. Examples showing how it works can be found [here](http://numbrojs.com/format.html#currency). The list of supported cultures can be found [here](http://numbrojs.com/languages.html#supported-languages).
*
   * __Note:__ Please keep in mind that this option is used only to format the displayed output! It has no effect on the input data provided for the cell. Numeric data can be entered into the table only as floats (with a dot or a comma as the decimal separator) or integers, and is stored in the source dataset as JavaScript numbers.
*
* Since 0.26.0 Handsontable uses [numbro](http://numbrojs.com/) as a main library for numbers formatting.
*
* @example
* ```js
* ...
* columns: [{
* type: 'numeric',
* numericFormat: {
* pattern: '0,00',
* culture: 'en-US'
* }
* }]
* ...
* ```
*
* @since 0.35.0
* @type {Object}
*/
numericFormat: void 0,
/**
* Language for Handsontable translation. Possible language codes are [listed here](https://docs.handsontable.com/tutorial-internationalization.html#available-languages).
*
* @type {String}
* @default 'en-US'
*/
language: void 0,
/**
* @description
* Data source for [select](https://docs.handsontable.com/demo-select.html)-typed cells.
*
* @example
* ```js
* ...
* columns: [{
* editor: 'select',
* selectOptions: ['A', 'B', 'C'],
* }]
* ...
* ```
*
* @type {Array}
*/
selectOptions: void 0,
/**
* Enables or disables the autoColumnSize plugin. Default value is `undefined`, which has the same effect as `true`.
* Disabling this plugin can increase performance, as no size-related calculations would be done.
*
   * Column width calculations are divided into sync and async parts. Each of these parts has its own advantages and
* disadvantages. Synchronous calculations are faster but they block the browser UI, while the slower asynchronous operations don't
* block the browser UI.
*
* To configure the sync/async distribution, you can pass an absolute value (number of columns) or a percentage value.
* `syncLimit` option is available since 0.16.0.
*
   * You can also use the `useHeaders` option to take the column headers' width into account in these calculations.
*
* @example
* ```js
* ...
* // as a number (300 columns in sync, rest async)
* autoColumnSize: {syncLimit: 300},
* ...
*
* ...
* // as a string (percent)
* autoColumnSize: {syncLimit: '40%'},
* ...
*
* ...
   * // use headers' width while calculating the column width
* autoColumnSize: {useHeaders: true},
* ...
*
* ```
*
* @type {Object|Boolean}
* @default {syncLimit: 50}
*/
autoColumnSize: void 0,
/**
* Enables or disables autoRowSize plugin. Default value is `undefined`, which has the same effect as `false` (disabled).
* Enabling this plugin can decrease performance, as size-related calculations would be performed.
*
   * Row height calculations are divided into sync and async stages. Each of these stages has its own advantages and
* disadvantages. Synchronous calculations are faster but they block the browser UI, while the slower asynchronous operations don't
* block the browser UI.
*
   * To configure the sync/async distribution, you can pass an absolute value (number of rows) or a percentage value.
   * The `syncLimit` option is available since 0.16.0.
*
* @example
* ```js
* ...
   * // as a number (300 rows in sync, rest async)
* autoRowSize: {syncLimit: 300},
* ...
*
* ...
* // as string (percent)
* autoRowSize: {syncLimit: '40%'},
* ...
* ```
* @type {Object|Boolean}
* @default {syncLimit: 1000}
*/
autoRowSize: void 0,
/**
* Date validation format.
*
* Option desired for `'date'` - typed cells.
*
* @example
* ```js
* ...
* columns: [{
* type: 'date',
* dateFormat: 'MM/DD/YYYY'
* }]
* ...
* ```
*
* @type {String}
* @default 'DD/MM/YYYY'
*/
dateFormat: void 0,
/**
* If `true` then dates will be automatically formatted to match the desired format.
*
   * Option desired for `'date'`-typed cells.
*
* @example
* ```js
* ...
* columns: [{
* type: 'date',
* dateFormat: 'YYYY-MM-DD',
* correctFormat: true
* }]
* ...
* ```
*
* @type {Boolean}
* @default false
*/
correctFormat: false,
/**
   * Definition of the default value which will fill the empty cells.
*
* Option desired for `'date'`-typed cells.
*
* @example
* ```js
* ...
* columns: [{
* type: 'date',
   * defaultDate: '2015-02-02'
* }]
* ...
* ```
*
* @type {String}
*/
defaultDate: void 0,
/**
   * If set to `true`, the value entered into the cell must match (case-sensitive) the autocomplete source. Otherwise, the cell won't pass validation.
* When filtering the autocomplete source list, the editor will be working in case-insensitive mode.
*
* Option desired for `autocomplete`-typed cells.
*
* @example
* ```js
* ...
* columns: [{
* type: 'autocomplete',
* source: ['A', 'B', 'C'],
* strict: true
* }]
* ...
* ```
*
* @type {Boolean}
*/
strict: void 0,
/**
* @description
   * If set to `true`, data defined in `source` of the autocomplete or dropdown cell will be treated as HTML.
*
* __Warning:__ Enabling this option can cause serious XSS vulnerabilities.
*
* Option desired for `'autocomplete'`-typed cells.
* @example
* ```js
* ...
* columns: [{
* type: 'autocomplete',
* allowHtml: true,
* source: ['<b>foo</b>', '<b>bar</b>']
* }]
* ...
* ```
* @type {Boolean}
* @default false
*/
allowHtml: false,
/**
   * If set to `true`, the virtual rendering mechanism for Handsontable will be disabled.
*
* @type {Boolean}
*/
renderAllRows: void 0,
/**
   * Prevents the table from overlapping outside the parent element. If `'horizontal'` is chosen, the table will display a horizontal
   * scrollbar when the parent's width is narrower than the table's width.
   *
   * Possible values:
   * * `false` - Disables the functionality (default option).
   * * `horizontal` - Prevents horizontal overflow of the table.
   * * `vertical` - Prevents vertical overflow of the table (not implemented yet).
*
* @since 0.20.3
* @example
* ```js
* ...
* preventOverflow: 'horizontal'
* ...
* ```
*
* @type {String|Boolean}
*/
preventOverflow: false,
/**
* @description
* Plugin allowing binding the table rows with their headers.
   * If the plugin is enabled, the table row headers will "stick" to the rows when they are hidden/moved. Basically, if at initialization
   * row 0 has a header titled "A", it will keep it no matter what you do with the table.
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean|String}
* @example
*
* ```js
* ...
* var hot = new Handsontable(document.getElementById('example'), {
   * data: getData(),
* bindRowsWithHeaders: true
* });
* ...
* ```
*
*/
bindRowsWithHeaders: void 0,
/**
* @description
   * The CollapsibleColumns plugin allows collapsing of columns covered by a header with the `colspan` property defined.
*
* Clicking the "collapse/expand" button collapses (or expands) all "child" headers except the first one.
*
* Setting the `collapsibleColumns` property to `true` will display a "collapse/expand" button in every header with a defined
* `colspan` property.
*
* To limit this functionality to a smaller group of headers, define the `collapsibleColumns` property as an array of objects, as in
* the example below.
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean|Array}
* @example
* ```js
* ...
* collapsibleColumns: [
* {row: -4, col: 1, collapsible: true},
* {row: -3, col: 5, collapsible: true}
* ]
* ...
* // or
* ...
* collapsibleColumns: true
* ...
* ```
*/
collapsibleColumns: void 0,
/**
* @description
   * Allows making pre-defined calculations on the cell values and displaying the results within Handsontable.
* See the demo for more information.
*
* @pro
* @since 1.0.0-beta1
* @type {Object}
*/
columnSummary: void 0,
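  /*
   * Editor's sketch (entry shape assumed from the plugin's demo, not stated
   * above): each entry points a calculation at a destination cell, e.g.
   *
   *   columnSummary: [{
   *     destinationRow: 0,
   *     destinationColumn: 2,
   *     reversedRowCoords: true, // count the destination row from the bottom
   *     type: 'sum'
   *   }]
   */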
/**
* This plugin allows adding a configurable dropdown menu to the table's column headers.
* The dropdown menu acts like the Context Menu, but is triggered by clicking the button in the header.
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean|Object|Array}
*/
dropdownMenu: void 0,
/**
* The filters plugin.
* It allows filtering the table data either by the built-in component or with the API.
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean}
*/
filters: void 0,
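  /*
   * Editor's sketch for the two plugins above: the filtering component is
   * surfaced through the dropdown menu, so the two options are typically
   * enabled together (the key names are assumptions, mirroring the Context
   * Menu's predefined keys):
   *
   *   dropdownMenu: ['filter_by_condition', 'filter_action_bar'],
   *   filters: true
   */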
/**
* It allows Handsontable to process formula expressions defined in the provided data.
*
* @pro
* @since 1.7.0
* @type {Boolean}
*/
formulas: void 0,
/**
* @description
   * GanttChart plugin enables the possibility to create a Gantt chart using a Handsontable instance.
* In this case, the whole table becomes read-only.
*
* @pro
* @since 1.0.0-beta1
* @type {Object}
*/
ganttChart: void 0,
/**
* @description
* Allows adding a tooltip to the table headers.
*
* Available options:
* * the `rows` property defines if tooltips should be added to row headers,
* * the `columns` property defines if tooltips should be added to column headers,
   * * the `onlyTrimmed` property defines if tooltips should be added only to headers whose content is trimmed by the header itself (the content being wider than the header).
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean|Object}
*/
headerTooltips: void 0,
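  /*
   * Editor's sketch using the three options listed above:
   *
   *   headerTooltips: {
   *     rows: false,       // no tooltips on row headers
   *     columns: true,     // tooltips on column headers
   *     onlyTrimmed: true  // ...but only where the header trims its content
   *   }
   */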
/**
* Plugin allowing hiding of certain columns.
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean|Object}
*/
hiddenColumns: void 0,
/**
* @description
* Plugin allowing hiding of certain rows.
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean|Object}
*/
hiddenRows: void 0,
/**
* @description
* Allows creating a nested header structure, using the HTML's colspan attribute.
*
* @pro
* @since 1.0.0-beta1
* @type {Array}
*/
nestedHeaders: void 0,
/**
* @description
   * Plugin allowing trimming of certain rows.
*
* @pro
* @since 1.0.0-beta1
* @type {Boolean|Array}
*/
trimRows: void 0,
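  /*
   * Editor's sketches for the hiddenColumns, hiddenRows and trimRows plugins
   * above (the object shapes and the array-of-physical-indexes form are
   * assumed from the Pro plugin demos):
   *
   *   hiddenColumns: {columns: [2, 4], indicators: true},
   *   hiddenRows: {rows: [1, 3]},
   *   trimRows: [5, 6, 7]
   */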
/**
* @description
* Allows setting a custom width of the row headers. You can provide a number or an array of widths, if many row header levels are defined.
*
* @since 0.22.0
* @type {Number|Array}
*/
rowHeaderWidth: void 0,
/**
* @description
* Allows setting a custom height of the column headers. You can provide a number or an array of heights, if many column header levels are defined.
*
* @since 0.22.0
* @type {Number|Array}
*/
columnHeaderHeight: void 0,
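  /*
   * Editor's sketch for the two sizing options above, following the
   * number-or-array-per-level rule they describe:
   *
   *   rowHeaderWidth: 25,          // a single row header level
   *   columnHeaderHeight: [35, 20] // two column header levels
   */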
/**
* @description
   * Enabling this plugin switches the table into one-way data binding, where changes applied to the data source (from outside the table)
   * will be automatically reflected in the table.
*
* For every data change [afterChangesObserved](Hooks.html#event:afterChangesObserved) hook will be fired.
*
* @type {Boolean}
* @default false
*/
observeChanges: void 0,
/**
* @description
* When passed to the `column` property, allows specifying a custom sorting function for the desired column.
*
* @since 0.24.0
* @type {Function}
* @example
* ```js
* columns: [
* {
* sortFunction: function(sortOrder) {
* return function(a, b) {
* // sorting function body.
* //
* // Function parameters:
* // sortOrder: If true, the order is ascending, if false - descending. undefined = original order
* // a, b: Two compared elements. These are 2-element arrays, with the first element being the row index, the second - cell value.
* }
* }
* }
* ]
* ```
*/
sortFunction: void 0,
/**
   * If defined as `true`, the Autocomplete's suggestion list will be sorted by relevance (the closer to the left the match is, the higher the suggestion).
*
* Option desired for cells of the `'autocomplete'` type.
*
* @type {Boolean}
* @default true
*/
sortByRelevance: true,
/**
* If defined as 'true', when the user types into the input area the Autocomplete's suggestion list is updated to only
* include those choices starting with what has been typed; if defined as 'false' all suggestions remain shown, with
* those matching what has been typed marked in bold.
*
* @type {Boolean}
* @default true
*/
filter: true,
/**
* If defined as 'true', filtering in the Autocomplete Editor will be case-sensitive.
*
* @type {Boolean}
   * @default false
*/
filteringCaseSensitive: false,
/**
* @description
* Disable or enable the drag to scroll functionality.
*
* @example
* ```js
* ...
* dragToScroll: false,
* ...
* ```
*
* @type {Boolean}
* @default true
*/
dragToScroll: true,
};
export default DefaultSettings;
| 1 | 14,822 | Can you add missing asterisk? | handsontable-handsontable | js |
@@ -243,7 +243,15 @@ public class Name {
}
public String toUpperCamelAndDigits() {
- char[] upper = toUpperCamel().toCharArray();
+ return capitalizeDigitsAfterNumbers(toUpperCamel());
+ }
+
+ public String toLowerCamelAndDigits() {
+ return capitalizeDigitsAfterNumbers(toLowerCamel());
+ }
+
+ private String capitalizeDigitsAfterNumbers(String camelCaseIdentifier) {
+ char[] upper = camelCaseIdentifier.toCharArray();
boolean digit = false;
for (int i = 0; i < upper.length; i++) {
if (Character.isDigit(upper[i])) { | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.util;
import com.google.api.codegen.util.CommonAcronyms.NamePieceCasingType;
import com.google.api.codegen.util.CommonAcronyms.SubNamePiece;
import com.google.common.base.CaseFormat;
import com.google.common.base.Joiner;
import com.google.common.base.Strings;
import java.util.ArrayList;
import java.util.List;
/** Name represents an identifier name which is casing-aware. */
public class Name {
private List<NamePiece> namePieces;
/**
* Creates a Name from a sequence of lower-underscore strings.
*
* @throws IllegalArgumentException if any of the strings contain any characters that are not
* lower case or underscores.
*/
public static Name from(String... pieces) {
List<NamePiece> namePieces = new ArrayList<>();
for (String piece : pieces) {
if (Strings.isNullOrEmpty(piece)) {
continue;
}
validateLowerUnderscore(piece);
namePieces.add(new NamePiece(piece, CaseFormat.LOWER_UNDERSCORE));
}
return new Name(namePieces);
}
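/* Usage sketch (inputs are illustrative, not taken from the original file):
 *   Name n = Name.from("foo", "bar_baz");
 *   n.toLowerCamel();       // "fooBarBaz"
 *   n.toUpperUnderscore();  // "FOO_BAR_BAZ"
 */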
/**
* Creates a Name from a sequence of strings that are either all lower-underscore or all
* lower-camel. The first letter of each string must be lowercase.
*
* @throws IllegalArgumentException if the strings are not all lower-underscore or not all
* lower-camel.
*/
public static Name anyLower(String... pieces) {
Name name;
try {
name = Name.from(pieces);
} catch (IllegalArgumentException e) {
try {
name = Name.lowerCamel(pieces);
} catch (IllegalArgumentException ex) {
String msg = "[";
for (String p : pieces) {
msg += String.format("\"%s\", ", p);
}
msg += "]\n";
throw new IllegalArgumentException(
"Name: identifiers are not all either lower-underscore or lower-camel: " + msg);
}
}
return name;
}
/**
* Creates a Name from a sequence of upper-underscore strings.
*
* @throws IllegalArgumentException if any of the strings contain any characters that are not
* upper case or underscores.
*/
public static Name upperUnderscore(String... pieces) {
List<NamePiece> namePieces = new ArrayList<>();
for (String piece : pieces) {
if (Strings.isNullOrEmpty(piece)) {
continue;
}
validateUpperUnderscore(piece);
namePieces.add(new NamePiece(piece, CaseFormat.UPPER_UNDERSCORE));
}
return new Name(namePieces);
}
/**
* Creates a Name from a sequence of camel strings.
*
* @throws IllegalArgumentException if any of the strings do not follow the camel format.
*/
public static Name anyCamel(String... pieces) {
return camelInternal(CheckCase.NO_CHECK, AcronymMode.CAMEL_CASE, pieces);
}
/**
* Creates a Name from a sequence of lower-camel strings.
*
* @throws IllegalArgumentException if any of the strings do not follow the lower-camel format.
*/
public static Name lowerCamel(String... pieces) {
return camelInternal(CheckCase.LOWER, AcronymMode.CAMEL_CASE, pieces);
}
/**
* Creates a Name from a sequence of upper-camel strings.
*
* @throws IllegalArgumentException if any of the strings do not follow the upper-camel format.
*/
public static Name upperCamel(String... pieces) {
return camelInternal(CheckCase.UPPER, AcronymMode.CAMEL_CASE, pieces);
}
public static Name anyCamelKeepUpperAcronyms(String... pieces) {
return camelInternal(CheckCase.NO_CHECK, AcronymMode.UPPER_CASE, pieces);
}
public static Name upperCamelKeepUpperAcronyms(String... pieces) {
return camelInternal(CheckCase.UPPER, AcronymMode.UPPER_CASE, pieces);
}
private static CaseFormat getCamelCaseFormat(String piece) {
if (Character.isUpperCase(piece.charAt(0))) {
return CaseFormat.UPPER_CAMEL;
} else {
return CaseFormat.LOWER_CAMEL;
}
}
private static Name camelInternal(
CheckCase checkCase, AcronymMode acronymMode, String... pieces) {
List<NamePiece> namePieces = new ArrayList<>();
for (String piece : pieces) {
if (Strings.isNullOrEmpty(piece)) {
continue;
}
validateCamel(piece, checkCase);
for (SubNamePiece subPiece : CommonAcronyms.splitByUpperAcronyms(piece)) {
CaseFormat caseFormat = getCamelCaseFormat(subPiece.namePieceString());
CasingMode casingMode = CasingMode.NORMAL;
if (subPiece.type().equals(NamePieceCasingType.UPPER_ACRONYM)) {
caseFormat = CaseFormat.UPPER_UNDERSCORE;
casingMode = acronymMode.casingMode;
}
namePieces.add(new NamePiece(subPiece.namePieceString(), caseFormat, casingMode));
}
}
return new Name(namePieces);
}
private static void validateLowerUnderscore(String identifier) {
if (!isLowerUnderscore(identifier)) {
throw new IllegalArgumentException(
"Name: identifier not in lower-underscore: '" + identifier + "'");
}
}
private static void validateUpperUnderscore(String identifier) {
if (!isUpperUnderscore(identifier)) {
throw new IllegalArgumentException(
"Name: identifier not in upper-underscore: '" + identifier + "'");
}
}
private static boolean isUpperUnderscore(String identifier) {
Character underscore = Character.valueOf('_');
for (Character ch : identifier.toCharArray()) {
if (!Character.isUpperCase(ch) && !ch.equals(underscore) && !Character.isDigit(ch)) {
return false;
}
}
return true;
}
private static boolean isLowerUnderscore(String identifier) {
Character underscore = Character.valueOf('_');
for (Character ch : identifier.toCharArray()) {
if (!Character.isLowerCase(ch) && !ch.equals(underscore) && !Character.isDigit(ch)) {
return false;
}
}
return true;
}
private static void validateCamel(String identifier, CheckCase check) {
if (!isCamel(identifier, check)) {
String casingDescription = check + " camel";
throw new IllegalArgumentException(
"Name: identifier not in " + casingDescription + ": '" + identifier + "'");
}
}
private static boolean isCamel(String identifier, CheckCase check) {
if (identifier.length() == 0) {
return true;
}
if (!check.valid(identifier.charAt(0))) {
return false;
}
for (Character ch : identifier.toCharArray()) {
if (!Character.isLowerCase(ch) && !Character.isUpperCase(ch) && !Character.isDigit(ch)) {
return false;
}
}
return true;
}
private Name(List<NamePiece> namePieces) {
this.namePieces = namePieces;
}
/** Returns the identifier in upper-underscore format. */
public String toUpperUnderscore() {
return toUnderscore(CaseFormat.UPPER_UNDERSCORE);
}
/** Returns the identifier in lower-underscore format. */
public String toLowerUnderscore() {
return toUnderscore(CaseFormat.LOWER_UNDERSCORE);
}
private String toUnderscore(CaseFormat caseFormat) {
List<String> newPieces = new ArrayList<>();
for (NamePiece namePiece : namePieces) {
newPieces.add(namePiece.caseFormat.to(caseFormat, namePiece.identifier));
}
return Joiner.on('_').join(newPieces);
}
/** Returns the identifier in lower-camel format. */
public String toLowerCamel() {
return toCamel(CaseFormat.LOWER_CAMEL);
}
/** Returns the identifier in upper-camel format. */
public String toUpperCamel() {
return toCamel(CaseFormat.UPPER_CAMEL);
}
public String toUpperCamelAndDigits() {
char[] upper = toUpperCamel().toCharArray();
boolean digit = false;
for (int i = 0; i < upper.length; i++) {
if (Character.isDigit(upper[i])) {
digit = true;
} else if (digit) {
upper[i] = Character.toUpperCase(upper[i]);
digit = false;
}
}
return new String(upper);
}
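/* Sketch of the digit handling (input is illustrative): the method uppercases the
 * character that follows a run of digits.
 *   Name.from("config_v2beta1").toUpperCamel();          // "ConfigV2beta1"
 *   Name.from("config_v2beta1").toUpperCamelAndDigits(); // "ConfigV2Beta1"
 */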
private String toCamel(CaseFormat caseFormat) {
StringBuffer buffer = new StringBuffer();
boolean firstPiece = true;
for (NamePiece namePiece : namePieces) {
if (firstPiece && caseFormat.equals(CaseFormat.LOWER_CAMEL)) {
buffer.append(namePiece.caseFormat.to(CaseFormat.LOWER_CAMEL, namePiece.identifier));
} else {
CaseFormat toCaseFormat = CaseFormat.UPPER_CAMEL;
if (namePiece.casingMode.equals(CasingMode.UPPER_CAMEL_TO_SQUASHED_UPPERCASE)) {
toCaseFormat = CaseFormat.UPPER_UNDERSCORE;
}
buffer.append(namePiece.caseFormat.to(toCaseFormat, namePiece.identifier));
}
firstPiece = false;
}
return buffer.toString();
}
/** Returns the name in human readable form, useful in comments. */
public String toPhrase() {
return toLowerUnderscore().replace('_', ' ');
}
/** Returns the name in lower case, with a custom separator between components. */
public String toSeparatedString(String separator) {
return toLowerUnderscore().replace("_", separator);
}
/**
* Returns a new Name containing the pieces from this Name plus the given identifier added on the
* end.
*/
public Name join(String identifier) {
validateLowerUnderscore(identifier);
List<NamePiece> newPieceList = new ArrayList<>();
newPieceList.addAll(namePieces);
newPieceList.add(new NamePiece(identifier, CaseFormat.LOWER_UNDERSCORE));
return new Name(newPieceList);
}
/**
* Returns a new Name containing the pieces from this Name plus the pieces of the given name added
* on the end.
*/
public Name join(Name rhs) {
List<NamePiece> newPieceList = new ArrayList<>();
newPieceList.addAll(namePieces);
newPieceList.addAll(rhs.namePieces);
return new Name(newPieceList);
}
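/* Join sketch (inputs are illustrative):
 *   Name.from("list").join(Name.from("items")).toUpperCamel(); // "ListItems"
 */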
public String toOriginal() {
if (namePieces.size() != 1) {
throw new IllegalArgumentException(
"Name: toOriginal can only be called with a namePieces size of 1");
}
return namePieces.get(0).identifier;
}
@Override
public String toString() {
return String.format("Name(%s)", toLowerUnderscore());
}
@Override
public boolean equals(Object other) {
if (other instanceof Name) {
return ((Name) other).toLowerUnderscore().equals(this.toLowerUnderscore());
}
return false;
}
@Override
public int hashCode() {
return this.toLowerUnderscore().hashCode();
}
private static class NamePiece {
public final String identifier;
public final CaseFormat caseFormat;
public final CasingMode casingMode;
private NamePiece(String identifier, CaseFormat caseFormat) {
this(identifier, caseFormat, CasingMode.NORMAL);
}
private NamePiece(String identifier, CaseFormat caseFormat, CasingMode casingMode) {
this.identifier = identifier;
this.caseFormat = caseFormat;
this.casingMode = casingMode;
}
}
// Represents how acronyms should be rendered
private enum AcronymMode {
CAMEL_CASE(CasingMode.NORMAL),
UPPER_CASE(CasingMode.UPPER_CAMEL_TO_SQUASHED_UPPERCASE);
private AcronymMode(CasingMode casingMode) {
this.casingMode = casingMode;
}
private final CasingMode casingMode;
}
// Represents overrides of desired output casing
private enum CasingMode {
NORMAL,
UPPER_CAMEL_TO_SQUASHED_UPPERCASE;
}
private enum CheckCase {
NO_CHECK,
LOWER,
UPPER;
boolean valid(char c) {
switch (this) {
case NO_CHECK:
return true;
case UPPER:
return Character.isUpperCase(c);
case LOWER:
return Character.isLowerCase(c);
}
throw new IllegalStateException("unreachable");
}
}
}
| 1 | 30,835 | late to the party, but I bet it meant to be `capitalizeLettersAfterNumbers` :) | googleapis-gapic-generator | java |
@@ -235,6 +235,7 @@ public class MessageCompose extends K9Activity implements OnClickListener,
private RecipientPresenter recipientPresenter;
private MessageBuilder currentMessageBuilder;
private boolean mFinishAfterDraftSaved;
+ private boolean firstTimeEmptyObject = true;
@Override
public void onFocusChange(View v, boolean hasFocus) { | 1 | package com.fsck.k9.activity;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import android.annotation.SuppressLint;
import android.annotation.TargetApi;
import android.app.AlertDialog;
import android.app.AlertDialog.Builder;
import android.app.Dialog;
import android.app.LoaderManager;
import android.app.PendingIntent;
import android.content.ClipData;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.content.IntentSender.SendIntentException;
import android.content.Loader;
import android.content.pm.ActivityInfo;
import android.net.Uri;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Bundle;
import android.os.Handler;
import android.os.Parcelable;
import android.support.annotation.Nullable;
import android.text.TextUtils;
import android.text.TextWatcher;
import android.util.Log;
import android.util.TypedValue;
import android.view.ContextThemeWrapper;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.View.OnClickListener;
import android.view.View.OnFocusChangeListener;
import android.view.ViewGroup;
import android.view.Window;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.BaseAdapter;
import android.widget.Button;
import android.widget.EditText;
import android.widget.ImageButton;
import android.widget.LinearLayout;
import android.widget.TextView;
import android.widget.Toast;
import com.fsck.k9.Account;
import com.fsck.k9.Account.MessageFormat;
import com.fsck.k9.Account.QuoteStyle;
import com.fsck.k9.FontSizes;
import com.fsck.k9.Identity;
import com.fsck.k9.K9;
import com.fsck.k9.Preferences;
import com.fsck.k9.R;
import com.fsck.k9.activity.compose.ComposeCryptoStatus;
import com.fsck.k9.activity.compose.CryptoSettingsDialog.OnCryptoModeChangedListener;
import com.fsck.k9.activity.compose.RecipientMvpView;
import com.fsck.k9.activity.compose.RecipientPresenter;
import com.fsck.k9.activity.compose.RecipientPresenter.CryptoMode;
import com.fsck.k9.activity.loader.AttachmentContentLoader;
import com.fsck.k9.activity.loader.AttachmentInfoLoader;
import com.fsck.k9.activity.misc.Attachment;
import com.fsck.k9.controller.MessagingController;
import com.fsck.k9.controller.MessagingListener;
import com.fsck.k9.fragment.ProgressDialogFragment;
import com.fsck.k9.fragment.ProgressDialogFragment.CancelListener;
import com.fsck.k9.helper.Contacts;
import com.fsck.k9.helper.HtmlConverter;
import com.fsck.k9.helper.IdentityHelper;
import com.fsck.k9.helper.MailTo;
import com.fsck.k9.helper.SimpleTextWatcher;
import com.fsck.k9.helper.Utility;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.Message.RecipientType;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.Multipart;
import com.fsck.k9.mail.Part;
import com.fsck.k9.mail.internet.MessageExtractor;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mail.internet.MimeUtility;
import com.fsck.k9.mailstore.LocalBodyPart;
import com.fsck.k9.mailstore.LocalMessage;
import com.fsck.k9.message.IdentityField;
import com.fsck.k9.message.IdentityHeaderParser;
import com.fsck.k9.message.InsertableHtmlContent;
import com.fsck.k9.message.MessageBuilder;
import com.fsck.k9.message.PgpMessageBuilder;
import com.fsck.k9.message.QuotedTextMode;
import com.fsck.k9.message.SimpleMessageBuilder;
import com.fsck.k9.message.SimpleMessageFormat;
import com.fsck.k9.provider.AttachmentProvider;
import com.fsck.k9.ui.EolConvertingEditText;
import com.fsck.k9.view.MessageWebView;
import org.htmlcleaner.CleanerProperties;
import org.htmlcleaner.HtmlCleaner;
import org.htmlcleaner.SimpleHtmlSerializer;
import org.htmlcleaner.TagNode;
import org.openintents.openpgp.IOpenPgpService2;
import org.openintents.openpgp.util.OpenPgpApi;
import org.openintents.openpgp.util.OpenPgpServiceConnection;
import org.openintents.openpgp.util.OpenPgpServiceConnection.OnBound;
@SuppressWarnings("deprecation")
public class MessageCompose extends K9Activity implements OnClickListener,
CancelListener, OnFocusChangeListener, OnCryptoModeChangedListener, MessageBuilder.Callback {
private static final int DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE = 1;
private static final int DIALOG_CONFIRM_DISCARD_ON_BACK = 2;
private static final int DIALOG_CHOOSE_IDENTITY = 3;
private static final int DIALOG_CONFIRM_DISCARD = 4;
private static final long INVALID_DRAFT_ID = MessagingController.INVALID_MESSAGE_ID;
private static final String ACTION_COMPOSE = "com.fsck.k9.intent.action.COMPOSE";
private static final String ACTION_REPLY = "com.fsck.k9.intent.action.REPLY";
private static final String ACTION_REPLY_ALL = "com.fsck.k9.intent.action.REPLY_ALL";
private static final String ACTION_FORWARD = "com.fsck.k9.intent.action.FORWARD";
private static final String ACTION_EDIT_DRAFT = "com.fsck.k9.intent.action.EDIT_DRAFT";
private static final String EXTRA_ACCOUNT = "account";
private static final String EXTRA_MESSAGE_BODY = "messageBody";
private static final String EXTRA_MESSAGE_REFERENCE = "message_reference";
private static final String STATE_KEY_ATTACHMENTS =
"com.fsck.k9.activity.MessageCompose.attachments";
private static final String STATE_KEY_QUOTED_TEXT_MODE =
"com.fsck.k9.activity.MessageCompose.QuotedTextShown";
private static final String STATE_KEY_SOURCE_MESSAGE_PROCED =
"com.fsck.k9.activity.MessageCompose.stateKeySourceMessageProced";
private static final String STATE_KEY_DRAFT_ID = "com.fsck.k9.activity.MessageCompose.draftId";
private static final String STATE_KEY_HTML_QUOTE = "com.fsck.k9.activity.MessageCompose.HTMLQuote";
private static final String STATE_IDENTITY_CHANGED =
"com.fsck.k9.activity.MessageCompose.identityChanged";
private static final String STATE_IDENTITY =
"com.fsck.k9.activity.MessageCompose.identity";
private static final String STATE_IN_REPLY_TO = "com.fsck.k9.activity.MessageCompose.inReplyTo";
private static final String STATE_REFERENCES = "com.fsck.k9.activity.MessageCompose.references";
private static final String STATE_KEY_READ_RECEIPT = "com.fsck.k9.activity.MessageCompose.messageReadReceipt";
private static final String STATE_KEY_DRAFT_NEEDS_SAVING = "com.fsck.k9.activity.MessageCompose.draftNeedsSaving";
private static final String STATE_KEY_FORCE_PLAIN_TEXT =
"com.fsck.k9.activity.MessageCompose.forcePlainText";
private static final String STATE_KEY_QUOTED_TEXT_FORMAT =
"com.fsck.k9.activity.MessageCompose.quotedTextFormat";
private static final String STATE_KEY_NUM_ATTACHMENTS_LOADING = "numAttachmentsLoading";
private static final String STATE_KEY_WAITING_FOR_ATTACHMENTS = "waitingForAttachments";
private static final String LOADER_ARG_ATTACHMENT = "attachment";
private static final String FRAGMENT_WAITING_FOR_ATTACHMENT = "waitingForAttachment";
private static final int MSG_PROGRESS_ON = 1;
private static final int MSG_PROGRESS_OFF = 2;
private static final int MSG_SKIPPED_ATTACHMENTS = 3;
private static final int MSG_SAVED_DRAFT = 4;
private static final int MSG_DISCARDED_DRAFT = 5;
private static final int MSG_PERFORM_STALLED_ACTION = 6;
private static final int ACTIVITY_REQUEST_PICK_ATTACHMENT = 1;
private static final int REQUEST_MASK_RECIPIENT_PRESENTER = (1<<8);
private static final int REQUEST_MASK_MESSAGE_BUILDER = (2<<8);
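// Routing sketch (the low bits are illustrative): a sub-component's request code is
// tagged by OR-ing its mask before startActivityForResult(), e.g.
//   (3 | REQUEST_MASK_RECIPIENT_PRESENTER) == 0x103
// and onActivityResult() below strips the mask again with '^=' before dispatching.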
/**
* Regular expression to remove the first localized "Re:" prefix in subjects.
*
* Currently:
* - "Aw:" (german: abbreviation for "Antwort")
*/
private static final Pattern PREFIX = Pattern.compile("^AW[:\\s]\\s*", Pattern.CASE_INSENSITIVE);
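// Behavior sketch (the subject string is illustrative):
//   PREFIX.matcher("Aw: Hallo").replaceFirst("") yields "Hallo"
// Only a single leading prefix is stripped.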
/**
* The account used for message composition.
*/
private Account mAccount;
private Contacts mContacts;
/**
* This identity's settings are used for message composition.
* Note: This has to be an identity of the account {@link #mAccount}.
*/
private Identity mIdentity;
private boolean mIdentityChanged = false;
private boolean mSignatureChanged = false;
/**
* Reference to the source message (in case of reply, forward, or edit
* draft actions).
*/
private MessageReference mMessageReference;
private Message mSourceMessage;
/**
* "Original" message body
*
* <p>
* The contents of this string will be used instead of the body of a referenced message when
* replying to or forwarding a message.<br>
* Right now this is only used when replying to a signed or encrypted message. It then contains
* the stripped/decrypted body of that message.
* </p>
* <p><strong>Note:</strong>
* When this field is not {@code null} we assume that the message we are composing right now
* should be encrypted.
* </p>
*/
private String mSourceMessageBody;
/**
* Indicates that the source message has been processed at least once and should not
* be processed on any subsequent loads. This protects us from adding attachments that
* have already been added from the restore of the view state.
*/
private boolean mSourceMessageProcessed = false;
private int mMaxLoaderId = 0;
private RecipientPresenter recipientPresenter;
private MessageBuilder currentMessageBuilder;
private boolean mFinishAfterDraftSaved;
@Override
public void onFocusChange(View v, boolean hasFocus) {
switch(v.getId()) {
case R.id.message_content:
case R.id.subject:
if (hasFocus) {
recipientPresenter.onNonRecipientFieldFocused();
}
break;
}
}
@Override
public void onCryptoModeChanged(CryptoMode cryptoMode) {
recipientPresenter.onCryptoModeChanged(cryptoMode);
}
enum Action {
COMPOSE,
REPLY,
REPLY_ALL,
FORWARD,
EDIT_DRAFT
}
/**
* Contains the action we're currently performing (e.g. replying to a message)
*/
private Action mAction;
private boolean mReadReceipt = false;
private QuotedTextMode mQuotedTextMode = QuotedTextMode.NONE;
/**
* Contains the format of the quoted text (text vs. HTML).
*/
private SimpleMessageFormat mQuotedTextFormat;
/**
* When this is {@code true}, the message format setting is ignored and we're always sending
* a text/plain message.
*/
private boolean mForcePlainText = false;
private TextView mChooseIdentityButton;
private EditText mSubjectView;
private EolConvertingEditText mSignatureView;
private EolConvertingEditText mMessageContentView;
private LinearLayout mAttachments;
private Button mQuotedTextShow;
private View mQuotedTextBar;
private ImageButton mQuotedTextEdit;
private EolConvertingEditText mQuotedText;
private MessageWebView mQuotedHTML;
private InsertableHtmlContent mQuotedHtmlContent; // Container for HTML reply as it's being built.
private String mOpenPgpProvider;
private OpenPgpServiceConnection mOpenPgpServiceConnection;
private String mReferences;
private String mInReplyTo;
private boolean mSourceProcessed = false;
/**
* The currently used message format.
*
* <p>
* <strong>Note:</strong>
* Don't modify this field directly. Use {@link #updateMessageFormat()}.
* </p>
*/
private SimpleMessageFormat mMessageFormat;
private QuoteStyle mQuoteStyle;
private boolean draftNeedsSaving = false;
private boolean isInSubActivity = false;
/**
* The database ID of this message's draft. This is used when saving drafts so the message in
* the database is updated instead of being created anew. This property is INVALID_DRAFT_ID
* until the first save.
*/
private long mDraftId = INVALID_DRAFT_ID;
/**
* Number of attachments currently being fetched.
*/
private int mNumAttachmentsLoading = 0;
private enum WaitingAction {
NONE,
SEND,
SAVE
}
/**
* Specifies what action to perform once attachments have been fetched.
*/
private WaitingAction mWaitingForAttachments = WaitingAction.NONE;
private Handler mHandler = new Handler() {
@Override
public void handleMessage(android.os.Message msg) {
switch (msg.what) {
case MSG_PROGRESS_ON:
setProgressBarIndeterminateVisibility(true);
break;
case MSG_PROGRESS_OFF:
setProgressBarIndeterminateVisibility(false);
break;
case MSG_SKIPPED_ATTACHMENTS:
Toast.makeText(
MessageCompose.this,
getString(R.string.message_compose_attachments_skipped_toast),
Toast.LENGTH_LONG).show();
break;
case MSG_SAVED_DRAFT:
mDraftId = (Long) msg.obj;
Toast.makeText(
MessageCompose.this,
getString(R.string.message_saved_toast),
Toast.LENGTH_LONG).show();
break;
case MSG_DISCARDED_DRAFT:
Toast.makeText(
MessageCompose.this,
getString(R.string.message_discarded_toast),
Toast.LENGTH_LONG).show();
break;
case MSG_PERFORM_STALLED_ACTION:
performStalledAction();
break;
default:
super.handleMessage(msg);
break;
}
}
};
private Listener mListener = new Listener();
private FontSizes mFontSizes = K9.getFontSizes();
/**
* Compose a new message using the given account. If account is null the default account
* will be used.
*/
public static void actionCompose(Context context, Account account) {
String accountUuid = (account == null) ?
Preferences.getPreferences(context).getDefaultAccount().getUuid() :
account.getUuid();
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_ACCOUNT, accountUuid);
i.setAction(ACTION_COMPOSE);
context.startActivity(i);
}
/**
* Get an intent for composing a new message as a reply to the given message. If replyAll is
* true, the reply is addressed to all recipients instead of only the sender.
* @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message
*/
public static Intent getActionReplyIntent(
Context context,
LocalMessage message,
boolean replyAll,
String messageBody) {
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_MESSAGE_BODY, messageBody);
i.putExtra(EXTRA_MESSAGE_REFERENCE, message.makeMessageReference());
if (replyAll) {
i.setAction(ACTION_REPLY_ALL);
} else {
i.setAction(ACTION_REPLY);
}
return i;
}
public static Intent getActionReplyIntent(Context context, MessageReference messageReference) {
Intent intent = new Intent(context, MessageCompose.class);
intent.setAction(ACTION_REPLY);
intent.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference);
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
return intent;
}
/**
* Compose a new message as a reply to the given message. If replyAll is true, the reply is
* addressed to all recipients instead of only the sender.
* @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message
*/
public static void actionReply(
Context context,
LocalMessage message,
boolean replyAll,
String messageBody) {
context.startActivity(getActionReplyIntent(context, message, replyAll, messageBody));
}
/**
* Compose a new message as a forward of the given message.
* @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message
*/
public static void actionForward(
Context context,
LocalMessage message,
String messageBody) {
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_MESSAGE_BODY, messageBody);
i.putExtra(EXTRA_MESSAGE_REFERENCE, message.makeMessageReference());
i.setAction(ACTION_FORWARD);
context.startActivity(i);
}
/**
* Continue composition of the given message. This action modifies the way this Activity
* handles certain actions.
* Save will attempt to replace the message in the given folder with the updated version.
* Discard will delete the message from the given folder.
*/
public static void actionEditDraft(Context context, MessageReference messageReference) {
Intent i = new Intent(context, MessageCompose.class);
i.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference);
i.setAction(ACTION_EDIT_DRAFT);
context.startActivity(i);
}
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (UpgradeDatabases.actionUpgradeDatabases(this, getIntent())) {
finish();
return;
}
requestWindowFeature(Window.FEATURE_INDETERMINATE_PROGRESS);
if (K9.getK9ComposerThemeSetting() != K9.Theme.USE_GLOBAL) {
// theme the whole content according to the theme (except the action bar)
ContextThemeWrapper themeContext = new ContextThemeWrapper(this,
K9.getK9ThemeResourceId(K9.getK9ComposerTheme()));
@SuppressLint("InflateParams") // this is the top level activity element, it has no root
View v = LayoutInflater.from(themeContext).inflate(R.layout.message_compose, null);
TypedValue outValue = new TypedValue();
// background color needs to be forced
themeContext.getTheme().resolveAttribute(R.attr.messageViewBackgroundColor, outValue, true);
v.setBackgroundColor(outValue.data);
setContentView(v);
} else {
setContentView(R.layout.message_compose);
}
final Intent intent = getIntent();
mMessageReference = intent.getParcelableExtra(EXTRA_MESSAGE_REFERENCE);
mSourceMessageBody = intent.getStringExtra(EXTRA_MESSAGE_BODY);
if (K9.DEBUG && mSourceMessageBody != null) {
Log.d(K9.LOG_TAG, "Composing message with explicitly specified message body.");
}
final String accountUuid = (mMessageReference != null) ?
mMessageReference.getAccountUuid() :
intent.getStringExtra(EXTRA_ACCOUNT);
mAccount = Preferences.getPreferences(this).getAccount(accountUuid);
if (mAccount == null) {
mAccount = Preferences.getPreferences(this).getDefaultAccount();
}
if (mAccount == null) {
/*
* There are no accounts set up. This should not have happened. Prompt the
* user to set up an account as an acceptable bailout.
*/
startActivity(new Intent(this, Accounts.class));
draftNeedsSaving = false;
finish();
return;
}
mContacts = Contacts.getInstance(MessageCompose.this);
mChooseIdentityButton = (TextView) findViewById(R.id.identity);
mChooseIdentityButton.setOnClickListener(this);
RecipientMvpView recipientMvpView = new RecipientMvpView(this);
recipientPresenter = new RecipientPresenter(this, recipientMvpView, mAccount);
mSubjectView = (EditText) findViewById(R.id.subject);
mSubjectView.getInputExtras(true).putBoolean("allowEmoji", true);
EolConvertingEditText upperSignature = (EolConvertingEditText)findViewById(R.id.upper_signature);
EolConvertingEditText lowerSignature = (EolConvertingEditText)findViewById(R.id.lower_signature);
mMessageContentView = (EolConvertingEditText)findViewById(R.id.message_content);
mMessageContentView.getInputExtras(true).putBoolean("allowEmoji", true);
mAttachments = (LinearLayout)findViewById(R.id.attachments);
mQuotedTextShow = (Button)findViewById(R.id.quoted_text_show);
mQuotedTextBar = findViewById(R.id.quoted_text_bar);
mQuotedTextEdit = (ImageButton)findViewById(R.id.quoted_text_edit);
ImageButton mQuotedTextDelete = (ImageButton) findViewById(R.id.quoted_text_delete);
mQuotedText = (EolConvertingEditText)findViewById(R.id.quoted_text);
mQuotedText.getInputExtras(true).putBoolean("allowEmoji", true);
mQuotedHTML = (MessageWebView) findViewById(R.id.quoted_html);
mQuotedHTML.configure();
// Disable the ability to click links in the quoted HTML page. I think this is a nice feature, but if someone
// feels this should be a preference (or should go away altogether), I'm ok with that too. -achen 20101130
mQuotedHTML.setWebViewClient(new WebViewClient() {
@Override
public boolean shouldOverrideUrlLoading(WebView view, String url) {
return true;
}
});
TextWatcher draftNeedsChangingTextWatcher = new SimpleTextWatcher() {
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
draftNeedsSaving = true;
}
};
TextWatcher signTextWatcher = new SimpleTextWatcher() {
@Override
public void onTextChanged(CharSequence s, int start, int before, int count) {
draftNeedsSaving = true;
mSignatureChanged = true;
}
};
recipientMvpView.addTextChangedListener(draftNeedsChangingTextWatcher);
mSubjectView.addTextChangedListener(draftNeedsChangingTextWatcher);
mMessageContentView.addTextChangedListener(draftNeedsChangingTextWatcher);
mQuotedText.addTextChangedListener(draftNeedsChangingTextWatcher);
/*
* We set this to invisible by default. Other methods will turn it back on if it's
* needed.
*/
showOrHideQuotedText(QuotedTextMode.NONE);
mSubjectView.setOnFocusChangeListener(this);
mMessageContentView.setOnFocusChangeListener(this);
mQuotedTextShow.setOnClickListener(this);
mQuotedTextEdit.setOnClickListener(this);
mQuotedTextDelete.setOnClickListener(this);
if (savedInstanceState != null) {
/*
* This data gets used in onCreate, so grab it here instead of onRestoreInstanceState
*/
mSourceMessageProcessed = savedInstanceState.getBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, false);
}
if (initFromIntent(intent)) {
mAction = Action.COMPOSE;
draftNeedsSaving = true;
} else {
String action = intent.getAction();
if (ACTION_COMPOSE.equals(action)) {
mAction = Action.COMPOSE;
} else if (ACTION_REPLY.equals(action)) {
mAction = Action.REPLY;
} else if (ACTION_REPLY_ALL.equals(action)) {
mAction = Action.REPLY_ALL;
} else if (ACTION_FORWARD.equals(action)) {
mAction = Action.FORWARD;
} else if (ACTION_EDIT_DRAFT.equals(action)) {
mAction = Action.EDIT_DRAFT;
} else {
// This shouldn't happen
Log.w(K9.LOG_TAG, "MessageCompose was started with an unsupported action");
mAction = Action.COMPOSE;
}
}
if (mIdentity == null) {
mIdentity = mAccount.getIdentity(0);
}
if (mAccount.isSignatureBeforeQuotedText()) {
mSignatureView = upperSignature;
lowerSignature.setVisibility(View.GONE);
} else {
mSignatureView = lowerSignature;
upperSignature.setVisibility(View.GONE);
}
updateSignature();
mSignatureView.addTextChangedListener(signTextWatcher);
if (!mIdentity.getSignatureUse()) {
mSignatureView.setVisibility(View.GONE);
}
mReadReceipt = mAccount.isMessageReadReceiptAlways();
mQuoteStyle = mAccount.getQuoteStyle();
updateFrom();
if (!mSourceMessageProcessed) {
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL ||
mAction == Action.FORWARD || mAction == Action.EDIT_DRAFT) {
/*
* If we need to load the message, we add ourselves as a message listener here
* so we can kick it off. Normally we add the listener in onResume but we don't
* want to reload the message every time the activity is resumed.
* There is no harm in adding twice.
*/
MessagingController.getInstance(getApplication()).addListener(mListener);
final Account account = Preferences.getPreferences(this).getAccount(mMessageReference.getAccountUuid());
final String folderName = mMessageReference.getFolderName();
final String sourceMessageUid = mMessageReference.getUid();
MessagingController.getInstance(getApplication()).loadMessageForView(account, folderName, sourceMessageUid, null);
}
if (mAction != Action.EDIT_DRAFT) {
String alwaysBccString = mAccount.getAlwaysBcc();
if (!TextUtils.isEmpty(alwaysBccString)) {
recipientPresenter.addBccAddresses(Address.parse(alwaysBccString));
}
}
}
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL) {
mMessageReference = mMessageReference.withModifiedFlag(Flag.ANSWERED);
}
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL ||
mAction == Action.EDIT_DRAFT) {
//change focus to message body.
mMessageContentView.requestFocus();
} else {
// Explicitly set focus to "To:" input field (see issue 2998)
recipientMvpView.requestFocusOnToField();
}
if (mAction == Action.FORWARD) {
mMessageReference = mMessageReference.withModifiedFlag(Flag.FORWARDED);
}
mOpenPgpProvider = mAccount.getOpenPgpProvider();
if (isCryptoProviderEnabled()) {
// attachKeyCheckBox = (CheckBox) findViewById(R.id.cb_attach_key);
// attachKeyCheckBox.setEnabled(mAccount.getCryptoKey() != 0);
mOpenPgpServiceConnection = new OpenPgpServiceConnection(this, mOpenPgpProvider, new OnBound() {
@Override
public void onBound(IOpenPgpService2 service) {
recipientPresenter.onCryptoProviderBound();
}
@Override
public void onError(Exception e) {
recipientPresenter.onCryptoProviderError(e);
}
});
mOpenPgpServiceConnection.bindToService();
updateMessageFormat();
}
// Set font size of input controls
int fontSize = mFontSizes.getMessageComposeInput();
recipientMvpView.setFontSizes(mFontSizes, fontSize);
mFontSizes.setViewTextSize(mSubjectView, fontSize);
mFontSizes.setViewTextSize(mMessageContentView, fontSize);
mFontSizes.setViewTextSize(mQuotedText, fontSize);
mFontSizes.setViewTextSize(mSignatureView, fontSize);
updateMessageFormat();
setTitle();
currentMessageBuilder = (MessageBuilder) getLastNonConfigurationInstance();
if (currentMessageBuilder != null) {
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.reattachCallback(this);
}
}
@Override
public void onDestroy() {
super.onDestroy();
if (mOpenPgpServiceConnection != null) {
mOpenPgpServiceConnection.unbindFromService();
}
}
/**
* Handle external intents that trigger the message compose activity.
*
* <p>
* Supported external intents:
* <ul>
* <li>{@link Intent#ACTION_VIEW}</li>
* <li>{@link Intent#ACTION_SENDTO}</li>
* <li>{@link Intent#ACTION_SEND}</li>
* <li>{@link Intent#ACTION_SEND_MULTIPLE}</li>
* </ul>
* </p>
*
* @param intent
* The (external) intent that started the activity.
*
* @return {@code true}, if this activity was started by an external intent. {@code false},
* otherwise.
*/
private boolean initFromIntent(final Intent intent) {
boolean startedByExternalIntent = false;
final String action = intent.getAction();
if (Intent.ACTION_VIEW.equals(action) || Intent.ACTION_SENDTO.equals(action)) {
/*
* Someone has clicked a mailto: link. The address is in the URI.
*/
if (intent.getData() != null) {
Uri uri = intent.getData();
if (MailTo.isMailTo(uri)) {
MailTo mailTo = MailTo.parse(uri);
initializeFromMailto(mailTo);
}
}
/*
* Note: According to the documentation ACTION_VIEW and ACTION_SENDTO don't accept
* EXTRA_* parameters.
* And previously we didn't process these EXTRAs. But it looks like nobody bothers to
* read the official documentation and just copies wrong sample code that happens to
* work with the AOSP Email application. And because even big players get this wrong,
* we're now finally giving in and read the EXTRAs for those actions (below).
*/
}
if (Intent.ACTION_SEND.equals(action) || Intent.ACTION_SEND_MULTIPLE.equals(action) ||
Intent.ACTION_SENDTO.equals(action) || Intent.ACTION_VIEW.equals(action)) {
startedByExternalIntent = true;
/*
* Note: Here we allow a slight deviation from the documented behavior.
* EXTRA_TEXT is used as message body (if available) regardless of the MIME
* type of the intent. In addition one or multiple attachments can be added
* using EXTRA_STREAM.
*/
CharSequence text = intent.getCharSequenceExtra(Intent.EXTRA_TEXT);
// Only use EXTRA_TEXT if the body hasn't already been set by the mailto URI
if (text != null && mMessageContentView.getText().length() == 0) {
mMessageContentView.setCharacters(text);
}
String type = intent.getType();
if (Intent.ACTION_SEND.equals(action)) {
Uri stream = intent.getParcelableExtra(Intent.EXTRA_STREAM);
if (stream != null) {
addAttachment(stream, type);
}
} else {
List<Parcelable> list = intent.getParcelableArrayListExtra(Intent.EXTRA_STREAM);
if (list != null) {
for (Parcelable parcelable : list) {
Uri stream = (Uri) parcelable;
if (stream != null) {
addAttachment(stream, type);
}
}
}
}
String subject = intent.getStringExtra(Intent.EXTRA_SUBJECT);
// Only use EXTRA_SUBJECT if the subject hasn't already been set by the mailto URI
if (subject != null && mSubjectView.getText().length() == 0) {
mSubjectView.setText(subject);
}
recipientPresenter.initFromSendOrViewIntent(intent);
}
return startedByExternalIntent;
}
@Override
public void onResume() {
super.onResume();
MessagingController.getInstance(getApplication()).addListener(mListener);
}
@Override
public void onPause() {
super.onPause();
MessagingController.getInstance(getApplication()).removeListener(mListener);
boolean isPausingOnConfigurationChange = (getChangingConfigurations() & ActivityInfo.CONFIG_ORIENTATION)
== ActivityInfo.CONFIG_ORIENTATION;
boolean isCurrentlyBuildingMessage = currentMessageBuilder != null;
if (isPausingOnConfigurationChange || isCurrentlyBuildingMessage || isInSubActivity) {
return;
}
checkToSaveDraftImplicitly();
}
/**
* The framework handles most of the fields, but we need to handle stuff that we
* dynamically show and hide:
* Attachment list,
* Cc field,
* Bcc field,
* Quoted text,
*/
@Override
protected void onSaveInstanceState(Bundle outState) {
super.onSaveInstanceState(outState);
outState.putInt(STATE_KEY_NUM_ATTACHMENTS_LOADING, mNumAttachmentsLoading);
outState.putString(STATE_KEY_WAITING_FOR_ATTACHMENTS, mWaitingForAttachments.name());
outState.putParcelableArrayList(STATE_KEY_ATTACHMENTS, createAttachmentList());
outState.putSerializable(STATE_KEY_QUOTED_TEXT_MODE, mQuotedTextMode);
outState.putBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, mSourceMessageProcessed);
outState.putLong(STATE_KEY_DRAFT_ID, mDraftId);
outState.putSerializable(STATE_IDENTITY, mIdentity);
outState.putBoolean(STATE_IDENTITY_CHANGED, mIdentityChanged);
outState.putString(STATE_IN_REPLY_TO, mInReplyTo);
outState.putString(STATE_REFERENCES, mReferences);
outState.putSerializable(STATE_KEY_HTML_QUOTE, mQuotedHtmlContent);
outState.putBoolean(STATE_KEY_READ_RECEIPT, mReadReceipt);
outState.putBoolean(STATE_KEY_DRAFT_NEEDS_SAVING, draftNeedsSaving);
outState.putBoolean(STATE_KEY_FORCE_PLAIN_TEXT, mForcePlainText);
outState.putSerializable(STATE_KEY_QUOTED_TEXT_FORMAT, mQuotedTextFormat);
recipientPresenter.onSaveInstanceState(outState);
}
@Override
public Object onRetainNonConfigurationInstance() {
if (currentMessageBuilder != null) {
currentMessageBuilder.detachCallback();
}
return currentMessageBuilder;
}
@Override
protected void onRestoreInstanceState(Bundle savedInstanceState) {
super.onRestoreInstanceState(savedInstanceState);
mAttachments.removeAllViews();
mMaxLoaderId = 0;
mNumAttachmentsLoading = savedInstanceState.getInt(STATE_KEY_NUM_ATTACHMENTS_LOADING);
mWaitingForAttachments = WaitingAction.NONE;
try {
String waitingFor = savedInstanceState.getString(STATE_KEY_WAITING_FOR_ATTACHMENTS);
mWaitingForAttachments = WaitingAction.valueOf(waitingFor);
} catch (Exception e) {
Log.w(K9.LOG_TAG, "Couldn't read value \" + STATE_KEY_WAITING_FOR_ATTACHMENTS +" +
"\" from saved instance state", e);
}
List<Attachment> attachments = savedInstanceState.getParcelableArrayList(STATE_KEY_ATTACHMENTS);
// noinspection ConstantConditions, we know this is set in onSaveInstanceState
for (Attachment attachment : attachments) {
addAttachmentView(attachment);
if (attachment.loaderId > mMaxLoaderId) {
mMaxLoaderId = attachment.loaderId;
}
if (attachment.state == Attachment.LoadingState.URI_ONLY) {
initAttachmentInfoLoader(attachment);
} else if (attachment.state == Attachment.LoadingState.METADATA) {
initAttachmentContentLoader(attachment);
}
}
mReadReceipt = savedInstanceState.getBoolean(STATE_KEY_READ_RECEIPT);
recipientPresenter.onRestoreInstanceState(savedInstanceState);
mQuotedHtmlContent =
(InsertableHtmlContent) savedInstanceState.getSerializable(STATE_KEY_HTML_QUOTE);
if (mQuotedHtmlContent != null && mQuotedHtmlContent.getQuotedContent() != null) {
mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent());
}
mDraftId = savedInstanceState.getLong(STATE_KEY_DRAFT_ID);
mIdentity = (Identity)savedInstanceState.getSerializable(STATE_IDENTITY);
mIdentityChanged = savedInstanceState.getBoolean(STATE_IDENTITY_CHANGED);
mInReplyTo = savedInstanceState.getString(STATE_IN_REPLY_TO);
mReferences = savedInstanceState.getString(STATE_REFERENCES);
draftNeedsSaving = savedInstanceState.getBoolean(STATE_KEY_DRAFT_NEEDS_SAVING);
mForcePlainText = savedInstanceState.getBoolean(STATE_KEY_FORCE_PLAIN_TEXT);
mQuotedTextFormat = (SimpleMessageFormat) savedInstanceState.getSerializable(
STATE_KEY_QUOTED_TEXT_FORMAT);
showOrHideQuotedText(
(QuotedTextMode) savedInstanceState.getSerializable(STATE_KEY_QUOTED_TEXT_MODE));
updateFrom();
updateMessageFormat();
}
private void setTitle() {
switch (mAction) {
case REPLY: {
setTitle(R.string.compose_title_reply);
break;
}
case REPLY_ALL: {
setTitle(R.string.compose_title_reply_all);
break;
}
case FORWARD: {
setTitle(R.string.compose_title_forward);
break;
}
case COMPOSE:
default: {
setTitle(R.string.compose_title_compose);
break;
}
}
}
@Nullable
private MessageBuilder createMessageBuilder(boolean isDraft) {
MessageBuilder builder;
if (!recipientPresenter.canSendOrError(isDraft)) {
return null;
}
ComposeCryptoStatus cryptoStatus = recipientPresenter.getCurrentCryptoStatus();
// TODO encrypt drafts for storage
if(!isDraft && cryptoStatus.shouldUsePgpMessageBuilder()) {
PgpMessageBuilder pgpBuilder = new PgpMessageBuilder(getApplicationContext(), getOpenPgpApi());
pgpBuilder.setCryptoStatus(cryptoStatus);
builder = pgpBuilder;
} else {
builder = new SimpleMessageBuilder(getApplicationContext());
}
builder.setSubject(mSubjectView.getText().toString())
.setTo(recipientPresenter.getToAddresses())
.setCc(recipientPresenter.getCcAddresses())
.setBcc(recipientPresenter.getBccAddresses())
.setInReplyTo(mInReplyTo)
.setReferences(mReferences)
.setRequestReadReceipt(mReadReceipt)
.setIdentity(mIdentity)
.setMessageFormat(mMessageFormat)
.setText(mMessageContentView.getCharacters())
.setAttachments(createAttachmentList())
.setSignature(mSignatureView.getCharacters())
.setQuoteStyle(mQuoteStyle)
.setQuotedTextMode(mQuotedTextMode)
.setQuotedText(mQuotedText.getCharacters())
.setQuotedHtmlContent(mQuotedHtmlContent)
.setReplyAfterQuote(mAccount.isReplyAfterQuote())
.setSignatureBeforeQuotedText(mAccount.isSignatureBeforeQuotedText())
.setIdentityChanged(mIdentityChanged)
.setSignatureChanged(mSignatureChanged)
.setCursorPosition(mMessageContentView.getSelectionStart())
.setMessageReference(mMessageReference)
.setDraft(isDraft);
return builder;
}
private void checkToSendMessage() {
if (recipientPresenter.checkRecipientsOkForSending()) {
return;
}
if (mWaitingForAttachments != WaitingAction.NONE) {
return;
}
if (mNumAttachmentsLoading > 0) {
mWaitingForAttachments = WaitingAction.SEND;
showWaitingForAttachmentDialog();
return;
}
performSendAfterChecks();
}
private void checkToSaveDraftAndSave() {
if (!mAccount.hasDraftsFolder()) {
Toast.makeText(this, R.string.compose_error_no_draft_folder, Toast.LENGTH_SHORT).show();
return;
}
if (mWaitingForAttachments != WaitingAction.NONE) {
return;
}
if (mNumAttachmentsLoading > 0) {
mWaitingForAttachments = WaitingAction.SAVE;
showWaitingForAttachmentDialog();
return;
}
mFinishAfterDraftSaved = true;
performSaveAfterChecks();
}
private void checkToSaveDraftImplicitly() {
if (!mAccount.hasDraftsFolder()) {
return;
}
if (!draftNeedsSaving) {
return;
}
mFinishAfterDraftSaved = false;
performSaveAfterChecks();
}
private void performSaveAfterChecks() {
currentMessageBuilder = createMessageBuilder(true);
if (currentMessageBuilder != null) {
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.buildAsync(this);
}
}
public void performSendAfterChecks() {
currentMessageBuilder = createMessageBuilder(false);
if (currentMessageBuilder != null) {
draftNeedsSaving = false;
setProgressBarIndeterminateVisibility(true);
currentMessageBuilder.buildAsync(this);
}
}
private void onDiscard() {
if (mDraftId != INVALID_DRAFT_ID) {
MessagingController.getInstance(getApplication()).deleteDraft(mAccount, mDraftId);
mDraftId = INVALID_DRAFT_ID;
}
mHandler.sendEmptyMessage(MSG_DISCARDED_DRAFT);
draftNeedsSaving = false;
finish();
}
private void onReadReceipt() {
CharSequence txt;
if (!mReadReceipt) {
txt = getString(R.string.read_receipt_enabled);
mReadReceipt = true;
} else {
txt = getString(R.string.read_receipt_disabled);
mReadReceipt = false;
}
Context context = getApplicationContext();
Toast toast = Toast.makeText(context, txt, Toast.LENGTH_SHORT);
toast.show();
}
private ArrayList<Attachment> createAttachmentList() {
ArrayList<Attachment> attachments = new ArrayList<>();
for (int i = 0, count = mAttachments.getChildCount(); i < count; i++) {
View view = mAttachments.getChildAt(i);
Attachment attachment = (Attachment) view.getTag();
attachments.add(attachment);
}
return attachments;
}
/**
* Kick off a picker for the specified MIME type and let Android take over.
*/
@SuppressLint("InlinedApi")
private void onAddAttachment() {
Intent i = new Intent(Intent.ACTION_GET_CONTENT);
i.putExtra(Intent.EXTRA_ALLOW_MULTIPLE, true);
i.addCategory(Intent.CATEGORY_OPENABLE);
i.setType("*/*");
isInSubActivity = true;
startActivityForResult(Intent.createChooser(i, null), ACTIVITY_REQUEST_PICK_ATTACHMENT);
}
private void addAttachment(Uri uri) {
addAttachment(uri, null);
}
private void addAttachment(Uri uri, String contentType) {
Attachment attachment = new Attachment();
attachment.state = Attachment.LoadingState.URI_ONLY;
attachment.uri = uri;
attachment.contentType = contentType;
attachment.loaderId = ++mMaxLoaderId;
addAttachmentView(attachment);
initAttachmentInfoLoader(attachment);
}
private void initAttachmentInfoLoader(Attachment attachment) {
LoaderManager loaderManager = getLoaderManager();
Bundle bundle = new Bundle();
bundle.putParcelable(LOADER_ARG_ATTACHMENT, attachment);
loaderManager.initLoader(attachment.loaderId, bundle, mAttachmentInfoLoaderCallback);
}
private void initAttachmentContentLoader(Attachment attachment) {
LoaderManager loaderManager = getLoaderManager();
Bundle bundle = new Bundle();
bundle.putParcelable(LOADER_ARG_ATTACHMENT, attachment);
loaderManager.initLoader(attachment.loaderId, bundle, mAttachmentContentLoaderCallback);
}
private void addAttachmentView(Attachment attachment) {
boolean hasMetadata = (attachment.state != Attachment.LoadingState.URI_ONLY);
boolean isLoadingComplete = (attachment.state == Attachment.LoadingState.COMPLETE);
View view = getLayoutInflater().inflate(R.layout.message_compose_attachment, mAttachments, false);
TextView nameView = (TextView) view.findViewById(R.id.attachment_name);
View progressBar = view.findViewById(R.id.progressBar);
if (hasMetadata) {
nameView.setText(attachment.name);
} else {
nameView.setText(R.string.loading_attachment);
}
progressBar.setVisibility(isLoadingComplete ? View.GONE : View.VISIBLE);
ImageButton delete = (ImageButton) view.findViewById(R.id.attachment_delete);
delete.setOnClickListener(MessageCompose.this);
delete.setTag(view);
view.setTag(attachment);
mAttachments.addView(view);
}
private View getAttachmentView(int loaderId) {
for (int i = 0, childCount = mAttachments.getChildCount(); i < childCount; i++) {
View view = mAttachments.getChildAt(i);
Attachment tag = (Attachment) view.getTag();
if (tag != null && tag.loaderId == loaderId) {
return view;
}
}
return null;
}
private LoaderManager.LoaderCallbacks<Attachment> mAttachmentInfoLoaderCallback =
new LoaderManager.LoaderCallbacks<Attachment>() {
@Override
public Loader<Attachment> onCreateLoader(int id, Bundle args) {
onFetchAttachmentStarted();
Attachment attachment = args.getParcelable(LOADER_ARG_ATTACHMENT);
return new AttachmentInfoLoader(MessageCompose.this, attachment);
}
@Override
public void onLoadFinished(Loader<Attachment> loader, Attachment attachment) {
int loaderId = loader.getId();
View view = getAttachmentView(loaderId);
if (view != null) {
view.setTag(attachment);
TextView nameView = (TextView) view.findViewById(R.id.attachment_name);
nameView.setText(attachment.name);
attachment.loaderId = ++mMaxLoaderId;
initAttachmentContentLoader(attachment);
} else {
onFetchAttachmentFinished();
}
getLoaderManager().destroyLoader(loaderId);
}
@Override
public void onLoaderReset(Loader<Attachment> loader) {
onFetchAttachmentFinished();
}
};
private LoaderManager.LoaderCallbacks<Attachment> mAttachmentContentLoaderCallback =
new LoaderManager.LoaderCallbacks<Attachment>() {
@Override
public Loader<Attachment> onCreateLoader(int id, Bundle args) {
Attachment attachment = args.getParcelable(LOADER_ARG_ATTACHMENT);
return new AttachmentContentLoader(MessageCompose.this, attachment);
}
@Override
public void onLoadFinished(Loader<Attachment> loader, Attachment attachment) {
int loaderId = loader.getId();
View view = getAttachmentView(loaderId);
if (view != null) {
if (attachment.state == Attachment.LoadingState.COMPLETE) {
view.setTag(attachment);
View progressBar = view.findViewById(R.id.progressBar);
progressBar.setVisibility(View.GONE);
} else {
mAttachments.removeView(view);
}
}
onFetchAttachmentFinished();
getLoaderManager().destroyLoader(loaderId);
}
@Override
public void onLoaderReset(Loader<Attachment> loader) {
onFetchAttachmentFinished();
}
};
public OpenPgpApi getOpenPgpApi() {
return new OpenPgpApi(this, mOpenPgpServiceConnection.getService());
}
private void onFetchAttachmentStarted() {
mNumAttachmentsLoading += 1;
}
private void onFetchAttachmentFinished() {
// We're not allowed to perform fragment transactions when called from onLoadFinished().
// So we use the Handler to call performStalledAction().
mHandler.sendEmptyMessage(MSG_PERFORM_STALLED_ACTION);
}
private void performStalledAction() {
mNumAttachmentsLoading -= 1;
WaitingAction waitingFor = mWaitingForAttachments;
mWaitingForAttachments = WaitingAction.NONE;
if (waitingFor != WaitingAction.NONE) {
dismissWaitingForAttachmentDialog();
}
switch (waitingFor) {
case SEND: {
performSendAfterChecks();
break;
}
case SAVE: {
performSaveAfterChecks();
break;
}
case NONE:
break;
}
}
public void showContactPicker(int requestCode) {
requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER;
isInSubActivity = true;
startActivityForResult(mContacts.contactPickerIntent(), requestCode);
}
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
isInSubActivity = false;
if ((requestCode & REQUEST_MASK_MESSAGE_BUILDER) == REQUEST_MASK_MESSAGE_BUILDER) {
requestCode ^= REQUEST_MASK_MESSAGE_BUILDER;
if (currentMessageBuilder == null) {
Log.e(K9.LOG_TAG, "Got a message builder activity result for no message builder, " +
"this is an illegal state!");
return;
}
currentMessageBuilder.onActivityResult(this, requestCode, resultCode, data);
return;
}
if ((requestCode & REQUEST_MASK_RECIPIENT_PRESENTER) == REQUEST_MASK_RECIPIENT_PRESENTER) {
requestCode ^= REQUEST_MASK_RECIPIENT_PRESENTER;
recipientPresenter.onActivityResult(resultCode, requestCode, data);
return;
}
if (resultCode != RESULT_OK) {
return;
}
if (data == null) {
return;
}
switch (requestCode) {
case ACTIVITY_REQUEST_PICK_ATTACHMENT:
addAttachmentsFromResultIntent(data);
draftNeedsSaving = true;
break;
}
}
@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
private void addAttachmentsFromResultIntent(Intent data) {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
ClipData clipData = data.getClipData();
if (clipData != null) {
for (int i = 0, end = clipData.getItemCount(); i < end; i++) {
Uri uri = clipData.getItemAt(i).getUri();
if (uri != null) {
addAttachment(uri);
}
}
return;
}
}
Uri uri = data.getData();
if (uri != null) {
addAttachment(uri);
}
}
private void onAccountChosen(Account account, Identity identity) {
if (!mAccount.equals(account)) {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Switching account from " + mAccount + " to " + account);
}
// on draft edit, make sure we don't keep previous message UID
if (mAction == Action.EDIT_DRAFT) {
mMessageReference = null;
}
// test whether there is something to save
if (draftNeedsSaving || (mDraftId != INVALID_DRAFT_ID)) {
final long previousDraftId = mDraftId;
final Account previousAccount = mAccount;
// make current message appear as new
mDraftId = INVALID_DRAFT_ID;
// actual account switch
mAccount = account;
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Account switch, saving new draft in new account");
}
checkToSaveDraftImplicitly();
if (previousDraftId != INVALID_DRAFT_ID) {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Account switch, deleting draft from previous account: "
+ previousDraftId);
}
MessagingController.getInstance(getApplication()).deleteDraft(previousAccount,
previousDraftId);
}
} else {
mAccount = account;
}
// Show CC/BCC text input field when switching to an account that always wants them
// displayed.
// Please note that we're not hiding the fields if the user switches back to an account
// that doesn't have this setting checked.
recipientPresenter.onSwitchAccount(mAccount);
// not sure how to handle mFolder, mSourceMessage?
}
switchToIdentity(identity);
}
private void switchToIdentity(Identity identity) {
mIdentity = identity;
mIdentityChanged = true;
draftNeedsSaving = true;
updateFrom();
updateSignature();
updateMessageFormat();
recipientPresenter.onSwitchIdentity(identity);
}
private void updateFrom() {
mChooseIdentityButton.setText(mIdentity.getEmail());
}
private void updateSignature() {
if (mIdentity.getSignatureUse()) {
mSignatureView.setCharacters(mIdentity.getSignature());
mSignatureView.setVisibility(View.VISIBLE);
} else {
mSignatureView.setVisibility(View.GONE);
}
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.attachment_delete:
/*
* The view is the delete button, and we have previously set the tag of
* the delete button to the view that owns it. We don't use parent because the
* view is very complex and could change in the future.
*/
mAttachments.removeView((View) view.getTag());
draftNeedsSaving = true;
break;
case R.id.quoted_text_show:
showOrHideQuotedText(QuotedTextMode.SHOW);
updateMessageFormat();
draftNeedsSaving = true;
break;
case R.id.quoted_text_delete:
showOrHideQuotedText(QuotedTextMode.HIDE);
updateMessageFormat();
draftNeedsSaving = true;
break;
case R.id.quoted_text_edit:
mForcePlainText = true;
if (mMessageReference != null) { // shouldn't happen...
// TODO - Should we check if mSourceMessageBody is already present and bypass the MessagingController call?
MessagingController.getInstance(getApplication()).addListener(mListener);
final Account account = Preferences.getPreferences(this).getAccount(mMessageReference.getAccountUuid());
final String folderName = mMessageReference.getFolderName();
final String sourceMessageUid = mMessageReference.getUid();
MessagingController.getInstance(getApplication()).loadMessageForView(account, folderName, sourceMessageUid, null);
}
break;
case R.id.identity:
showDialog(DIALOG_CHOOSE_IDENTITY);
break;
}
}
/**
* Show or hide the quoted text.
*
* @param mode
* The value to set {@link #mQuotedTextMode} to.
*/
private void showOrHideQuotedText(QuotedTextMode mode) {
mQuotedTextMode = mode;
switch (mode) {
case NONE:
case HIDE: {
if (mode == QuotedTextMode.NONE) {
mQuotedTextShow.setVisibility(View.GONE);
} else {
mQuotedTextShow.setVisibility(View.VISIBLE);
}
mQuotedTextBar.setVisibility(View.GONE);
mQuotedText.setVisibility(View.GONE);
mQuotedHTML.setVisibility(View.GONE);
mQuotedTextEdit.setVisibility(View.GONE);
break;
}
case SHOW: {
mQuotedTextShow.setVisibility(View.GONE);
mQuotedTextBar.setVisibility(View.VISIBLE);
if (mQuotedTextFormat == SimpleMessageFormat.HTML) {
mQuotedText.setVisibility(View.GONE);
mQuotedHTML.setVisibility(View.VISIBLE);
mQuotedTextEdit.setVisibility(View.VISIBLE);
} else {
mQuotedText.setVisibility(View.VISIBLE);
mQuotedHTML.setVisibility(View.GONE);
mQuotedTextEdit.setVisibility(View.GONE);
}
break;
}
}
}
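    // Visibility summary for the cases above:
    //   NONE: everything hidden, including the "show quoted text" button.
    //   HIDE: only the "show quoted text" button is visible.
    //   SHOW: the bar is visible plus either the HTML view (with its edit
    //         button) or the plain-text view, depending on mQuotedTextFormat.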
private void askBeforeDiscard(){
if (K9.confirmDiscardMessage()) {
showDialog(DIALOG_CONFIRM_DISCARD);
} else {
onDiscard();
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
switch (item.getItemId()) {
case R.id.send:
checkToSendMessage();
break;
case R.id.save:
checkToSaveDraftAndSave();
break;
case R.id.discard:
askBeforeDiscard();
break;
case R.id.add_from_contacts:
recipientPresenter.onMenuAddFromContacts();
break;
case R.id.add_attachment:
onAddAttachment();
break;
case R.id.read_receipt:
onReadReceipt();
break;
default:
return super.onOptionsItemSelected(item);
}
return true;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
super.onCreateOptionsMenu(menu);
getMenuInflater().inflate(R.menu.message_compose_option, menu);
// Disable the 'Save' menu option if Drafts folder is set to -NONE-
if (!mAccount.hasDraftsFolder()) {
menu.findItem(R.id.save).setEnabled(false);
}
return true;
}
@Override
public boolean onPrepareOptionsMenu(Menu menu) {
super.onPrepareOptionsMenu(menu);
recipientPresenter.onPrepareOptionsMenu(menu);
return true;
}
@Override
public void onBackPressed() {
if (draftNeedsSaving) {
if (!mAccount.hasDraftsFolder()) {
showDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
} else {
showDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
}
} else {
// Check if editing an existing draft.
if (mDraftId == INVALID_DRAFT_ID) {
onDiscard();
} else {
super.onBackPressed();
}
}
}
private void showWaitingForAttachmentDialog() {
String title;
switch (mWaitingForAttachments) {
case SEND: {
title = getString(R.string.fetching_attachment_dialog_title_send);
break;
}
case SAVE: {
title = getString(R.string.fetching_attachment_dialog_title_save);
break;
}
default: {
return;
}
}
ProgressDialogFragment fragment = ProgressDialogFragment.newInstance(title,
getString(R.string.fetching_attachment_dialog_message));
fragment.show(getFragmentManager(), FRAGMENT_WAITING_FOR_ATTACHMENT);
}
public void onCancel(ProgressDialogFragment fragment) {
attachmentProgressDialogCancelled();
}
void attachmentProgressDialogCancelled() {
mWaitingForAttachments = WaitingAction.NONE;
}
private void dismissWaitingForAttachmentDialog() {
ProgressDialogFragment fragment = (ProgressDialogFragment)
getFragmentManager().findFragmentByTag(FRAGMENT_WAITING_FOR_ATTACHMENT);
if (fragment != null) {
fragment.dismiss();
}
}
@Override
public Dialog onCreateDialog(int id) {
switch (id) {
case DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE:
return new AlertDialog.Builder(this)
.setTitle(R.string.save_or_discard_draft_message_dlg_title)
.setMessage(R.string.save_or_discard_draft_message_instructions_fmt)
.setPositiveButton(R.string.save_draft_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
checkToSaveDraftAndSave();
}
})
.setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE);
onDiscard();
}
})
.create();
case DIALOG_CONFIRM_DISCARD_ON_BACK:
return new AlertDialog.Builder(this)
.setTitle(R.string.confirm_discard_draft_message_title)
.setMessage(R.string.confirm_discard_draft_message)
.setPositiveButton(R.string.cancel_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
}
})
.setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int whichButton) {
dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK);
Toast.makeText(MessageCompose.this,
getString(R.string.message_discarded_toast),
Toast.LENGTH_LONG).show();
onDiscard();
}
})
.create();
case DIALOG_CHOOSE_IDENTITY:
Context context = new ContextThemeWrapper(this,
(K9.getK9Theme() == K9.Theme.LIGHT) ?
R.style.Theme_K9_Dialog_Light :
R.style.Theme_K9_Dialog_Dark);
Builder builder = new AlertDialog.Builder(context);
builder.setTitle(R.string.send_as);
final IdentityAdapter adapter = new IdentityAdapter(context);
builder.setAdapter(adapter, new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialog, int which) {
IdentityContainer container = (IdentityContainer) adapter.getItem(which);
onAccountChosen(container.account, container.identity);
}
});
return builder.create();
case DIALOG_CONFIRM_DISCARD: {
return new AlertDialog.Builder(this)
.setTitle(R.string.dialog_confirm_delete_title)
.setMessage(R.string.dialog_confirm_delete_message)
.setPositiveButton(R.string.dialog_confirm_delete_confirm_button,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
onDiscard();
}
})
.setNegativeButton(R.string.dialog_confirm_delete_cancel_button,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
}
})
.create();
}
}
return super.onCreateDialog(id);
}
/**
* Add all attachments of an existing message as if they were added by hand.
*
* @param part
* The message part to check for being an attachment. This method will recurse if it's
* a multipart part.
* @param depth
* The recursion depth. Currently unused.
*
* @return {@code true} if all attachments were able to be attached, {@code false} otherwise.
*
* @throws MessagingException
* In case of an error
*/
private boolean loadAttachments(Part part, int depth) throws MessagingException {
if (part.getBody() instanceof Multipart) {
Multipart mp = (Multipart) part.getBody();
boolean ret = true;
for (int i = 0, count = mp.getCount(); i < count; i++) {
if (!loadAttachments(mp.getBodyPart(i), depth + 1)) {
ret = false;
}
}
return ret;
}
String contentType = MimeUtility.unfoldAndDecode(part.getContentType());
String name = MimeUtility.getHeaderParameter(contentType, "name");
if (name != null) {
if (part instanceof LocalBodyPart) {
LocalBodyPart localBodyPart = (LocalBodyPart) part;
String accountUuid = localBodyPart.getAccountUuid();
long attachmentId = localBodyPart.getId();
Uri uri = AttachmentProvider.getAttachmentUri(accountUuid, attachmentId);
addAttachment(uri);
return true;
}
return false;
}
return true;
}
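    // Attachment detection above keys off the "name" parameter of a part's
    // Content-Type header, e.g.:
    //
    //     Content-Type: image/png; name="photo.png"
    //
    // Named parts that aren't locally stored (LocalBodyPart) can't be
    // re-attached, which is what makes this method return false so callers
    // can warn about skipped attachments.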
/**
* Pull out the parts of the now loaded source message and apply them to the new message
* depending on the type of message being composed.
*
* @param message
* The source message used to populate the various text fields.
*/
private void processSourceMessage(LocalMessage message) {
try {
switch (mAction) {
case REPLY:
case REPLY_ALL: {
processMessageToReplyTo(message);
break;
}
case FORWARD: {
processMessageToForward(message);
break;
}
case EDIT_DRAFT: {
processDraftMessage(message);
break;
}
default: {
Log.w(K9.LOG_TAG, "processSourceMessage() called with unsupported action");
break;
}
}
} catch (MessagingException me) {
            /*
* Let the user continue composing their message even if we have a problem processing
* the source message. Log it as an error, though.
*/
Log.e(K9.LOG_TAG, "Error while processing source message: ", me);
} finally {
mSourceMessageProcessed = true;
draftNeedsSaving = false;
}
updateMessageFormat();
}
private void processMessageToReplyTo(Message message) throws MessagingException {
if (message.getSubject() != null) {
final String subject = PREFIX.matcher(message.getSubject()).replaceFirst("");
if (!subject.toLowerCase(Locale.US).startsWith("re:")) {
mSubjectView.setText("Re: " + subject);
} else {
mSubjectView.setText(subject);
}
} else {
mSubjectView.setText("");
}
/*
* If a reply-to was included with the message use that, otherwise use the from
* or sender address.
*/
recipientPresenter.initFromReplyToMessage(message);
if (message.getMessageId() != null && message.getMessageId().length() > 0) {
mInReplyTo = message.getMessageId();
String[] refs = message.getReferences();
if (refs != null && refs.length > 0) {
mReferences = TextUtils.join("", refs) + " " + mInReplyTo;
} else {
mReferences = mInReplyTo;
}
} else {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "could not get Message-ID.");
}
}
// Quote the message and setup the UI.
populateUIWithQuotedMessage(mAccount.isDefaultQuotedTextShown());
if (mAction == Action.REPLY || mAction == Action.REPLY_ALL) {
Identity useIdentity = IdentityHelper.getRecipientIdentityFromMessage(mAccount, message);
Identity defaultIdentity = mAccount.getIdentity(0);
if (useIdentity != defaultIdentity) {
switchToIdentity(useIdentity);
}
}
}
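    // The header handling above follows the usual RFC 5322 threading
    // convention: In-Reply-To carries the parent's Message-ID, and References
    // carries the parent's References followed by the parent's Message-ID.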
private void processMessageToForward(Message message) throws MessagingException {
String subject = message.getSubject();
if (subject != null && !subject.toLowerCase(Locale.US).startsWith("fwd:")) {
mSubjectView.setText("Fwd: " + subject);
} else {
mSubjectView.setText(subject);
}
mQuoteStyle = QuoteStyle.HEADER;
// "Be Like Thunderbird" - on forwarded messages, set the message ID
// of the forwarded message in the references and the reply to. TB
// only includes ID of the message being forwarded in the reference,
// even if there are multiple references.
if (!TextUtils.isEmpty(message.getMessageId())) {
mInReplyTo = message.getMessageId();
mReferences = mInReplyTo;
} else {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "could not get Message-ID.");
}
}
// Quote the message and setup the UI.
populateUIWithQuotedMessage(true);
if (!mSourceMessageProcessed) {
if (message.isSet(Flag.X_DOWNLOADED_PARTIAL) || !loadAttachments(message, 0)) {
mHandler.sendEmptyMessage(MSG_SKIPPED_ATTACHMENTS);
}
}
}
private void processDraftMessage(LocalMessage message) throws MessagingException {
String showQuotedTextMode = "NONE";
mDraftId = MessagingController.getInstance(getApplication()).getId(message);
mSubjectView.setText(message.getSubject());
recipientPresenter.initFromDraftMessage(message);
// Read In-Reply-To header from draft
final String[] inReplyTo = message.getHeader("In-Reply-To");
if (inReplyTo.length >= 1) {
mInReplyTo = inReplyTo[0];
}
// Read References header from draft
final String[] references = message.getHeader("References");
if (references.length >= 1) {
mReferences = references[0];
}
if (!mSourceMessageProcessed) {
loadAttachments(message, 0);
}
// Decode the identity header when loading a draft.
// See buildIdentityHeader(TextBody) for a detailed description of the composition of this blob.
Map<IdentityField, String> k9identity = new HashMap<>();
String[] identityHeaders = message.getHeader(K9.IDENTITY_HEADER);
if (identityHeaders.length > 0 && identityHeaders[0] != null) {
k9identity = IdentityHeaderParser.parse(identityHeaders[0]);
}
Identity newIdentity = new Identity();
if (k9identity.containsKey(IdentityField.SIGNATURE)) {
newIdentity.setSignatureUse(true);
newIdentity.setSignature(k9identity.get(IdentityField.SIGNATURE));
mSignatureChanged = true;
} else {
newIdentity.setSignatureUse(message.getFolder().getSignatureUse());
newIdentity.setSignature(mIdentity.getSignature());
}
if (k9identity.containsKey(IdentityField.NAME)) {
newIdentity.setName(k9identity.get(IdentityField.NAME));
mIdentityChanged = true;
} else {
newIdentity.setName(mIdentity.getName());
}
if (k9identity.containsKey(IdentityField.EMAIL)) {
newIdentity.setEmail(k9identity.get(IdentityField.EMAIL));
mIdentityChanged = true;
} else {
newIdentity.setEmail(mIdentity.getEmail());
}
if (k9identity.containsKey(IdentityField.ORIGINAL_MESSAGE)) {
mMessageReference = null;
try {
String originalMessage = k9identity.get(IdentityField.ORIGINAL_MESSAGE);
MessageReference messageReference = new MessageReference(originalMessage);
// Check if this is a valid account in our database
Preferences prefs = Preferences.getPreferences(getApplicationContext());
Account account = prefs.getAccount(messageReference.getAccountUuid());
if (account != null) {
mMessageReference = messageReference;
}
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Could not decode message reference in identity.", e);
}
}
int cursorPosition = 0;
if (k9identity.containsKey(IdentityField.CURSOR_POSITION)) {
try {
cursorPosition = Integer.parseInt(k9identity.get(IdentityField.CURSOR_POSITION));
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Could not parse cursor position for MessageCompose; continuing.", e);
}
}
if (k9identity.containsKey(IdentityField.QUOTED_TEXT_MODE)) {
showQuotedTextMode = k9identity.get(IdentityField.QUOTED_TEXT_MODE);
}
mIdentity = newIdentity;
updateSignature();
updateFrom();
Integer bodyLength = k9identity.get(IdentityField.LENGTH) != null
? Integer.valueOf(k9identity.get(IdentityField.LENGTH))
: 0;
Integer bodyOffset = k9identity.get(IdentityField.OFFSET) != null
? Integer.valueOf(k9identity.get(IdentityField.OFFSET))
: 0;
Integer bodyFooterOffset = k9identity.get(IdentityField.FOOTER_OFFSET) != null
? Integer.valueOf(k9identity.get(IdentityField.FOOTER_OFFSET))
: null;
Integer bodyPlainLength = k9identity.get(IdentityField.PLAIN_LENGTH) != null
? Integer.valueOf(k9identity.get(IdentityField.PLAIN_LENGTH))
: null;
Integer bodyPlainOffset = k9identity.get(IdentityField.PLAIN_OFFSET) != null
? Integer.valueOf(k9identity.get(IdentityField.PLAIN_OFFSET))
: null;
mQuoteStyle = k9identity.get(IdentityField.QUOTE_STYLE) != null
? QuoteStyle.valueOf(k9identity.get(IdentityField.QUOTE_STYLE))
: mAccount.getQuoteStyle();
QuotedTextMode quotedMode;
try {
quotedMode = QuotedTextMode.valueOf(showQuotedTextMode);
} catch (Exception e) {
quotedMode = QuotedTextMode.NONE;
}
// Always respect the user's current composition format preference, even if the
// draft was saved in a different format.
// TODO - The current implementation doesn't allow a user in HTML mode to edit a draft that wasn't saved with K9mail.
String messageFormatString = k9identity.get(IdentityField.MESSAGE_FORMAT);
MessageFormat messageFormat = null;
if (messageFormatString != null) {
try {
messageFormat = MessageFormat.valueOf(messageFormatString);
} catch (Exception e) { /* do nothing */ }
}
if (messageFormat == null) {
// This message probably wasn't created by us. The exception is legacy
// drafts created before the advent of HTML composition. In those cases,
// we'll display the whole message (including the quoted part) in the
// composition window. If that's the case, try and convert it to text to
// match the behavior in text mode.
mMessageContentView.setCharacters(getBodyTextFromMessage(message, SimpleMessageFormat.TEXT));
mForcePlainText = true;
showOrHideQuotedText(quotedMode);
return;
}
if (messageFormat == MessageFormat.HTML) {
Part part = MimeUtility.findFirstPartByMimeType(message, "text/html");
            if (part != null) { // a null part shouldn't happen if we were the one who saved this draft.
mQuotedTextFormat = SimpleMessageFormat.HTML;
String text = MessageExtractor.getTextFromPart(part);
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Loading message with offset " + bodyOffset + ", length " + bodyLength + ". Text length is " + text.length() + ".");
}
if (bodyOffset + bodyLength > text.length()) {
// The draft was edited outside of K-9 Mail?
Log.d(K9.LOG_TAG, "The identity field from the draft contains an invalid LENGTH/OFFSET");
bodyOffset = 0;
bodyLength = 0;
}
// Grab our reply text.
String bodyText = text.substring(bodyOffset, bodyOffset + bodyLength);
mMessageContentView.setCharacters(HtmlConverter.htmlToText(bodyText));
// Regenerate the quoted html without our user content in it.
StringBuilder quotedHTML = new StringBuilder();
quotedHTML.append(text.substring(0, bodyOffset)); // stuff before the reply
quotedHTML.append(text.substring(bodyOffset + bodyLength));
if (quotedHTML.length() > 0) {
mQuotedHtmlContent = new InsertableHtmlContent();
mQuotedHtmlContent.setQuotedContent(quotedHTML);
// We don't know if bodyOffset refers to the header or to the footer
mQuotedHtmlContent.setHeaderInsertionPoint(bodyOffset);
if (bodyFooterOffset != null) {
mQuotedHtmlContent.setFooterInsertionPoint(bodyFooterOffset);
} else {
mQuotedHtmlContent.setFooterInsertionPoint(bodyOffset);
}
mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent());
}
}
if (bodyPlainOffset != null && bodyPlainLength != null) {
processSourceMessageText(message, bodyPlainOffset, bodyPlainLength, false);
}
} else if (messageFormat == MessageFormat.TEXT) {
mQuotedTextFormat = SimpleMessageFormat.TEXT;
processSourceMessageText(message, bodyOffset, bodyLength, true);
} else {
Log.e(K9.LOG_TAG, "Unhandled message format.");
}
// Set the cursor position if we have it.
try {
mMessageContentView.setSelection(cursorPosition);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Could not set cursor position in MessageCompose; ignoring.", e);
}
showOrHideQuotedText(quotedMode);
}
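    // Sketch of the LENGTH/OFFSET mechanism decoded above: when a draft is
    // saved, the user's text is embedded inside the quoted body and its
    // position is recorded in the identity header. With a (hypothetical) saved
    // body "AAA[reply]BBB", OFFSET=3 and LENGTH=7 recover "[reply]" as the
    // editable content, while "AAA" + "BBB" is rebuilt as the quoted part.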
/**
     * Extract the text part of the source message and split it into the user's
     * composed text and the quoted text, placing each in its proper UI field.
* @param message Source message
* @param bodyOffset Insertion point for reply.
* @param bodyLength Length of reply.
* @param viewMessageContent Update mMessageContentView or not.
* @throws MessagingException
*/
private void processSourceMessageText(Message message, Integer bodyOffset, Integer bodyLength,
boolean viewMessageContent) throws MessagingException {
Part textPart = MimeUtility.findFirstPartByMimeType(message, "text/plain");
if (textPart != null) {
String text = MessageExtractor.getTextFromPart(textPart);
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Loading message with offset " + bodyOffset + ", length " + bodyLength + ". Text length is " + text.length() + ".");
}
// If we had a body length (and it was valid), separate the composition from the quoted text
// and put them in their respective places in the UI.
if (bodyLength > 0) {
try {
String bodyText = text.substring(bodyOffset, bodyOffset + bodyLength);
                    // Regenerate the quoted text without our user content or the added newlines.
StringBuilder quotedText = new StringBuilder();
if (bodyOffset == 0 && text.substring(bodyLength, bodyLength + 4).equals("\r\n\r\n")) {
// top-posting: ignore two newlines at start of quote
quotedText.append(text.substring(bodyLength + 4));
} else if (bodyOffset + bodyLength == text.length() &&
text.substring(bodyOffset - 2, bodyOffset).equals("\r\n")) {
// bottom-posting: ignore newline at end of quote
quotedText.append(text.substring(0, bodyOffset - 2));
} else {
quotedText.append(text.substring(0, bodyOffset)); // stuff before the reply
quotedText.append(text.substring(bodyOffset + bodyLength));
}
if (viewMessageContent) {
mMessageContentView.setCharacters(bodyText);
}
mQuotedText.setCharacters(quotedText);
} catch (IndexOutOfBoundsException e) {
// Invalid bodyOffset or bodyLength. The draft was edited outside of K-9 Mail?
Log.d(K9.LOG_TAG, "The identity field from the draft contains an invalid bodyOffset/bodyLength");
if (viewMessageContent) {
mMessageContentView.setCharacters(text);
}
}
} else {
if (viewMessageContent) {
mMessageContentView.setCharacters(text);
}
}
}
}
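    // The special cases above detect the draft's posting style: bodyOffset == 0
    // with "\r\n\r\n" right after the reply means top-posting (drop the two
    // blank lines that were inserted before the quote), while a reply at the
    // very end preceded by "\r\n" means bottom-posting (drop that trailing
    // newline from the quote).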
// Regexes to check for signature.
private static final Pattern DASH_SIGNATURE_PLAIN = Pattern.compile("\r\n-- \r\n.*", Pattern.DOTALL);
private static final Pattern DASH_SIGNATURE_HTML = Pattern.compile("(<br( /)?>|\r?\n)-- <br( /)?>", Pattern.CASE_INSENSITIVE);
private static final Pattern BLOCKQUOTE_START = Pattern.compile("<blockquote", Pattern.CASE_INSENSITIVE);
private static final Pattern BLOCKQUOTE_END = Pattern.compile("</blockquote>", Pattern.CASE_INSENSITIVE);
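    // These patterns target the conventional "-- " signature delimiter
    // (dash, dash, space on its own line). DASH_SIGNATURE_PLAIN matches from
    // "\r\n-- \r\n" through the end of the text; DASH_SIGNATURE_HTML matches a
    // "-- <br>" line in HTML bodies, tolerating "<br />" and bare newlines.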
/**
* Build and populate the UI with the quoted message.
*
* @param showQuotedText
* {@code true} if the quoted text should be shown, {@code false} otherwise.
*
* @throws MessagingException
*/
private void populateUIWithQuotedMessage(boolean showQuotedText) throws MessagingException {
MessageFormat origMessageFormat = mAccount.getMessageFormat();
if (mForcePlainText || origMessageFormat == MessageFormat.TEXT) {
// Use plain text for the quoted message
mQuotedTextFormat = SimpleMessageFormat.TEXT;
} else if (origMessageFormat == MessageFormat.AUTO) {
// Figure out which message format to use for the quoted text by looking if the source
// message contains a text/html part. If it does, we use that.
mQuotedTextFormat =
(MimeUtility.findFirstPartByMimeType(mSourceMessage, "text/html") == null) ?
SimpleMessageFormat.TEXT : SimpleMessageFormat.HTML;
} else {
mQuotedTextFormat = SimpleMessageFormat.HTML;
}
// TODO -- I am assuming that mSourceMessageBody will always be a text part. Is this a safe assumption?
// Handle the original message in the reply
// If we already have mSourceMessageBody, use that. It's pre-populated if we've got crypto going on.
String content = (mSourceMessageBody != null) ?
mSourceMessageBody :
getBodyTextFromMessage(mSourceMessage, mQuotedTextFormat);
if (mQuotedTextFormat == SimpleMessageFormat.HTML) {
// Strip signature.
// closing tags such as </div>, </span>, </table>, </pre> will be cut off.
if (mAccount.isStripSignature() &&
(mAction == Action.REPLY || mAction == Action.REPLY_ALL)) {
Matcher dashSignatureHtml = DASH_SIGNATURE_HTML.matcher(content);
if (dashSignatureHtml.find()) {
Matcher blockquoteStart = BLOCKQUOTE_START.matcher(content);
Matcher blockquoteEnd = BLOCKQUOTE_END.matcher(content);
List<Integer> start = new ArrayList<>();
List<Integer> end = new ArrayList<>();
while (blockquoteStart.find()) {
start.add(blockquoteStart.start());
}
while (blockquoteEnd.find()) {
end.add(blockquoteEnd.start());
}
if (start.size() != end.size()) {
Log.d(K9.LOG_TAG, "There are " + start.size() + " <blockquote> tags, but " +
end.size() + " </blockquote> tags. Refusing to strip.");
} else if (start.size() > 0) {
// Ignore quoted signatures in blockquotes.
dashSignatureHtml.region(0, start.get(0));
if (dashSignatureHtml.find()) {
// before first <blockquote>.
content = content.substring(0, dashSignatureHtml.start());
} else {
for (int i = 0; i < start.size() - 1; i++) {
// within blockquotes.
if (end.get(i) < start.get(i + 1)) {
dashSignatureHtml.region(end.get(i), start.get(i + 1));
if (dashSignatureHtml.find()) {
content = content.substring(0, dashSignatureHtml.start());
break;
}
}
}
if (end.get(end.size() - 1) < content.length()) {
// after last </blockquote>.
dashSignatureHtml.region(end.get(end.size() - 1), content.length());
if (dashSignatureHtml.find()) {
content = content.substring(0, dashSignatureHtml.start());
}
}
}
} else {
// No blockquotes found.
content = content.substring(0, dashSignatureHtml.start());
}
}
// Fix the stripping off of closing tags if a signature was stripped,
// as well as clean up the HTML of the quoted message.
HtmlCleaner cleaner = new HtmlCleaner();
CleanerProperties properties = cleaner.getProperties();
// see http://htmlcleaner.sourceforge.net/parameters.php for descriptions
properties.setNamespacesAware(false);
properties.setAdvancedXmlEscape(false);
properties.setOmitXmlDeclaration(true);
properties.setOmitDoctypeDeclaration(false);
properties.setTranslateSpecialEntities(false);
properties.setRecognizeUnicodeChars(false);
TagNode node = cleaner.clean(content);
            SimpleHtmlSerializer htmlSerializer = new SimpleHtmlSerializer(properties);
            content = htmlSerializer.getAsString(node, "UTF8");
}
// Add the HTML reply header to the top of the content.
mQuotedHtmlContent = quoteOriginalHtmlMessage(mSourceMessage, content, mQuoteStyle);
// Load the message with the reply header.
mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent());
// TODO: Also strip the signature from the text/plain part
mQuotedText.setCharacters(quoteOriginalTextMessage(mSourceMessage,
getBodyTextFromMessage(mSourceMessage, SimpleMessageFormat.TEXT), mQuoteStyle));
} else if (mQuotedTextFormat == SimpleMessageFormat.TEXT) {
if (mAccount.isStripSignature() &&
(mAction == Action.REPLY || mAction == Action.REPLY_ALL)) {
if (DASH_SIGNATURE_PLAIN.matcher(content).find()) {
content = DASH_SIGNATURE_PLAIN.matcher(content).replaceFirst("\r\n");
}
}
mQuotedText.setCharacters(quoteOriginalTextMessage(mSourceMessage, content, mQuoteStyle));
}
if (showQuotedText) {
showOrHideQuotedText(QuotedTextMode.SHOW);
} else {
showOrHideQuotedText(QuotedTextMode.HIDE);
}
}
/**
* Fetch the body text from a message in the desired message format. This method handles
* conversions between formats (html to text and vice versa) if necessary.
* @param message Message to analyze for body part.
* @param format Desired format.
* @return Text in desired format.
* @throws MessagingException
*/
private String getBodyTextFromMessage(final Message message, final SimpleMessageFormat format)
throws MessagingException {
Part part;
if (format == SimpleMessageFormat.HTML) {
// HTML takes precedence, then text.
part = MimeUtility.findFirstPartByMimeType(message, "text/html");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: HTML requested, HTML found.");
}
return MessageExtractor.getTextFromPart(part);
}
part = MimeUtility.findFirstPartByMimeType(message, "text/plain");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: HTML requested, text found.");
}
String text = MessageExtractor.getTextFromPart(part);
return HtmlConverter.textToHtml(text);
}
} else if (format == SimpleMessageFormat.TEXT) {
// Text takes precedence, then html.
part = MimeUtility.findFirstPartByMimeType(message, "text/plain");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: Text requested, text found.");
}
return MessageExtractor.getTextFromPart(part);
}
part = MimeUtility.findFirstPartByMimeType(message, "text/html");
if (part != null) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "getBodyTextFromMessage: Text requested, HTML found.");
}
String text = MessageExtractor.getTextFromPart(part);
return HtmlConverter.htmlToText(text);
}
}
// If we had nothing interesting, return an empty string.
return "";
}
// Regular expressions to look for various HTML tags. This is no HTML::Parser, but hopefully it's good enough for
// our purposes.
private static final Pattern FIND_INSERTION_POINT_HTML = Pattern.compile("(?si:.*?(<html(?:>|\\s+[^>]*>)).*)");
private static final Pattern FIND_INSERTION_POINT_HEAD = Pattern.compile("(?si:.*?(<head(?:>|\\s+[^>]*>)).*)");
private static final Pattern FIND_INSERTION_POINT_BODY = Pattern.compile("(?si:.*?(<body(?:>|\\s+[^>]*>)).*)");
private static final Pattern FIND_INSERTION_POINT_HTML_END = Pattern.compile("(?si:.*(</html>).*?)");
private static final Pattern FIND_INSERTION_POINT_BODY_END = Pattern.compile("(?si:.*(</body>).*?)");
// The first group in a Matcher contains the first capture group. We capture the tag found in the above REs so that
// we can locate the *end* of that tag.
private static final int FIND_INSERTION_POINT_FIRST_GROUP = 1;
// HTML bits to insert as appropriate
// TODO is it safe to assume utf-8 here?
private static final String FIND_INSERTION_POINT_HTML_CONTENT = "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\r\n<html>";
private static final String FIND_INSERTION_POINT_HTML_END_CONTENT = "</html>";
private static final String FIND_INSERTION_POINT_HEAD_CONTENT = "<head><meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"></head>";
// Index of the start of the beginning of a String.
private static final int FIND_INSERTION_POINT_START_OF_STRING = 0;
/**
* <p>Find the start and end positions of the HTML in the string. This should be the very top
* and bottom of the displayable message. It returns a {@link InsertableHtmlContent}, which
* contains both the insertion points and potentially modified HTML. The modified HTML should be
* used in place of the HTML in the original message.</p>
*
* <p>This method loosely mimics the HTML forward/reply behavior of BlackBerry OS 4.5/BIS 2.5, which in turn mimics
* Outlook 2003 (as best I can tell).</p>
*
* @param content Content to examine for HTML insertion points
* @return Insertion points and HTML to use for insertion.
*/
private InsertableHtmlContent findInsertionPoints(final String content) {
InsertableHtmlContent insertable = new InsertableHtmlContent();
// If there is no content, don't bother doing any of the regex dancing.
if (content == null || content.equals("")) {
return insertable;
}
// Search for opening tags.
boolean hasHtmlTag = false;
boolean hasHeadTag = false;
boolean hasBodyTag = false;
// First see if we have an opening HTML tag. If we don't find one, we'll add one later.
Matcher htmlMatcher = FIND_INSERTION_POINT_HTML.matcher(content);
if (htmlMatcher.matches()) {
hasHtmlTag = true;
}
// Look for a HEAD tag. If we're missing a BODY tag, we'll use the close of the HEAD to start our content.
Matcher headMatcher = FIND_INSERTION_POINT_HEAD.matcher(content);
if (headMatcher.matches()) {
hasHeadTag = true;
}
// Look for a BODY tag. This is the ideal place for us to start our content.
Matcher bodyMatcher = FIND_INSERTION_POINT_BODY.matcher(content);
if (bodyMatcher.matches()) {
hasBodyTag = true;
}
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Open: hasHtmlTag:" + hasHtmlTag + " hasHeadTag:" + hasHeadTag + " hasBodyTag:" + hasBodyTag);
}
// Given our inspections, let's figure out where to start our content.
// This is the ideal case -- there's a BODY tag and we insert ourselves just after it.
if (hasBodyTag) {
insertable.setQuotedContent(new StringBuilder(content));
insertable.setHeaderInsertionPoint(bodyMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP));
} else if (hasHeadTag) {
// Now search for a HEAD tag. We can insert after there.
// If BlackBerry sees a HEAD tag, it inserts right after that, so long as there is no BODY tag. It doesn't
// try to add BODY, either. Right or wrong, it seems to work fine.
insertable.setQuotedContent(new StringBuilder(content));
insertable.setHeaderInsertionPoint(headMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP));
} else if (hasHtmlTag) {
// Lastly, check for an HTML tag.
// In this case, it will add a HEAD, but no BODY.
StringBuilder newContent = new StringBuilder(content);
// Insert the HEAD content just after the HTML tag.
newContent.insert(htmlMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP), FIND_INSERTION_POINT_HEAD_CONTENT);
insertable.setQuotedContent(newContent);
// The new insertion point is the end of the HTML tag, plus the length of the HEAD content.
insertable.setHeaderInsertionPoint(htmlMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP) + FIND_INSERTION_POINT_HEAD_CONTENT.length());
} else {
// If we have none of the above, we probably have a fragment of HTML. Yahoo! and Gmail both do this.
// Again, we add a HEAD, but not BODY.
StringBuilder newContent = new StringBuilder(content);
// Add the HTML and HEAD tags.
newContent.insert(FIND_INSERTION_POINT_START_OF_STRING, FIND_INSERTION_POINT_HEAD_CONTENT);
newContent.insert(FIND_INSERTION_POINT_START_OF_STRING, FIND_INSERTION_POINT_HTML_CONTENT);
// Append the </HTML> tag.
newContent.append(FIND_INSERTION_POINT_HTML_END_CONTENT);
insertable.setQuotedContent(newContent);
insertable.setHeaderInsertionPoint(FIND_INSERTION_POINT_HTML_CONTENT.length() + FIND_INSERTION_POINT_HEAD_CONTENT.length());
}
// Search for closing tags. We have to do this after we deal with opening tags since it may
// have modified the message.
boolean hasHtmlEndTag = false;
boolean hasBodyEndTag = false;
        // See if we have a closing HTML tag.
Matcher htmlEndMatcher = FIND_INSERTION_POINT_HTML_END.matcher(insertable.getQuotedContent());
if (htmlEndMatcher.matches()) {
hasHtmlEndTag = true;
}
        // Look for a closing BODY tag. Just before it is the ideal place for our footer.
Matcher bodyEndMatcher = FIND_INSERTION_POINT_BODY_END.matcher(insertable.getQuotedContent());
if (bodyEndMatcher.matches()) {
hasBodyEndTag = true;
}
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Close: hasHtmlEndTag:" + hasHtmlEndTag + " hasBodyEndTag:" + hasBodyEndTag);
}
// Now figure out where to put our footer.
        // This is the ideal case -- there's a closing BODY tag and we insert ourselves just before it.
if (hasBodyEndTag) {
insertable.setFooterInsertionPoint(bodyEndMatcher.start(FIND_INSERTION_POINT_FIRST_GROUP));
} else if (hasHtmlEndTag) {
            // Check for a closing HTML tag. Add ourselves just before it.
insertable.setFooterInsertionPoint(htmlEndMatcher.start(FIND_INSERTION_POINT_FIRST_GROUP));
} else {
// If we have none of the above, we probably have a fragment of HTML.
// Set our footer insertion point as the end of the string.
insertable.setFooterInsertionPoint(insertable.getQuotedContent().length());
}
return insertable;
}
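    // Worked example for findInsertionPoints(): given the bare fragment
    // "<div>Hi</div>" (no html/head/body tags), the method prepends the
    // DOCTYPE+<html> and <head> boilerplate, appends "</html>", places the
    // header insertion point right after the injected head content, and the
    // footer insertion point just before the appended "</html>".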
static class SendMessageTask extends AsyncTask<Void, Void, Void> {
Context context;
Account account;
Contacts contacts;
Message message;
Long draftId;
SendMessageTask(Context context, Account account, Contacts contacts, Message message, Long draftId) {
this.context = context;
this.account = account;
this.contacts = contacts;
this.message = message;
this.draftId = draftId;
}
@Override
protected Void doInBackground(Void... params) {
try {
contacts.markAsContacted(message.getRecipients(RecipientType.TO));
contacts.markAsContacted(message.getRecipients(RecipientType.CC));
contacts.markAsContacted(message.getRecipients(RecipientType.BCC));
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Failed to mark contact as contacted.", e);
}
MessagingController.getInstance(context).sendMessage(account, message, null);
if (draftId != null) {
// TODO set draft id to invalid in MessageCompose!
MessagingController.getInstance(context).deleteDraft(account, draftId);
}
return null;
}
}
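    // draftId is deliberately a nullable Long: callers pass null when the
    // message being sent never had a saved draft, so there is no draft to
    // delete once the send has been handed off to the MessagingController.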
class Listener extends MessagingListener {
@Override
public void loadMessageForViewStarted(Account account, String folder, String uid) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mHandler.sendEmptyMessage(MSG_PROGRESS_ON);
}
@Override
public void loadMessageForViewFinished(Account account, String folder, String uid, LocalMessage message) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mHandler.sendEmptyMessage(MSG_PROGRESS_OFF);
}
@Override
public void loadMessageForViewBodyAvailable(Account account, String folder, String uid, final Message message) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mSourceMessage = message;
runOnUiThread(new Runnable() {
@Override
public void run() {
loadLocalMessageForDisplay((LocalMessage) message);
}
});
}
@Override
public void loadMessageForViewFailed(Account account, String folder, String uid, Throwable t) {
if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) {
return;
}
mHandler.sendEmptyMessage(MSG_PROGRESS_OFF);
// TODO show network error
}
@Override
public void messageUidChanged(Account account, String folder, String oldUid, String newUid) {
// Track UID changes of the source message
if (mMessageReference != null) {
final Account sourceAccount = Preferences.getPreferences(MessageCompose.this).getAccount(mMessageReference.getAccountUuid());
final String sourceFolder = mMessageReference.getFolderName();
final String sourceMessageUid = mMessageReference.getUid();
if (account.equals(sourceAccount) && (folder.equals(sourceFolder))) {
if (oldUid.equals(sourceMessageUid)) {
mMessageReference = mMessageReference.withModifiedUid(newUid);
}
if ((mSourceMessage != null) && (oldUid.equals(mSourceMessage.getUid()))) {
mSourceMessage.setUid(newUid);
}
}
}
}
}
private void loadLocalMessageForDisplay(LocalMessage message) {
// We check to see if we've previously processed the source message since this
// could be called when switching from HTML to text replies. If that happens, we
// only want to update the UI with quoted text (which picks the appropriate
// part).
if (mSourceProcessed) {
try {
populateUIWithQuotedMessage(true);
} catch (MessagingException e) {
// Hm, if we couldn't populate the UI after source reprocessing, let's just delete it?
showOrHideQuotedText(QuotedTextMode.HIDE);
Log.e(K9.LOG_TAG, "Could not re-process source message; deleting quoted text to be safe.", e);
}
updateMessageFormat();
} else {
processSourceMessage(message);
mSourceProcessed = true;
}
}
/**
* When we are launched with an intent that includes a mailto: URI, we can actually
* gather quite a few of our message fields from it.
*
* @param mailTo
     *         The MailTo object we use to initialize the message fields
*/
private void initializeFromMailto(MailTo mailTo) {
recipientPresenter.initFromMailto(mailTo);
String subject = mailTo.getSubject();
if (subject != null && !subject.isEmpty()) {
mSubjectView.setText(subject);
}
String body = mailTo.getBody();
        if (body != null && !body.isEmpty()) {
mMessageContentView.setCharacters(body);
}
}
private static class SaveMessageTask extends AsyncTask<Void, Void, Void> {
Context context;
Account account;
Contacts contacts;
Handler handler;
Message message;
long draftId;
boolean saveRemotely;
SaveMessageTask(Context context, Account account, Contacts contacts,
Handler handler, Message message, long draftId, boolean saveRemotely) {
this.context = context;
this.account = account;
this.contacts = contacts;
this.handler = handler;
this.message = message;
this.draftId = draftId;
this.saveRemotely = saveRemotely;
}
@Override
protected Void doInBackground(Void... params) {
final MessagingController messagingController = MessagingController.getInstance(context);
Message draftMessage = messagingController.saveDraft(account, message, draftId, saveRemotely);
draftId = messagingController.getId(draftMessage);
android.os.Message msg = android.os.Message.obtain(handler, MSG_SAVED_DRAFT, draftId);
handler.sendMessage(msg);
return null;
}
}
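    // The id returned by saveDraft() is posted back to the UI thread as a
    // MSG_SAVED_DRAFT handler message so the activity can keep tracking the
    // draft for later updates or deletion.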
private static final int REPLY_WRAP_LINE_WIDTH = 72;
private static final int QUOTE_BUFFER_LENGTH = 512; // amount of extra buffer to allocate to accommodate quoting headers or prefixes
/**
* Add quoting markup to a text message.
* @param originalMessage Metadata for message being quoted.
* @param messageBody Text of the message to be quoted.
* @param quoteStyle Style of quoting.
* @return Quoted text.
* @throws MessagingException
*/
private String quoteOriginalTextMessage(final Message originalMessage, final String messageBody, final QuoteStyle quoteStyle) throws MessagingException {
String body = messageBody == null ? "" : messageBody;
String sentDate = getSentDateText(originalMessage);
if (quoteStyle == QuoteStyle.PREFIX) {
StringBuilder quotedText = new StringBuilder(body.length() + QUOTE_BUFFER_LENGTH);
if (sentDate.length() != 0) {
quotedText.append(String.format(
getString(R.string.message_compose_reply_header_fmt_with_date) + "\r\n",
sentDate,
Address.toString(originalMessage.getFrom())));
} else {
quotedText.append(String.format(
getString(R.string.message_compose_reply_header_fmt) + "\r\n",
Address.toString(originalMessage.getFrom()))
);
}
final String prefix = mAccount.getQuotePrefix();
final String wrappedText = Utility.wrap(body, REPLY_WRAP_LINE_WIDTH - prefix.length());
// "$" and "\" in the quote prefix have to be escaped for
// the replaceAll() invocation.
final String escapedPrefix = prefix.replaceAll("(\\\\|\\$)", "\\\\$1");
quotedText.append(wrappedText.replaceAll("(?m)^", escapedPrefix));
return quotedText.toString().replaceAll("\\\r", "");
} else if (quoteStyle == QuoteStyle.HEADER) {
StringBuilder quotedText = new StringBuilder(body.length() + QUOTE_BUFFER_LENGTH);
quotedText.append("\r\n");
quotedText.append(getString(R.string.message_compose_quote_header_separator)).append("\r\n");
if (originalMessage.getFrom() != null && Address.toString(originalMessage.getFrom()).length() != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_from)).append(" ").append(Address.toString(originalMessage.getFrom())).append("\r\n");
}
if (sentDate.length() != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_send_date)).append(" ").append(sentDate).append("\r\n");
}
if (originalMessage.getRecipients(RecipientType.TO) != null && originalMessage.getRecipients(RecipientType.TO).length != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_to)).append(" ").append(Address.toString(originalMessage.getRecipients(RecipientType.TO))).append("\r\n");
}
if (originalMessage.getRecipients(RecipientType.CC) != null && originalMessage.getRecipients(RecipientType.CC).length != 0) {
quotedText.append(getString(R.string.message_compose_quote_header_cc)).append(" ").append(Address.toString(originalMessage.getRecipients(RecipientType.CC))).append("\r\n");
}
if (originalMessage.getSubject() != null) {
quotedText.append(getString(R.string.message_compose_quote_header_subject)).append(" ").append(originalMessage.getSubject()).append("\r\n");
}
quotedText.append("\r\n");
quotedText.append(body);
return quotedText.toString();
} else {
// Shouldn't ever happen.
return body;
}
}
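    // Example of QuoteStyle.PREFIX output, assuming a "> " quote prefix and a
    // reply header along the lines of the string resources used above:
    //
    //     On Jan 1, 2016, Alice <alice@example.com> wrote:
    //     > line one
    //     > line two
    //
    // The final replaceAll strips carriage returns, so prefix-quoted text ends
    // up with bare "\n" line endings.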
/**
     * Add quoting markup to an HTML message.
* @param originalMessage Metadata for message being quoted.
* @param messageBody Text of the message to be quoted.
* @param quoteStyle Style of quoting.
* @return Modified insertable message.
* @throws MessagingException
*/
private InsertableHtmlContent quoteOriginalHtmlMessage(final Message originalMessage, final String messageBody, final QuoteStyle quoteStyle) throws MessagingException {
InsertableHtmlContent insertable = findInsertionPoints(messageBody);
String sentDate = getSentDateText(originalMessage);
if (quoteStyle == QuoteStyle.PREFIX) {
StringBuilder header = new StringBuilder(QUOTE_BUFFER_LENGTH);
header.append("<div class=\"gmail_quote\">");
if (sentDate.length() != 0) {
header.append(HtmlConverter.textToHtmlFragment(String.format(
getString(R.string.message_compose_reply_header_fmt_with_date),
sentDate,
Address.toString(originalMessage.getFrom()))
));
} else {
header.append(HtmlConverter.textToHtmlFragment(String.format(
getString(R.string.message_compose_reply_header_fmt),
Address.toString(originalMessage.getFrom()))
));
}
header.append("<blockquote class=\"gmail_quote\" " +
"style=\"margin: 0pt 0pt 0pt 0.8ex; border-left: 1px solid rgb(204, 204, 204); padding-left: 1ex;\">\r\n");
String footer = "</blockquote></div>";
insertable.insertIntoQuotedHeader(header.toString());
insertable.insertIntoQuotedFooter(footer);
} else if (quoteStyle == QuoteStyle.HEADER) {
StringBuilder header = new StringBuilder();
header.append("<div style='font-size:10.0pt;font-family:\"Tahoma\",\"sans-serif\";padding:3.0pt 0in 0in 0in'>\r\n");
header.append("<hr style='border:none;border-top:solid #E1E1E1 1.0pt'>\r\n"); // This gets converted into a horizontal line during html to text conversion.
if (originalMessage.getFrom() != null && Address.toString(originalMessage.getFrom()).length() != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_from)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getFrom())))
.append("<br>\r\n");
}
if (sentDate.length() != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_send_date)).append("</b> ")
.append(sentDate)
.append("<br>\r\n");
}
if (originalMessage.getRecipients(RecipientType.TO) != null && originalMessage.getRecipients(RecipientType.TO).length != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_to)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getRecipients(RecipientType.TO))))
.append("<br>\r\n");
}
if (originalMessage.getRecipients(RecipientType.CC) != null && originalMessage.getRecipients(RecipientType.CC).length != 0) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_cc)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getRecipients(RecipientType.CC))))
.append("<br>\r\n");
}
if (originalMessage.getSubject() != null) {
header.append("<b>").append(getString(R.string.message_compose_quote_header_subject)).append("</b> ")
.append(HtmlConverter.textToHtmlFragment(originalMessage.getSubject()))
.append("<br>\r\n");
}
header.append("</div>\r\n");
header.append("<br>\r\n");
insertable.insertIntoQuotedHeader(header.toString());
}
return insertable;
}
/**
* Used to store an {@link Identity} instance together with the {@link Account} it belongs to.
*
* @see IdentityAdapter
*/
static class IdentityContainer {
public final Identity identity;
public final Account account;
IdentityContainer(Identity identity, Account account) {
this.identity = identity;
this.account = account;
}
}
/**
* Adapter for the <em>Choose identity</em> list view.
*
* <p>
* Account names are displayed as section headers, identities as selectable list items.
* </p>
*/
static class IdentityAdapter extends BaseAdapter {
private LayoutInflater mLayoutInflater;
private List<Object> mItems;
public IdentityAdapter(Context context) {
mLayoutInflater = (LayoutInflater) context.getSystemService(
Context.LAYOUT_INFLATER_SERVICE);
List<Object> items = new ArrayList<>();
Preferences prefs = Preferences.getPreferences(context.getApplicationContext());
Collection<Account> accounts = prefs.getAvailableAccounts();
for (Account account : accounts) {
items.add(account);
List<Identity> identities = account.getIdentities();
for (Identity identity : identities) {
items.add(new IdentityContainer(identity, account));
}
}
mItems = items;
}
@Override
public int getCount() {
return mItems.size();
}
@Override
public int getViewTypeCount() {
return 2;
}
@Override
public int getItemViewType(int position) {
return (mItems.get(position) instanceof Account) ? 0 : 1;
}
@Override
public boolean isEnabled(int position) {
return (mItems.get(position) instanceof IdentityContainer);
}
@Override
public Object getItem(int position) {
return mItems.get(position);
}
@Override
public long getItemId(int position) {
return position;
}
@Override
public boolean hasStableIds() {
return false;
}
@Override
public View getView(int position, View convertView, ViewGroup parent) {
Object item = mItems.get(position);
View view = null;
if (item instanceof Account) {
if (convertView != null && convertView.getTag() instanceof AccountHolder) {
view = convertView;
} else {
view = mLayoutInflater.inflate(R.layout.choose_account_item, parent, false);
AccountHolder holder = new AccountHolder();
holder.name = (TextView) view.findViewById(R.id.name);
holder.chip = view.findViewById(R.id.chip);
view.setTag(holder);
}
Account account = (Account) item;
AccountHolder holder = (AccountHolder) view.getTag();
holder.name.setText(account.getDescription());
holder.chip.setBackgroundColor(account.getChipColor());
} else if (item instanceof IdentityContainer) {
if (convertView != null && convertView.getTag() instanceof IdentityHolder) {
view = convertView;
} else {
view = mLayoutInflater.inflate(R.layout.choose_identity_item, parent, false);
IdentityHolder holder = new IdentityHolder();
holder.name = (TextView) view.findViewById(R.id.name);
holder.description = (TextView) view.findViewById(R.id.description);
view.setTag(holder);
}
IdentityContainer identityContainer = (IdentityContainer) item;
Identity identity = identityContainer.identity;
IdentityHolder holder = (IdentityHolder) view.getTag();
holder.name.setText(identity.getDescription());
holder.description.setText(getIdentityDescription(identity));
}
return view;
}
static class AccountHolder {
public TextView name;
public View chip;
}
static class IdentityHolder {
public TextView name;
public TextView description;
}
}
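    // This adapter uses the standard two-view-type pattern for sectioned
    // lists: accounts render as non-clickable headers (isEnabled() returns
    // false for them) and identities as selectable rows, so a single flat
    // List<Object> can back both.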
private static String getIdentityDescription(Identity identity) {
return String.format("%s <%s>", identity.getName(), identity.getEmail());
}
private void setMessageFormat(SimpleMessageFormat format) {
// This method will later be used to enable/disable the rich text editing mode.
mMessageFormat = format;
}
private void updateMessageFormat() {
MessageFormat origMessageFormat = mAccount.getMessageFormat();
SimpleMessageFormat messageFormat;
if (origMessageFormat == MessageFormat.TEXT) {
// The user wants to send text/plain messages. We don't override that choice under
// any circumstances.
messageFormat = SimpleMessageFormat.TEXT;
} else if (mForcePlainText && includeQuotedText()) {
// Right now we send a text/plain-only message when the quoted text was edited, no
// matter what the user selected for the message format.
messageFormat = SimpleMessageFormat.TEXT;
} else if (recipientPresenter.isForceTextMessageFormat()) {
// Right now we only support PGP inline which doesn't play well with HTML. So force
// plain text in those cases.
messageFormat = SimpleMessageFormat.TEXT;
} else if (origMessageFormat == MessageFormat.AUTO) {
if (mAction == Action.COMPOSE || mQuotedTextFormat == SimpleMessageFormat.TEXT ||
!includeQuotedText()) {
// If the message format is set to "AUTO" we use text/plain whenever possible. That
// is, when composing new messages and replying to or forwarding text/plain
// messages.
messageFormat = SimpleMessageFormat.TEXT;
} else {
messageFormat = SimpleMessageFormat.HTML;
}
} else {
// In all other cases use HTML
messageFormat = SimpleMessageFormat.HTML;
}
setMessageFormat(messageFormat);
}
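    // Decision order above, highest priority first:
    //   1. account format is TEXT                  -> plain text, always
    //   2. quoted text was edited and is included  -> plain text
    //   3. recipients force text (inline PGP)      -> plain text
    //   4. account format is AUTO                  -> plain text unless an
    //      HTML quote is included
    //   5. otherwise                               -> HTML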
private boolean includeQuotedText() {
return (mQuotedTextMode == QuotedTextMode.SHOW);
}
/**
* Extract the date from a message and convert it into a locale-specific
* date string suitable for use in a header for a quoted message.
*
* @return A string with the formatted date/time
*/
private String getSentDateText(Message message) {
try {
final int dateStyle = DateFormat.LONG;
final int timeStyle = DateFormat.LONG;
Date date = message.getSentDate();
Locale locale = getResources().getConfiguration().locale;
return DateFormat.getDateTimeInstance(dateStyle, timeStyle, locale)
.format(date);
} catch (Exception e) {
return "";
}
}
private boolean isCryptoProviderEnabled() {
return mOpenPgpProvider != null;
}
@Override
public void onMessageBuildSuccess(MimeMessage message, boolean isDraft) {
if (isDraft) {
draftNeedsSaving = false;
currentMessageBuilder = null;
if (mAction == Action.EDIT_DRAFT && mMessageReference != null) {
message.setUid(mMessageReference.getUid());
}
boolean saveRemotely = recipientPresenter.isAllowSavingDraftRemotely();
new SaveMessageTask(getApplicationContext(), mAccount, mContacts, mHandler,
message, mDraftId, saveRemotely).execute();
if (mFinishAfterDraftSaved) {
finish();
} else {
setProgressBarIndeterminateVisibility(false);
}
} else {
currentMessageBuilder = null;
new SendMessageTask(getApplicationContext(), mAccount, mContacts, message,
mDraftId != INVALID_DRAFT_ID ? mDraftId : null).execute();
finish();
}
}
@Override
public void onMessageBuildCancel() {
currentMessageBuilder = null;
setProgressBarIndeterminateVisibility(false);
}
@Override
public void onMessageBuildException(MessagingException me) {
Log.e(K9.LOG_TAG, "Error sending message", me);
Toast.makeText(MessageCompose.this,
getString(R.string.send_aborted, me.getLocalizedMessage()),
Toast.LENGTH_LONG).show();
currentMessageBuilder = null;
setProgressBarIndeterminateVisibility(false);
}
@Override
public void onMessageBuildReturnPendingIntent(PendingIntent pendingIntent, int requestCode) {
requestCode |= REQUEST_MASK_MESSAGE_BUILDER;
try {
startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0);
} catch (SendIntentException e) {
Log.e(K9.LOG_TAG, "Error starting pending intent from builder!", e);
}
}
public void launchUserInteractionPendingIntent(PendingIntent pendingIntent, int requestCode) {
requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER;
try {
startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0);
} catch (SendIntentException e) {
            Log.e(K9.LOG_TAG, "Error starting pending intent from recipient presenter!", e);
}
}
}
| 1 | 13,425 | The Activity is recreated on configuration changes and the value of the field is lost. So, e.g., pressing 'send' once will display the error message; if you rotate the device and press 'send' again, the message will show another time. Use `onSaveInstanceState()` and `onRestoreInstanceState()` to save and restore the value of the field. | k9mail-k-9 | java |
@@ -643,7 +643,7 @@ func (s *Server) reloadOptions(curOpts, newOpts *Options) error {
if err != nil {
return err
}
- // Create a context that is used to pass special info that we may need
+ // Create a ctx that is used to pass special info that we may need
// while applying the new options.
ctx := reloadContext{oldClusterPerms: curOpts.Cluster.Permissions}
s.setOpts(newOpts) | 1 | // Copyright 2017-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"crypto/tls"
"errors"
"fmt"
"net/url"
"reflect"
"strings"
"sync/atomic"
"time"
)
// FlagSnapshot captures the server options as specified by CLI flags at
// startup. This should not be modified once the server has started.
var FlagSnapshot *Options
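// reloadContext carries information derived from the previous options that is
// needed while applying the new ones (currently the old cluster permissions).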
type reloadContext struct {
oldClusterPerms *RoutePermissions
}
// option is a hot-swappable configuration setting.
type option interface {
// Apply the server option.
Apply(server *Server)
// IsLoggingChange indicates if this option requires reloading the logger.
IsLoggingChange() bool
// IsAuthChange indicates if this option requires reloading authorization.
IsAuthChange() bool
// IsClusterPermsChange indicates if this option requires reloading
// cluster permissions.
IsClusterPermsChange() bool
}
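// Each concrete option type below embeds one of the base structs (noopOption,
// loggingOption, authOption) to inherit default answers for these predicates
// and overrides only what differs. A hypothetical new hot-swappable setting
// would follow the same shape: embed the closest base struct, carry the new
// value, and implement Apply.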
// noopOption is a base struct that provides default no-op behaviors.
type noopOption struct{}
func (n noopOption) IsLoggingChange() bool {
return false
}
func (n noopOption) IsAuthChange() bool {
return false
}
func (n noopOption) IsClusterPermsChange() bool {
return false
}
// loggingOption is a base struct that provides default option behaviors for
// logging-related options.
type loggingOption struct {
noopOption
}
func (l loggingOption) IsLoggingChange() bool {
return true
}
// traceOption implements the option interface for the `trace` setting.
type traceOption struct {
loggingOption
newValue bool
}
// Apply is a no-op because logging will be reloaded after options are applied.
func (t *traceOption) Apply(server *Server) {
server.Noticef("Reloaded: trace = %v", t.newValue)
}
// debugOption implements the option interface for the `debug` setting.
type debugOption struct {
loggingOption
newValue bool
}
// Apply is a no-op because logging will be reloaded after options are applied.
func (d *debugOption) Apply(server *Server) {
server.Noticef("Reloaded: debug = %v", d.newValue)
}
// logtimeOption implements the option interface for the `logtime` setting.
type logtimeOption struct {
loggingOption
newValue bool
}
// Apply is a no-op because logging will be reloaded after options are applied.
func (l *logtimeOption) Apply(server *Server) {
server.Noticef("Reloaded: logtime = %v", l.newValue)
}
// logfileOption implements the option interface for the `log_file` setting.
type logfileOption struct {
loggingOption
newValue string
}
// Apply is a no-op because logging will be reloaded after options are applied.
func (l *logfileOption) Apply(server *Server) {
server.Noticef("Reloaded: log_file = %v", l.newValue)
}
// syslogOption implements the option interface for the `syslog` setting.
type syslogOption struct {
loggingOption
newValue bool
}
// Apply is a no-op because logging will be reloaded after options are applied.
func (s *syslogOption) Apply(server *Server) {
server.Noticef("Reloaded: syslog = %v", s.newValue)
}
// remoteSyslogOption implements the option interface for the `remote_syslog`
// setting.
type remoteSyslogOption struct {
loggingOption
newValue string
}
// Apply is a no-op because logging will be reloaded after options are applied.
func (r *remoteSyslogOption) Apply(server *Server) {
server.Noticef("Reloaded: remote_syslog = %v", r.newValue)
}
// tlsOption implements the option interface for the `tls` setting.
type tlsOption struct {
noopOption
newValue *tls.Config
}
// Apply the tls change.
func (t *tlsOption) Apply(server *Server) {
server.mu.Lock()
tlsRequired := t.newValue != nil
server.info.TLSRequired = tlsRequired
message := "disabled"
if tlsRequired {
server.info.TLSVerify = (t.newValue.ClientAuth == tls.RequireAndVerifyClientCert)
message = "enabled"
}
server.mu.Unlock()
server.Noticef("Reloaded: tls = %s", message)
}
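// Illustrative sketch, not part of the original file: the mapping from a
// *tls.Config to the INFO fields set in Apply above, pulled out as a
// hypothetical helper for clarity.
func tlsInfoFields(config *tls.Config) (required, verify bool) {
	if config == nil {
		// A nil config means TLS is disabled entirely.
		return false, false
	}
	// Verification is only advertised when client certs are required.
	return true, config.ClientAuth == tls.RequireAndVerifyClientCert
}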
// tlsTimeoutOption implements the option interface for the tls `timeout`
// setting.
type tlsTimeoutOption struct {
noopOption
newValue float64
}
// Apply is a no-op because the timeout will be reloaded after options are
// applied.
func (t *tlsTimeoutOption) Apply(server *Server) {
server.Noticef("Reloaded: tls timeout = %v", t.newValue)
}
// authOption is a base struct that provides default option behaviors.
type authOption struct {
noopOption
}
func (o authOption) IsAuthChange() bool {
return true
}
// usernameOption implements the option interface for the `username` setting.
type usernameOption struct {
authOption
}
// Apply is a no-op because authorization will be reloaded after options are
// applied.
func (u *usernameOption) Apply(server *Server) {
server.Noticef("Reloaded: authorization username")
}
// passwordOption implements the option interface for the `password` setting.
type passwordOption struct {
authOption
}
// Apply is a no-op because authorization will be reloaded after options are
// applied.
func (p *passwordOption) Apply(server *Server) {
server.Noticef("Reloaded: authorization password")
}
// authorizationOption implements the option interface for the `token`
// authorization setting.
type authorizationOption struct {
authOption
}
// Apply is a no-op because authorization will be reloaded after options are
// applied.
func (a *authorizationOption) Apply(server *Server) {
server.Noticef("Reloaded: authorization token")
}
// authTimeoutOption implements the option interface for the authorization
// `timeout` setting.
type authTimeoutOption struct {
noopOption // Not authOption because this is a no-op; will be reloaded with options.
newValue float64
}
// Apply is a no-op because the timeout will be reloaded after options are
// applied.
func (a *authTimeoutOption) Apply(server *Server) {
server.Noticef("Reloaded: authorization timeout = %v", a.newValue)
}
// usersOption implements the option interface for the authorization `users`
// setting.
type usersOption struct {
authOption
}
func (u *usersOption) Apply(server *Server) {
server.Noticef("Reloaded: authorization users")
}
// nkeysOption implements the option interface for the authorization `nkeys`
// setting.
type nkeysOption struct {
authOption
}
func (u *nkeysOption) Apply(server *Server) {
server.Noticef("Reloaded: authorization nkey users")
}
// clusterOption implements the option interface for the `cluster` setting.
type clusterOption struct {
authOption
newValue ClusterOpts
permsChanged bool
}
// Apply the cluster change.
func (c *clusterOption) Apply(server *Server) {
// TODO: support enabling/disabling clustering.
server.mu.Lock()
tlsRequired := c.newValue.TLSConfig != nil
server.routeInfo.TLSRequired = tlsRequired
server.routeInfo.TLSVerify = tlsRequired
server.routeInfo.AuthRequired = c.newValue.Username != ""
if c.newValue.NoAdvertise {
server.routeInfo.ClientConnectURLs = nil
} else {
server.routeInfo.ClientConnectURLs = server.clientConnectURLs
}
server.setRouteInfoHostPortAndIP()
server.mu.Unlock()
server.Noticef("Reloaded: cluster")
if tlsRequired && c.newValue.TLSConfig.InsecureSkipVerify {
server.Warnf(clusterTLSInsecureWarning)
}
}
func (c *clusterOption) IsClusterPermsChange() bool {
return c.permsChanged
}
// routesOption implements the option interface for the cluster `routes`
// setting.
type routesOption struct {
noopOption
add []*url.URL
remove []*url.URL
}
// Apply the route changes by adding and removing the necessary routes.
func (r *routesOption) Apply(server *Server) {
server.mu.Lock()
routes := make([]*client, len(server.routes))
i := 0
for _, client := range server.routes {
routes[i] = client
i++
}
// If there was a change, notify monitoring code that it should
// update the route URLs if the /varz endpoint is inspected.
if len(r.add)+len(r.remove) > 0 {
server.varzUpdateRouteURLs = true
}
server.mu.Unlock()
// Remove routes.
for _, remove := range r.remove {
for _, client := range routes {
var url *url.URL
client.mu.Lock()
if client.route != nil {
url = client.route.url
}
client.mu.Unlock()
if url != nil && urlsAreEqual(url, remove) {
// Do not attempt to reconnect when route is removed.
client.setNoReconnect()
client.closeConnection(RouteRemoved)
server.Noticef("Removed route %v", remove)
}
}
}
// Add routes.
server.solicitRoutes(r.add)
server.Noticef("Reloaded: cluster routes")
}
// maxConnOption implements the option interface for the `max_connections`
// setting.
type maxConnOption struct {
noopOption
newValue int
}
// Apply the max connections change by closing random connections until we
// are below the limit, if necessary.
func (m *maxConnOption) Apply(server *Server) {
server.mu.Lock()
var (
clients = make([]*client, len(server.clients))
i = 0
)
// Map iteration is random, which allows us to close random connections.
for _, client := range server.clients {
clients[i] = client
i++
}
server.mu.Unlock()
if m.newValue > 0 && len(clients) > m.newValue {
// Close connections until we are within the limit.
var (
numClose = len(clients) - m.newValue
closed = 0
)
for _, client := range clients {
client.maxConnExceeded()
closed++
if closed >= numClose {
break
}
}
server.Noticef("Closed %d connections to fall within max_connections", closed)
}
server.Noticef("Reloaded: max_connections = %v", m.newValue)
}
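// Illustrative sketch, not part of the original file: Apply above relies on
// Go's randomized map iteration order to decide which connections get closed.
// This hypothetical helper shows the same trick in isolation.
func pickRandomClients(clients map[uint64]*client, n int) []*client {
	picked := make([]*client, 0, n)
	for _, c := range clients { // iteration order is randomized by the runtime
		if len(picked) == n {
			break
		}
		picked = append(picked, c)
	}
	return picked
}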
// pidFileOption implements the option interface for the `pid_file` setting.
type pidFileOption struct {
noopOption
newValue string
}
// Apply the setting by logging the pid to the new file.
func (p *pidFileOption) Apply(server *Server) {
if p.newValue == "" {
return
}
if err := server.logPid(); err != nil {
server.Errorf("Failed to write pidfile: %v", err)
}
server.Noticef("Reloaded: pid_file = %v", p.newValue)
}
// portsFileDirOption implements the option interface for the `portFileDir` setting.
type portsFileDirOption struct {
noopOption
oldValue string
newValue string
}
func (p *portsFileDirOption) Apply(server *Server) {
server.deletePortsFile(p.oldValue)
server.logPorts()
server.Noticef("Reloaded: ports_file_dir = %v", p.newValue)
}
// maxControlLineOption implements the option interface for the
// `max_control_line` setting.
type maxControlLineOption struct {
noopOption
newValue int32
}
// Apply the setting by updating each client.
func (m *maxControlLineOption) Apply(server *Server) {
mcl := int32(m.newValue)
server.mu.Lock()
for _, client := range server.clients {
atomic.StoreInt32(&client.mcl, mcl)
}
server.mu.Unlock()
server.Noticef("Reloaded: max_control_line = %d", mcl)
}
// maxPayloadOption implements the option interface for the `max_payload`
// setting.
type maxPayloadOption struct {
noopOption
newValue int32
}
// Apply the setting by updating the server info and each client.
func (m *maxPayloadOption) Apply(server *Server) {
server.mu.Lock()
server.info.MaxPayload = m.newValue
for _, client := range server.clients {
atomic.StoreInt32(&client.mpay, int32(m.newValue))
}
server.mu.Unlock()
server.Noticef("Reloaded: max_payload = %d", m.newValue)
}
// pingIntervalOption implements the option interface for the `ping_interval`
// setting.
type pingIntervalOption struct {
noopOption
newValue time.Duration
}
// Apply is a no-op because the ping interval will be reloaded after options
// are applied.
func (p *pingIntervalOption) Apply(server *Server) {
server.Noticef("Reloaded: ping_interval = %s", p.newValue)
}
// maxPingsOutOption implements the option interface for the `ping_max`
// setting.
type maxPingsOutOption struct {
noopOption
newValue int
}
// Apply is a no-op because the max outstanding pings will be reloaded after
// options are applied.
func (m *maxPingsOutOption) Apply(server *Server) {
server.Noticef("Reloaded: ping_max = %d", m.newValue)
}
// writeDeadlineOption implements the option interface for the `write_deadline`
// setting.
type writeDeadlineOption struct {
noopOption
newValue time.Duration
}
// Apply is a no-op because the write deadline will be reloaded after options
// are applied.
func (w *writeDeadlineOption) Apply(server *Server) {
server.Noticef("Reloaded: write_deadline = %s", w.newValue)
}
// clientAdvertiseOption implements the option interface for the `client_advertise` setting.
type clientAdvertiseOption struct {
noopOption
newValue string
}
// Apply the setting by updating the server info and regenerate the infoJSON byte array.
func (c *clientAdvertiseOption) Apply(server *Server) {
server.mu.Lock()
server.setInfoHostPortAndGenerateJSON()
server.mu.Unlock()
server.Noticef("Reload: client_advertise = %s", c.newValue)
}
// accountsOption implements the option interface.
// Ensures that the authorization code is executed if there is any change in accounts.
type accountsOption struct {
authOption
}
// Apply is a no-op. Changes will be applied in reloadAuthorization
func (a *accountsOption) Apply(s *Server) {
s.Noticef("Reloaded: accounts")
}
// connectErrorReports implements the option interface for the `connect_error_reports`
// setting.
type connectErrorReports struct {
noopOption
newValue int
}
// Apply is a no-op because the value will be reloaded after options are applied.
func (c *connectErrorReports) Apply(s *Server) {
s.Noticef("Reloaded: connect_error_reports = %v", c.newValue)
}
// reconnectErrorReports implements the option interface for the
// `reconnect_error_reports` setting.
type reconnectErrorReports struct {
noopOption
newValue int
}
// Apply is a no-op because the value will be reloaded after options are applied.
func (r *reconnectErrorReports) Apply(s *Server) {
s.Noticef("Reloaded: reconnect_error_reports = %v", r.newValue)
}
// maxTracedMsgLenOption implements the option interface for the `max_traced_msg_len` setting.
type maxTracedMsgLenOption struct {
noopOption
newValue int
}
// Apply the setting by updating the maximum traced message length.
func (m *maxTracedMsgLenOption) Apply(server *Server) {
server.mu.Lock()
defer server.mu.Unlock()
server.opts.MaxTracedMsgLen = m.newValue
server.Noticef("Reloaded: max_traced_msg_len = %d", m.newValue)
}
// Reload reads the current configuration file and applies any supported
// changes. This returns an error if the server was not started with a config
// file or an option which doesn't support hot-swapping was changed.
func (s *Server) Reload() error {
s.mu.Lock()
if s.configFile == "" {
s.mu.Unlock()
return errors.New("can only reload config when a file is provided using -c or --config")
}
newOpts, err := ProcessConfigFile(s.configFile)
if err != nil {
s.mu.Unlock()
// TODO: Dump previous good config to a .bak file?
return err
}
curOpts := s.getOpts()
// Wipe trusted keys if needed when we have an operator.
if len(curOpts.TrustedOperators) > 0 && len(curOpts.TrustedKeys) > 0 {
curOpts.TrustedKeys = nil
}
clientOrgPort := curOpts.Port
clusterOrgPort := curOpts.Cluster.Port
gatewayOrgPort := curOpts.Gateway.Port
leafnodesOrgPort := curOpts.LeafNode.Port
s.mu.Unlock()
// Apply flags over config file settings.
newOpts = MergeOptions(newOpts, FlagSnapshot)
// Need more processing for boolean flags...
if FlagSnapshot != nil {
applyBoolFlags(newOpts, FlagSnapshot)
}
setBaselineOptions(newOpts)
// setBaselineOptions sets Port to 0 if set to -1 (RANDOM port)
// If that's the case, set it to the saved value when the accept loop was
// created.
if newOpts.Port == 0 {
newOpts.Port = clientOrgPort
}
// We don't do that for cluster, so check against -1.
if newOpts.Cluster.Port == -1 {
newOpts.Cluster.Port = clusterOrgPort
}
if newOpts.Gateway.Port == -1 {
newOpts.Gateway.Port = gatewayOrgPort
}
if newOpts.LeafNode.Port == -1 {
newOpts.LeafNode.Port = leafnodesOrgPort
}
if err := s.reloadOptions(curOpts, newOpts); err != nil {
return err
}
s.mu.Lock()
s.configTime = time.Now()
s.updateVarzConfigReloadableFields(s.varz)
s.mu.Unlock()
return nil
}
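// Illustrative sketch, not part of the original file: one way a caller might
// drive Reload, here on a timer purely for demonstration (real deployments
// typically hook SIGHUP instead). It uses only packages this file already
// imports; the method name is hypothetical.
func (s *Server) exampleReloadLoop(every time.Duration) {
	ticker := time.NewTicker(every)
	defer ticker.Stop()
	for range ticker.C {
		if err := s.Reload(); err != nil {
			// On error, Reload leaves the previous configuration in place.
			s.Errorf("Failed to reload server configuration: %v", err)
		}
	}
}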
func applyBoolFlags(newOpts, flagOpts *Options) {
// Reset fields that may have been set to `true` in
// MergeOptions() when some of the flags default to `true`
// but have not been explicitly set, and therefore the value
// from the config file should take precedence.
for name, val := range newOpts.inConfig {
f := reflect.ValueOf(newOpts).Elem()
names := strings.Split(name, ".")
for _, name := range names {
f = f.FieldByName(name)
}
f.SetBool(val)
}
// Now apply value (true or false) from flags that have
// been explicitly set in command line
for name, val := range flagOpts.inCmdLine {
f := reflect.ValueOf(newOpts).Elem()
names := strings.Split(name, ".")
for _, name := range names {
f = f.FieldByName(name)
}
f.SetBool(val)
}
}
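// Illustrative sketch, not part of the original file: the dotted-name
// reflection walk used above, shown on hypothetical toy types. A name such as
// "Cluster.NoAdvertise" selects nested struct fields one hop at a time.
type toyClusterOpts struct{ NoAdvertise bool }

type toyOptions struct{ Cluster toyClusterOpts }

func setToyBool(opts *toyOptions, name string, val bool) {
	f := reflect.ValueOf(opts).Elem()
	for _, part := range strings.Split(name, ".") {
		f = f.FieldByName(part)
	}
	f.SetBool(val) // e.g. setToyBool(o, "Cluster.NoAdvertise", true)
}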
// reloadOptions reloads the server config with the provided options. If an
// option that doesn't support hot-swapping is changed, this returns an error.
func (s *Server) reloadOptions(curOpts, newOpts *Options) error {
// Carry over to the new options any settings that cannot be configured
// via the config file (this can happen in applications starting the
// NATS Server programmatically).
newOpts.CustomClientAuthentication = curOpts.CustomClientAuthentication
newOpts.CustomRouterAuthentication = curOpts.CustomRouterAuthentication
changed, err := s.diffOptions(newOpts)
if err != nil {
return err
}
// Create a context that is used to pass special info that we may need
// while applying the new options.
ctx := reloadContext{oldClusterPerms: curOpts.Cluster.Permissions}
s.setOpts(newOpts)
s.applyOptions(&ctx, changed)
return nil
}
// diffOptions returns a slice containing options which have been changed. If
// an option that doesn't support hot-swapping is changed, this returns an
// error.
func (s *Server) diffOptions(newOpts *Options) ([]option, error) {
var (
oldConfig = reflect.ValueOf(s.getOpts()).Elem()
newConfig = reflect.ValueOf(newOpts).Elem()
diffOpts = []option{}
)
for i := 0; i < oldConfig.NumField(); i++ {
field := oldConfig.Type().Field(i)
// field.PkgPath is empty for exported fields, and is not for unexported ones.
// We skip the unexported fields.
if field.PkgPath != "" {
continue
}
var (
oldValue = oldConfig.Field(i).Interface()
newValue = newConfig.Field(i).Interface()
changed = !reflect.DeepEqual(oldValue, newValue)
)
if !changed {
continue
}
switch strings.ToLower(field.Name) {
case "trace":
diffOpts = append(diffOpts, &traceOption{newValue: newValue.(bool)})
case "debug":
diffOpts = append(diffOpts, &debugOption{newValue: newValue.(bool)})
case "logtime":
diffOpts = append(diffOpts, &logtimeOption{newValue: newValue.(bool)})
case "logfile":
diffOpts = append(diffOpts, &logfileOption{newValue: newValue.(string)})
case "syslog":
diffOpts = append(diffOpts, &syslogOption{newValue: newValue.(bool)})
case "remotesyslog":
diffOpts = append(diffOpts, &remoteSyslogOption{newValue: newValue.(string)})
case "tlsconfig":
diffOpts = append(diffOpts, &tlsOption{newValue: newValue.(*tls.Config)})
case "tlstimeout":
diffOpts = append(diffOpts, &tlsTimeoutOption{newValue: newValue.(float64)})
case "username":
diffOpts = append(diffOpts, &usernameOption{})
case "password":
diffOpts = append(diffOpts, &passwordOption{})
case "authorization":
diffOpts = append(diffOpts, &authorizationOption{})
case "authtimeout":
diffOpts = append(diffOpts, &authTimeoutOption{newValue: newValue.(float64)})
case "users":
diffOpts = append(diffOpts, &usersOption{})
case "nkeys":
diffOpts = append(diffOpts, &nkeysOption{})
case "cluster":
newClusterOpts := newValue.(ClusterOpts)
oldClusterOpts := oldValue.(ClusterOpts)
if err := validateClusterOpts(oldClusterOpts, newClusterOpts); err != nil {
return nil, err
}
permsChanged := !reflect.DeepEqual(newClusterOpts.Permissions, oldClusterOpts.Permissions)
diffOpts = append(diffOpts, &clusterOption{newValue: newClusterOpts, permsChanged: permsChanged})
case "routes":
add, remove := diffRoutes(oldValue.([]*url.URL), newValue.([]*url.URL))
diffOpts = append(diffOpts, &routesOption{add: add, remove: remove})
case "maxconn":
diffOpts = append(diffOpts, &maxConnOption{newValue: newValue.(int)})
case "pidfile":
diffOpts = append(diffOpts, &pidFileOption{newValue: newValue.(string)})
case "portsfiledir":
diffOpts = append(diffOpts, &portsFileDirOption{newValue: newValue.(string), oldValue: oldValue.(string)})
case "maxcontrolline":
diffOpts = append(diffOpts, &maxControlLineOption{newValue: newValue.(int32)})
case "maxpayload":
diffOpts = append(diffOpts, &maxPayloadOption{newValue: newValue.(int32)})
case "pinginterval":
diffOpts = append(diffOpts, &pingIntervalOption{newValue: newValue.(time.Duration)})
case "maxpingsout":
diffOpts = append(diffOpts, &maxPingsOutOption{newValue: newValue.(int)})
case "writedeadline":
diffOpts = append(diffOpts, &writeDeadlineOption{newValue: newValue.(time.Duration)})
case "clientadvertise":
cliAdv := newValue.(string)
if cliAdv != "" {
// Validate ClientAdvertise syntax
if _, _, err := parseHostPort(cliAdv, 0); err != nil {
return nil, fmt.Errorf("invalid ClientAdvertise value of %s, err=%v", cliAdv, err)
}
}
diffOpts = append(diffOpts, &clientAdvertiseOption{newValue: cliAdv})
case "accounts":
diffOpts = append(diffOpts, &accountsOption{})
case "resolver", "accountresolver", "accountsresolver":
// We can't move from no resolver to one. So check for that.
if (oldValue == nil && newValue != nil) ||
(oldValue != nil && newValue == nil) {
return nil, fmt.Errorf("config reload does not support moving to or from an account resolver")
}
diffOpts = append(diffOpts, &accountsOption{})
case "accountresolvertlsconfig":
diffOpts = append(diffOpts, &accountsOption{})
case "gateway":
// Not supported for now, but report a warning if the gateway configuration
// actually changed so that the user knows it won't take effect.
// Any deep-equal is likely to fail when there is a TLSConfig, so
// remove it for the comparison.
tmpOld := oldValue.(GatewayOpts)
tmpNew := newValue.(GatewayOpts)
tmpOld.TLSConfig = nil
tmpNew.TLSConfig = nil
// If there is really a change, prevent the reload.
if !reflect.DeepEqual(tmpOld, tmpNew) {
// See TODO(ik) note below about printing old/new values.
return nil, fmt.Errorf("config reload not supported for %s: old=%v, new=%v",
field.Name, oldValue, newValue)
}
case "leafnode":
// Similar to gateways
tmpOld := oldValue.(LeafNodeOpts)
tmpNew := newValue.(LeafNodeOpts)
tmpOld.TLSConfig = nil
tmpNew.TLSConfig = nil
// If there is really a change, prevent the reload.
if !reflect.DeepEqual(tmpOld, tmpNew) {
// See TODO(ik) note below about printing old/new values.
return nil, fmt.Errorf("config reload not supported for %s: old=%v, new=%v",
field.Name, oldValue, newValue)
}
case "connecterrorreports":
diffOpts = append(diffOpts, &connectErrorReports{newValue: newValue.(int)})
case "reconnecterrorreports":
diffOpts = append(diffOpts, &reconnectErrorReports{newValue: newValue.(int)})
case "nolog", "nosigs":
// Ignore NoLog and NoSigs options since they are not parsed and only used in
// testing.
continue
case "disableshortfirstping":
newOpts.DisableShortFirstPing = oldValue.(bool)
continue
case "maxtracedmsglen":
diffOpts = append(diffOpts, &maxTracedMsgLenOption{newValue: newValue.(int)})
case "port":
// check to see if newValue == 0 and continue if so.
if newValue == 0 {
// ignore RANDOM_PORT
continue
}
fallthrough
default:
// TODO(ik): Implement String() on those options to have a nice print.
// %v is difficult to figure what's what, %+v print private fields and
// would print passwords. Tried json.Marshal but it is too verbose for
// the URL array.
// Bail out if attempting to reload any unsupported options.
return nil, fmt.Errorf("config reload not supported for %s: old=%v, new=%v",
field.Name, oldValue, newValue)
}
}
return diffOpts, nil
}
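// Illustrative sketch, not part of the original file: the exported-field
// filter diffOptions relies on. reflect.StructField.PkgPath is empty exactly
// for exported fields, so unexported ones can be skipped generically. The
// argument is assumed to be a pointer to a struct, as *Options is above.
func exportedFieldNames(v interface{}) []string {
	var names []string
	t := reflect.ValueOf(v).Elem().Type()
	for i := 0; i < t.NumField(); i++ {
		if f := t.Field(i); f.PkgPath == "" {
			names = append(names, f.Name)
		}
	}
	return names
}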
func (s *Server) applyOptions(ctx *reloadContext, opts []option) {
var (
reloadLogging = false
reloadAuth = false
reloadClusterPerms = false
)
for _, opt := range opts {
opt.Apply(s)
if opt.IsLoggingChange() {
reloadLogging = true
}
if opt.IsAuthChange() {
reloadAuth = true
}
if opt.IsClusterPermsChange() {
reloadClusterPerms = true
}
}
if reloadLogging {
s.ConfigureLogger()
}
if reloadAuth {
s.reloadAuthorization()
}
if reloadClusterPerms {
s.reloadClusterPermissions(ctx.oldClusterPerms)
}
s.Noticef("Reloaded server configuration")
}
// reloadAuthorization reconfigures the server authorization settings,
// disconnects any clients who are no longer authorized, and removes any
// unauthorized subscriptions.
func (s *Server) reloadAuthorization() {
// This map will contain the names of accounts that have their streams
// import configuration changed.
awcsti := make(map[string]struct{})
s.mu.Lock()
// This cannot be changed for now, so it is OK to check the server's trustedKeys.
if s.trustedKeys == nil {
// We need to drain the old accounts here since we have something
// new configured. We do not want s.accounts to change since that would
// mean adding a lock to lookupAccount which is what we are trying to
// optimize for with the change from a map to a sync.Map.
oldAccounts := make(map[string]*Account)
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
acc.mu.RLock()
oldAccounts[acc.Name] = acc
acc.mu.RUnlock()
s.accounts.Delete(k)
return true
})
s.gacc = nil
s.configureAccounts()
s.configureAuthorization()
s.accounts.Range(func(k, v interface{}) bool {
newAcc := v.(*Account)
if acc, ok := oldAccounts[newAcc.Name]; ok {
// If the account exists in the latest config, "transfer" the account's
// sublist and client map to the new account.
acc.mu.RLock()
if len(acc.clients) > 0 {
newAcc.clients = make(map[*client]*client, len(acc.clients))
for _, c := range acc.clients {
newAcc.clients[c] = c
}
}
newAcc.sl = acc.sl
newAcc.rm = acc.rm
newAcc.respMap = acc.respMap
acc.mu.RUnlock()
// Check if the current and new config of this account are the same
// in terms of stream imports.
if !acc.checkStreamImportsEqual(newAcc) {
awcsti[newAcc.Name] = struct{}{}
}
}
return true
})
} else if s.opts.AccountResolver != nil {
s.configureResolver()
if _, ok := s.accResolver.(*MemAccResolver); ok {
// Check preloads so we can issue warnings etc if needed.
s.checkResolvePreloads()
// With a memory resolver we want to do something similar to configured accounts.
// We will walk the accounts and delete them if they are no longer present via fetch.
// If they are present we will force a claim update to process changes.
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
// Skip global account.
if acc == s.gacc {
return true
}
acc.mu.RLock()
accName := acc.Name
acc.mu.RUnlock()
// Release server lock for following actions
s.mu.Unlock()
accClaims, claimJWT, _ := s.fetchAccountClaims(accName)
if accClaims != nil {
err := s.updateAccountWithClaimJWT(acc, claimJWT)
if err != nil && err != ErrAccountResolverSameClaims {
s.Noticef("Reloaded: deleting account [bad claims]: %q", accName)
s.accounts.Delete(k)
}
} else {
s.Noticef("Reloaded: deleting account [removed]: %q", accName)
s.accounts.Delete(k)
}
// Regrab server lock.
s.mu.Lock()
return true
})
}
}
// Gather clients that changed accounts. We will close them and they
// will reconnect, doing the right thing.
var (
cclientsa [64]*client
cclients = cclientsa[:0]
clientsa [64]*client
clients = clientsa[:0]
routesa [64]*client
routes = routesa[:0]
)
for _, client := range s.clients {
if s.clientHasMovedToDifferentAccount(client) {
cclients = append(cclients, client)
} else {
clients = append(clients, client)
}
}
for _, route := range s.routes {
routes = append(routes, route)
}
s.mu.Unlock()
// Close clients that have moved accounts
for _, client := range cclients {
client.closeConnection(ClientClosed)
}
for _, client := range clients {
// Disconnect any unauthorized clients.
if !s.isClientAuthorized(client) {
client.authViolation()
continue
}
// Remove any unauthorized subscriptions and check for account imports.
client.processSubsOnConfigReload(awcsti)
}
for _, route := range routes {
// Disconnect any unauthorized routes.
// Do this only for routes that were accepted, not initiated,
// because in the latter case we don't have the username/password
// of the remote server.
if !route.isSolicitedRoute() && !s.isRouterAuthorized(route) {
route.setNoReconnect()
route.authViolation()
}
}
}
// Returns true if the given client's current account has changed (or the
// user no longer exists) in the new config, false if the user did not
// change accounts.
// Server lock is held on entry.
func (s *Server) clientHasMovedToDifferentAccount(c *client) bool {
var (
nu *NkeyUser
u *User
)
if c.opts.Nkey != "" {
if s.nkeys != nil {
nu = s.nkeys[c.opts.Nkey]
}
} else if c.opts.Username != "" {
if s.users != nil {
u = s.users[c.opts.Username]
}
} else {
return false
}
// Get the current account name
c.mu.Lock()
var curAccName string
if c.acc != nil {
curAccName = c.acc.Name
}
c.mu.Unlock()
if nu != nil && nu.Account != nil {
return curAccName != nu.Account.Name
} else if u != nil && u.Account != nil {
return curAccName != u.Account.Name
}
// user/nkey no longer exists.
return true
}
// reloadClusterPermissions reconfigures the cluster's permissions
// and sets the permissions on all existing routes, sending an
// update INFO protocol so that remotes can resend their local
// subs if needed, and sending local subs matching the cluster's
// import subjects.
func (s *Server) reloadClusterPermissions(oldPerms *RoutePermissions) {
s.mu.Lock()
var (
infoJSON []byte
newPerms = s.opts.Cluster.Permissions
routes = make(map[uint64]*client, len(s.routes))
withNewProto int
)
// Get all connected routes
for i, route := range s.routes {
// Count the number of routes that can understand receiving INFO updates.
route.mu.Lock()
if route.opts.Protocol >= RouteProtoInfo {
withNewProto++
}
route.mu.Unlock()
routes[i] = route
}
// If the new permissions are nil, then clear the routeInfo import/export.
if newPerms == nil {
s.routeInfo.Import = nil
s.routeInfo.Export = nil
} else {
s.routeInfo.Import = newPerms.Import
s.routeInfo.Export = newPerms.Export
}
// Regenerate route INFO
s.generateRouteInfoJSON()
infoJSON = s.routeInfoJSON
s.mu.Unlock()
// If there were no routes, we are done.
if len(routes) == 0 {
return
}
// If only older servers, simply close all routes and they will do the right
// thing on reconnect.
if withNewProto == 0 {
for _, route := range routes {
route.closeConnection(RouteRemoved)
}
return
}
// Fake clients to test cluster permissions
oldPermsTester := &client{}
oldPermsTester.setRoutePermissions(oldPerms)
newPermsTester := &client{}
newPermsTester.setRoutePermissions(newPerms)
var (
_localSubs [4096]*subscription
localSubs = _localSubs[:0]
subsNeedSUB []*subscription
subsNeedUNSUB []*subscription
deleteRoutedSubs []*subscription
)
// FIXME(dlc) - Change for accounts.
s.gacc.sl.localSubs(&localSubs)
// Go through all local subscriptions
for _, sub := range localSubs {
// Get all subs that can now be imported
subj := string(sub.subject)
couldImportThen := oldPermsTester.canImport(subj)
canImportNow := newPermsTester.canImport(subj)
if canImportNow {
// If we could not before, then we will need to send a SUB protocol.
if !couldImportThen {
subsNeedSUB = append(subsNeedSUB, sub)
}
} else if couldImportThen {
// We were previously able to import this sub, but now
// we can't, so we need to send an UNSUB protocol.
subsNeedUNSUB = append(subsNeedUNSUB, sub)
}
}
for _, route := range routes {
route.mu.Lock()
// If route is to older server, simply close connection.
if route.opts.Protocol < RouteProtoInfo {
route.mu.Unlock()
route.closeConnection(RouteRemoved)
continue
}
route.setRoutePermissions(newPerms)
for _, sub := range route.subs {
// If we can't export, we need to drop the subscriptions that
// we have on behalf of this route.
subj := string(sub.subject)
if !route.canExport(subj) {
delete(route.subs, string(sub.sid))
deleteRoutedSubs = append(deleteRoutedSubs, sub)
}
}
// Send an update INFO, which will allow remote server to show
// our current route config in monitoring and resend subscriptions
// that we now possibly allow with a change of Export permissions.
route.enqueueProto(infoJSON)
// Now send SUB and UNSUB protocols as needed.
route.sendRouteSubProtos(subsNeedSUB, false, nil)
route.sendRouteUnSubProtos(subsNeedUNSUB, false, nil)
route.mu.Unlock()
}
// Remove as a batch all the subs that we have removed from each route.
// FIXME(dlc) - Change for accounts.
s.gacc.sl.RemoveBatch(deleteRoutedSubs)
}
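// Illustrative sketch, not part of the original file: the throwaway "tester
// client" trick used above, in isolation. A bare client carrying only route
// permissions lets canImport answer whether a subject would cross the route
// under the old versus the new permissions. The helper name is hypothetical.
func permissionDelta(oldPerms, newPerms *RoutePermissions, subject string) (wasAllowed, nowAllowed bool) {
	oldTester, newTester := &client{}, &client{}
	oldTester.setRoutePermissions(oldPerms)
	newTester.setRoutePermissions(newPerms)
	return oldTester.canImport(subject), newTester.canImport(subject)
}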
// validateClusterOpts ensures the new ClusterOpts does not change host or
// port, which do not support reload.
func validateClusterOpts(old, new ClusterOpts) error {
if old.Host != new.Host {
return fmt.Errorf("config reload not supported for cluster host: old=%s, new=%s",
old.Host, new.Host)
}
if old.Port != new.Port {
return fmt.Errorf("config reload not supported for cluster port: old=%d, new=%d",
old.Port, new.Port)
}
// Validate Cluster.Advertise syntax
if new.Advertise != "" {
if _, _, err := parseHostPort(new.Advertise, 0); err != nil {
return fmt.Errorf("invalid Cluster.Advertise value of %s, err=%v", new.Advertise, err)
}
}
return nil
}
// diffRoutes diffs the old routes and the new routes and returns the ones that
// should be added and removed from the server.
func diffRoutes(old, new []*url.URL) (add, remove []*url.URL) {
// Find routes to remove.
removeLoop:
for _, oldRoute := range old {
for _, newRoute := range new {
if urlsAreEqual(oldRoute, newRoute) {
continue removeLoop
}
}
remove = append(remove, oldRoute)
}
// Find routes to add.
addLoop:
for _, newRoute := range new {
for _, oldRoute := range old {
if urlsAreEqual(oldRoute, newRoute) {
continue addLoop
}
}
add = append(add, newRoute)
}
return add, remove
}
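// Illustrative usage sketch, not part of the original file; the URLs are
// hypothetical. Here route a is removed, route c is added, and b is kept.
func exampleDiffRoutes() {
	a, _ := url.Parse("nats://10.0.0.1:6222")
	b, _ := url.Parse("nats://10.0.0.2:6222")
	c, _ := url.Parse("nats://10.0.0.3:6222")
	add, remove := diffRoutes([]*url.URL{a, b}, []*url.URL{b, c})
	fmt.Printf("add=%v remove=%v\n", add, remove) // add has c, remove has a
}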
| 1 | 9,980 | Looks like a "find and replace" unintended change here | nats-io-nats-server | go |
@@ -590,12 +590,13 @@ class SPRegion(PyRegion):
inputVector = numpy.array(rfInput[0]).astype('uint32')
outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32')
- # Switch to using a random SP if learning mode is off and the SP hasn't
- # learned anything yet.
+ # Don't strip unlearned columns if learning is off and the SP hasn't
+ # learned anything yet. This acts as a random SP.
if (not self.learningMode) and (self._sfdr.getIterationLearnNum() == 0):
- self._sfdr.compute(inputVector, self.learningMode, outputVector, False)
+ self._sfdr.compute(inputVector, self.learningMode, outputVector)
else:
self._sfdr.compute(inputVector, self.learningMode, outputVector)
+ self._sfdr.stripUnlearnedColumns(outputVector)
self._spatialPoolerOutput[:] = outputVector[:]
| 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import numpy
from nupic.bindings.math import GetNTAReal
from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler
from nupic.research.spatial_pooler import SpatialPooler as PYSpatialPooler
import nupic.research.fdrutilities as fdru
from nupic.support import getArgumentDescriptions
from PyRegion import PyRegion
def getDefaultSPImp():
"""
Return the default spatial pooler implementation for this region.
"""
return 'cpp'
def getSPClass(spatialImp):
""" Return the class corresponding to the given spatialImp string
"""
if spatialImp == 'py':
return PYSpatialPooler
elif spatialImp == 'cpp':
return CPPSpatialPooler
else:
raise RuntimeError("Invalid spatialImp '%s'. Legal values are: 'py', "
"'cpp'" % (spatialImp))
def _buildArgs(f, self=None, kwargs={}):
"""
Get the default arguments from the function and assign as instance vars.
Return a list of 3-tuples with (name, description, defaultValue) for each
argument to the function.
Assigns all arguments to the function as instance variables of SPRegion.
If the argument was not provided, uses the default value.
Pops any values from kwargs that go to the function.
"""
# Get the name, description, and default value for each argument
argTuples = getArgumentDescriptions(f)
argTuples = argTuples[1:] # Remove 'self'
# Get the names of the parameters to our own constructor and remove them
# Check for _original_init first, because if LockAttributesMixin is used,
# __init__'s signature will be just (self, *args, **kw), but
# _original_init is created with the original signature
#init = getattr(self, '_original_init', self.__init__)
init = SPRegion.__init__
ourArgNames = [t[0] for t in getArgumentDescriptions(init)]
# Also remove a few other names that aren't in our constructor but are
# computed automatically (e.g. numberOfCols for the TP)
# TODO: where does numberOfCols come into SPRegion?
ourArgNames += [
'numberOfCols',
]
for argTuple in argTuples[:]:
if argTuple[0] in ourArgNames:
argTuples.remove(argTuple)
# Build the dictionary of arguments
if self:
for argTuple in argTuples:
argName = argTuple[0]
if argName in kwargs:
# Argument was provided
argValue = kwargs.pop(argName)
else:
# Argument was not provided; use the default value if there is one, and
# raise an exception otherwise
if len(argTuple) == 2:
# No default value
raise TypeError("Must provide value for '%s'" % argName)
argValue = argTuple[2]
# Set as an instance variable if 'self' was passed in
setattr(self, argName, argValue)
return argTuples
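def _exampleBuildArgs():
  """
  Illustrative sketch, not part of the original file: _buildArgs yields
  (name, description, defaultValue) 3-tuples for each constructor argument,
  minus 'self' and anything SPRegion's own constructor already handles. The
  toy class below is hypothetical.
  """
  class _Toy(object):
    def __init__(self, alpha=0.1, beta=2):
      pass
  # Expect tuples for 'alpha' and 'beta', with defaults 0.1 and 2.
  return _buildArgs(_Toy.__init__)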
def _getAdditionalSpecs(spatialImp, kwargs={}):
"""Build the additional specs in three groups (for the inspector)
Use the type of the default argument to set the Spec type, defaulting
to 'Byte' for None and complex types
Determines the spatial parameters based on the selected implementation.
It defaults to SpatialPooler.
"""
typeNames = {int: 'UInt32', float: 'Real32', str: 'Byte', bool: 'bool', tuple: 'tuple'}
def getArgType(arg):
t = typeNames.get(type(arg), 'Byte')
count = 0 if t == 'Byte' else 1
if t == 'tuple':
t = typeNames.get(type(arg[0]), 'Byte')
count = len(arg)
if t == 'bool':
t = 'UInt32'
return (t, count)
def getConstraints(arg):
t = typeNames.get(type(arg), 'Byte')
if t == 'Byte':
return 'multiple'
elif t == 'bool':
return 'bool'
else:
return ''
# Get arguments from spatial pooler constructors, figure out types of
# variables and populate spatialSpec.
SpatialClass = getSPClass(spatialImp)
sArgTuples = _buildArgs(SpatialClass.__init__)
spatialSpec = {}
for argTuple in sArgTuples:
d = dict(
description=argTuple[1],
accessMode='ReadWrite',
dataType=getArgType(argTuple[2])[0],
count=getArgType(argTuple[2])[1],
constraints=getConstraints(argTuple[2]))
spatialSpec[argTuple[0]] = d
# Add special parameters that weren't handled automatically
# Spatial parameters only!
spatialSpec.update(dict(
columnCount=dict(
description='Total number of columns (coincidences).',
accessMode='Read',
dataType='UInt32',
count=1,
constraints=''),
inputWidth=dict(
description='Size of inputs to the SP.',
accessMode='Read',
dataType='UInt32',
count=1,
constraints=''),
spInputNonZeros=dict(
description='The indices of the non-zero inputs to the spatial pooler',
accessMode='Read',
dataType='UInt32',
count=0,
constraints=''),
spOutputNonZeros=dict(
description='The indices of the non-zero outputs from the spatial pooler',
accessMode='Read',
dataType='UInt32',
count=0,
constraints=''),
spOverlapDistribution=dict(
description="""The overlaps between the active output coincidences
and the input. The overlap amounts for each coincidence are sorted
from highest to lowest. """,
accessMode='Read',
dataType='Real32',
count=0,
constraints=''),
sparseCoincidenceMatrix=dict(
description='The coincidences, as a SparseMatrix',
accessMode='Read',
dataType='Byte',
count=0,
constraints=''),
denseOutput=dict(
description='Score for each coincidence.',
accessMode='Read',
dataType='Real32',
count=0,
constraints=''),
spLearningStatsStr=dict(
description="""String representation of dictionary containing a number
of statistics related to learning.""",
accessMode='Read',
dataType='Byte',
count=0,
constraints='handle'),
spatialImp=dict(
description="""Which spatial pooler implementation to use. Set to either
'py', or 'cpp'. The 'cpp' implementation is optimized for
speed in C++.""",
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints='enum: py, cpp'),
))
# The last group is for parameters that aren't specific to spatial pooler
otherSpec = dict(
learningMode=dict(
description='1 if the node is learning (default 1).',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
inferenceMode=dict(
description='1 if the node is inferring (default 0).',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
anomalyMode=dict(
description='1 if an anomaly score is being computed',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
topDownMode=dict(
description='1 if the node should do top down compute on the next call '
'to compute into topDownOut (default 0).',
accessMode='ReadWrite',
dataType='UInt32',
count=1,
constraints='bool'),
activeOutputCount=dict(
description='Number of active elements in bottomUpOut output.',
accessMode='Read',
dataType='UInt32',
count=1,
constraints=''),
logPathInput=dict(
description='Optional name of input log file. If set, every input vector'
' will be logged to this file.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
logPathOutput=dict(
description='Optional name of output log file. If set, every output vector'
' will be logged to this file.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
logPathOutputDense=dict(
description='Optional name of output log file. If set, every output vector'
' will be logged to this file as a dense vector.',
accessMode='ReadWrite',
dataType='Byte',
count=0,
constraints=''),
)
return spatialSpec, otherSpec
class SPRegion(PyRegion):
"""
SPRegion is designed to implement the spatial pooler compute for a given
HTM level.
Uses the SpatialPooler class to do most of the work. This node has just one
SpatialPooler instance for the entire level and does *not* support the concept
of "baby nodes" within it.
Automatic parameter handling:
Parameter names, default values, and descriptions are retrieved automatically
from SpatialPooler. Thus, there are only a few hardcoded arguments in __init__,
and the rest are passed to the appropriate underlying class. The NodeSpec is
mostly built automatically from these parameters, too.
If you add a parameter to SpatialPooler, it will be exposed through SPRegion
automatically as if it were in SPRegion.__init__, with the right default
value. Add an entry in the __init__ docstring for it too, and that will be
brought into the NodeSpec. SPRegion will maintain the parameter as its own
instance variable and also pass it to SpatialPooler. If the parameter is
changed, SPRegion will propagate the change.
If you want to do something different with the parameter, add it as an
argument into SPRegion.__init__, which will override all the default handling.
"""
def __init__(self,
columnCount, # Number of columns in the SP, a required parameter
inputWidth, # Size of inputs to the SP, a required parameter
spatialImp=getDefaultSPImp(), #'py', 'cpp'
**kwargs):
if columnCount <= 0 or inputWidth <= 0:
raise TypeError("Parameters columnCount and inputWidth must be > 0")
# Pull out the spatial arguments automatically
# These calls whittle down kwargs and create instance variables of SPRegion
self.SpatialClass = getSPClass(spatialImp)
sArgTuples = _buildArgs(self.SpatialClass.__init__, self, kwargs)
# Make a list of automatic spatial arg names for later use
self._spatialArgNames = [t[0] for t in sArgTuples]
# Learning and SP parameters.
# By default we start out in stage learn with inference disabled
self.learningMode = True
self.inferenceMode = False
self.anomalyMode = False
self.topDownMode = False
self.columnCount = columnCount
self.inputWidth = inputWidth
PyRegion.__init__(self, **kwargs)
# Initialize all non-persistent base members, as well as give
# derived class an opportunity to do the same.
self._loaded = False
self._initializeEphemeralMembers()
# Debugging support, used in _conditionalBreak
self.breakPdb = False
self.breakKomodo = False
# Defaults for all other parameters
self.logPathInput = ''
self.logPathOutput = ''
self.logPathOutputDense = ''
self._fpLogSPInput = None
self._fpLogSP = None
self._fpLogSPDense = None
#
# Variables set up in initInNetwork()
#
# Spatial instance
self._sfdr = None
# Spatial pooler's bottom-up output value: hang on to this output for
# top-down inference and for debugging
self._spatialPoolerOutput = None
# Spatial pooler's bottom-up input: hang on to this for supporting the
# spInputNonZeros parameter
self._spatialPoolerInput = None
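# Illustrative sketch, not part of the original file: a minimal construction
# with arbitrary example values. Any parameter not listed falls through to the
# spatial pooler defaults via the automatic parameter handling described in
# the class docstring above:
#
#   region = SPRegion(columnCount=2048, inputWidth=1024, spatialImp='py')
#   region.getParameter('columnCount')  # -> 2048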
#############################################################################
#
# Initialization code
#
#############################################################################
def _initializeEphemeralMembers(self):
"""
Initialize all ephemeral data members, and give the derived class the
opportunity to do the same by invoking the virtual member _initEphemerals(),
which is intended to be overridden.
NOTE: this is used by both __init__ and __setstate__ code paths.
"""
for attrName in self._getEphemeralMembersBase():
if attrName != "_loaded":
if hasattr(self, attrName):
if self._loaded:
# print self.__class__.__name__, "contains base class member '%s' " \
# "after loading." % attrName
# TODO: Re-enable warning or turn into error in a future release.
pass
else:
print self.__class__.__name__, "contains base class member '%s'" % \
attrName
if not self._loaded:
for attrName in self._getEphemeralMembersBase():
if attrName != "_loaded":
# if hasattr(self, attrName):
# import pdb; pdb.set_trace()
assert not hasattr(self, attrName)
else:
assert hasattr(self, attrName)
# Profiling information
self._profileObj = None
self._iterations = 0
# Let derived class initialize ephemerals
self._initEphemerals()
self._checkEphemeralMembers()
def initialize(self, dims, splitterMaps):
""""""
# Zero out the spatial output in case it is requested
self._spatialPoolerOutput = numpy.zeros(self.columnCount,
dtype=GetNTAReal())
# Zero out the rfInput in case it is requested
self._spatialPoolerInput = numpy.zeros((1,self.inputWidth), dtype=GetNTAReal())
# Allocate the spatial pooler
self._allocateSpatialFDR(None)
def _allocateSpatialFDR(self, rfInput):
"""Allocate the spatial pooler instance."""
if self._sfdr:
return
# Retrieve the necessary extra arguments that were handled automatically
autoArgs = dict((name, getattr(self, name))
for name in self._spatialArgNames)
# Instantiate the spatial pooler class.
if ( (self.SpatialClass == CPPSpatialPooler) or
(self.SpatialClass == PYSpatialPooler) ):
autoArgs['columnDimensions'] = [self.columnCount]
autoArgs['inputDimensions'] = [self.inputWidth]
autoArgs['potentialRadius'] = self.inputWidth
self._sfdr = self.SpatialClass(
**autoArgs
)
#############################################################################
#
# Core compute methods: learning, inference, and prediction
#
#############################################################################
def compute(self, inputs, outputs):
"""
Run one iteration of SPRegion's compute, profiling it if requested.
The guts of the compute are contained in the _compute() call so that
we can profile it if requested.
"""
# Uncomment this to find out who is generating divide by 0, or other numpy warnings
# numpy.seterr(divide='raise', invalid='raise', over='raise')
# Modify this line to turn on profiling for a given node. The results file
# ('hotshot.stats') will be sensed and printed out by the vision framework's
# RunInference.py script at the end of inference.
# Also uncomment the hotshot import at the top of this file.
if False and self.learningMode \
and self._iterations > 0 and self._iterations <= 10:
import hotshot
if self._iterations == 10:
print "\n Collecting and sorting internal node profiling stats generated by hotshot..."
stats = hotshot.stats.load("hotshot.stats")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats()
if self._profileObj is None:
print "\n Preparing to capture profile using hotshot..."
if os.path.exists('hotshot.stats'):
# There is an old hotshot stats profile left over, remove it.
os.remove('hotshot.stats')
self._profileObj = hotshot.Profile("hotshot.stats", 1, 1)
# filename, lineevents, linetimings
self._profileObj.runcall(self._compute, *[inputs, outputs])
else:
self._compute(inputs, outputs)
def _compute(self, inputs, outputs):
"""
Run one iteration of SPRegion's compute
"""
#if self.topDownMode and (not 'topDownIn' in inputs):
# raise RuntimeError("The input topDownIn must be linked in if "
# "topDownMode is True")
if self._sfdr is None:
raise RuntimeError("Spatial pooler has not been initialized")
if not self.topDownMode:
#
# BOTTOM-UP compute
#
self._iterations += 1
# Get our inputs into numpy arrays
buInputVector = inputs['bottomUpIn']
resetSignal = False
if 'resetIn' in inputs:
assert len(inputs['resetIn']) == 1
resetSignal = inputs['resetIn'][0] != 0
# Perform inference and/or learning
rfOutput = self._doBottomUpCompute(
rfInput = buInputVector.reshape((1,buInputVector.size)),
resetSignal = resetSignal
)
outputs['bottomUpOut'][:] = rfOutput.flat
else:
#
# TOP-DOWN inference
#
topDownIn = inputs.get('topDownIn',None)
spatialTopDownOut, temporalTopDownOut = self._doTopDownInfer(topDownIn)
outputs['spatialTopDownOut'][:] = spatialTopDownOut
if temporalTopDownOut is not None:
outputs['temporalTopDownOut'][:] = temporalTopDownOut
# OBSOLETE
outputs['anomalyScore'][:] = 0
# Write the bottom up out to our node outputs only if we are doing inference
#print "SPRegion input: ", buInputVector.nonzero()[0]
#print "SPRegion output: ", rfOutput.nonzero()[0]
def _doBottomUpCompute(self, rfInput, resetSignal):
"""
Do one iteration of inference and/or learning and return the result
Parameters:
--------------------------------------------
rfInput: Input vector. Shape is: (1, inputVectorLen).
resetSignal: True if reset is asserted
"""
# Conditional compute break
self._conditionalBreak()
# Save the rfInput for the spInputNonZeros parameter
self._spatialPoolerInput = rfInput.reshape(-1)
assert(rfInput.shape[0] == 1)
# Run inference using the spatial pooler. We learn on the coincidences only
# if we are in learning mode and trainingStep is set appropriately.
# Run SFDR bottom-up compute and cache output in self._spatialPoolerOutput
inputVector = numpy.array(rfInput[0]).astype('uint32')
outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32')
# Switch to using a random SP if learning mode is off and the SP hasn't
# learned anything yet.
if (not self.learningMode) and (self._sfdr.getIterationLearnNum() == 0):
self._sfdr.compute(inputVector, self.learningMode, outputVector, False)
else:
self._sfdr.compute(inputVector, self.learningMode, outputVector)
self._spatialPoolerOutput[:] = outputVector[:]
# Direct logging of SP outputs if requested
if self._fpLogSP:
output = self._spatialPoolerOutput.reshape(-1)
outputNZ = output.nonzero()[0]
outStr = " ".join(["%d" % int(token) for token in outputNZ])
print >>self._fpLogSP, output.size, outStr
# Direct logging of SP inputs
if self._fpLogSPInput:
output = rfInput.reshape(-1)
outputNZ = output.nonzero()[0]
outStr = " ".join(["%d" % int(token) for token in outputNZ])
print >>self._fpLogSPInput, output.size, outStr
return self._spatialPoolerOutput
def _doTopDownInfer(self, topDownInput = None):
"""
Do one iteration of top-down inference.
Parameters:
--------------------------------------------
topDownInput: Top-down input
retval: (spatialTopDownOut, temporalTopDownOut)
spatialTopDownOut is the top down output computed only from the SP,
using its current bottom-up output.
temporalTopDownOut is the top down output computed from the topDown in
of the level above us.
"""
return None, None
#############################################################################
#
# Region API support methods: getSpec, getParameter, and setParameter
#
#############################################################################
@classmethod
def getBaseSpec(cls):
"""Return the base Spec for SPRegion.
Doesn't include the spatial, temporal and other parameters
"""
spec = dict(
description=SPRegion.__doc__,
singleNodeOnly=True,
inputs=dict(
bottomUpIn=dict(
description="""The input vector.""",
dataType='Real32',
count=0,
required=True,
regionLevel=False,
isDefaultInput=True,
requireSplitterMap=False),
resetIn=dict(
description="""A boolean flag that indicates whether
or not the input vector received in this compute cycle
represents the start of a new temporal sequence.""",
dataType='Real32',
count=1,
required=False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
topDownIn=dict(
description="""The top-down input signal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
required = False,
regionLevel=True,
isDefaultInput=False,
requireSplitterMap=False),
),
outputs=dict(
bottomUpOut=dict(
description="""The output signal generated from the bottom-up inputs
from lower levels.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=True),
topDownOut=dict(
description="""The top-down output signal, generated from
feedback from upper levels""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
spatialTopDownOut = dict(
description="""The top-down output, generated only from the current
SP output. This can be used to evaluate how well the
SP is representing the inputs independent of the TP.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
temporalTopDownOut = dict(
description="""The top-down output, generated only from the current
TP output feedback down through the SP.""",
dataType='Real32',
count=0,
regionLevel=True,
isDefaultOutput=False),
anomalyScore = dict(
description="""The score for how 'anomalous' (i.e. rare) this spatial
input pattern is. Higher values are increasingly rare""",
dataType='Real32',
count=1,
regionLevel=True,
isDefaultOutput=False),
),
parameters=dict(
breakPdb=dict(
description='Set to 1 to stop in the pdb debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
breakKomodo=dict(
description='Set to 1 to stop in the Komodo debugger on the next compute',
dataType='UInt32',
count=1,
constraints='bool',
defaultValue=0,
accessMode='ReadWrite'),
),
)
return spec
@classmethod
def getSpec(cls):
"""Return the Spec for SPRegion.
The parameters collection is constructed based on the parameters specified
by the various components (spatialSpec, temporalSpec and otherSpec)
"""
spec = cls.getBaseSpec()
s, o = _getAdditionalSpecs(spatialImp=getDefaultSPImp())
spec['parameters'].update(s)
spec['parameters'].update(o)
return spec
def getParameter(self, parameterName, index=-1):
"""
Get the value of a NodeSpec parameter. Most parameters are handled
automatically by PyRegion's parameter get mechanism. The ones that need
special treatment are explicitly handled here.
"""
if parameterName == 'activeOutputCount':
return self.columnCount
elif parameterName == 'spatialPoolerInput':
return list(self._spatialPoolerInput.reshape(-1))
elif parameterName == 'spatialPoolerOutput':
return list(self._spatialPoolerOutput)
elif parameterName == 'spNumActiveOutputs':
return len(self._spatialPoolerOutput.nonzero()[0])
elif parameterName == 'spOutputNonZeros':
return [len(self._spatialPoolerOutput)] + \
list(self._spatialPoolerOutput.nonzero()[0])
elif parameterName == 'spInputNonZeros':
import pdb; pdb.set_trace()
return [len(self._spatialPoolerInput)] + \
list(self._spatialPoolerInput.nonzero()[0])
elif parameterName == 'spLearningStatsStr':
try:
return str(self._sfdr.getLearningStats())
except:
return str(dict())
else:
return PyRegion.getParameter(self, parameterName, index)
def setParameter(self, parameterName, index, parameterValue):
"""
Set the value of a Spec parameter. Most parameters are handled
automatically by PyRegion's parameter set mechanism. The ones that need
special treatment are explicitly handled here.
"""
if parameterName in self._spatialArgNames:
setattr(self._sfdr, parameterName, parameterValue)
elif parameterName == "logPathInput":
self.logPathInput = parameterValue
# Close any existing log file
if self._fpLogSPInput:
self._fpLogSPInput.close()
self._fpLogSPInput = None
# Open a new log file
if parameterValue:
self._fpLogSPInput = open(self.logPathInput, 'w')
elif parameterName == "logPathOutput":
self.logPathOutput = parameterValue
# Close any existing log file
if self._fpLogSP:
self._fpLogSP.close()
self._fpLogSP = None
# Open a new log file
if parameterValue:
self._fpLogSP = open(self.logPathOutput, 'w')
elif parameterName == "logPathOutputDense":
self.logPathOutputDense = parameterValue
# Close any existing log file
if self._fpLogSPDense:
self._fpLogSPDense.close()
self._fpLogSPDense = None
# Open a new log file
if parameterValue:
self._fpLogSPDense = open(self.logPathOutputDense, 'w')
elif hasattr(self, parameterName):
setattr(self, parameterName, parameterValue)
else:
raise Exception('Unknown parameter: ' + parameterName)
#############################################################################
#
# Methods to support serialization
#
#############################################################################
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with all "ephemeral" members stripped out. "Ephemeral" members
are defined as those that do not need to be (nor should be) stored
in any kind of persistent file (e.g., NuPIC network XML file.)
"""
state = self.__dict__.copy()
# We only want to serialize a single spatial/temporal FDR if they're cloned
for ephemeralMemberName in self._getEphemeralMembersAll():
state.pop(ephemeralMemberName, None)
return state
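# Illustrative sketch, not part of the original file: __getstate__ above is
# what lets an SPRegion pickle cleanly; ephemerals such as open log-file
# handles are stripped on save and rebuilt on load. Assuming `region` is an
# initialized SPRegion:
#
#   import pickle
#   clone = pickle.loads(pickle.dumps(region))  # __setstate__ runs on load
#   assert clone._fpLogSP is None               # ephemeral was re-initialized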
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
"""
self.__dict__.update(state)
self._loaded = True
# Backwards compatibility
if not hasattr(self, "SpatialClass"):
self.SpatialClass = self._sfdr.__class__
# Initialize all non-persistent base members, as well as give
# derived class an opportunity to do the same.
self._initializeEphemeralMembers()
self._allocateSpatialFDR(None)
def _initEphemerals(self):
"""
Initialize all ephemerals used by derived classes.
"""
if hasattr(self, '_sfdr') and self._sfdr:
self._spatialPoolerOutput = numpy.zeros(self.columnCount,
dtype=GetNTAReal())
else:
self._spatialPoolerOutput = None # Will be filled in initInNetwork
# Direct logging support (faster than node watch)
self._fpLogSPInput = None
self._fpLogSP = None
self._fpLogSPDense = None
self.logPathInput = ""
self.logPathOutput = ""
self.logPathOutputDense = ""
def _getEphemeralMembers(self):
"""
Callback that returns a list of all "ephemeral" members (i.e., data members
that should not and/or cannot be pickled.)
"""
return ['_spatialPoolerOutput', '_fpLogSP', '_fpLogSPDense',
'logPathInput', 'logPathOutput', 'logPathOutputDense'
]
def _getEphemeralMembersBase(self):
"""
Returns list of all ephemeral members.
"""
return [
'_loaded',
'_profileObj',
'_iterations',
]
def _getEphemeralMembersAll(self):
"""
Returns a concatenated list of both the standard base class
ephemeral members, as well as any additional ephemeral members
(e.g., file handles, etc.).
"""
return self._getEphemeralMembersBase() + self._getEphemeralMembers()
def _checkEphemeralMembers(self):
for attrName in self._getEphemeralMembersBase():
if not hasattr(self, attrName):
print "Missing base class member:", attrName
for attrName in self._getEphemeralMembers():
if not hasattr(self, attrName):
print "Missing derived class member:", attrName
for attrName in self._getEphemeralMembersBase():
assert hasattr(self, attrName)
for attrName in self._getEphemeralMembers():
assert hasattr(self, attrName), "Node missing attr '%s'." % attrName
#############################################################################
#
# Misc. code
#
#############################################################################
def _conditionalBreak(self):
if self.breakKomodo:
import dbgp.client; dbgp.client.brk()
if self.breakPdb:
import pdb; pdb.set_trace()
#############################################################################
#
# NuPIC 2 Support
# These methods are required by NuPIC 2
#
#############################################################################
def getOutputElementCount(self, name):
if name == 'bottomUpOut':
return self.columnCount
elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut' or \
name == 'topDownOut':
return self.inputWidth
else:
raise Exception("Invalid output name specified")
# TODO: as a temporary hack, getParameterArrayCount checks to see if there's a
# variable, private or not, with that name. If so, it attempts to return the
# length of that variable.
def getParameterArrayCount(self, name, index):
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
return len(p)
# TODO: as a temporary hack, getParameterArray checks to see if there's a
# variable, private or not, with that name. If so, it returns the value of the
# variable.
def getParameterArray(self, name, index, a):
p = self.getParameter(name)
if (not hasattr(p, '__len__')):
raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name)
if len(p) > 0:
a[:] = p[:]
| 1 | 18,930 | Haven't we gotten rid of `randomSP` recently? (I think `not learn` implied that.) | numenta-nupic | py |
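The serialization pattern above — strip unpicklable handles in __getstate__, then reopen them after load — is language-agnostic. Below is a minimal Go sketch of the same idea, with hypothetical names (not from the NuPIC codebase): an unexported file handle is skipped by encoding/json, and a Restore step plays the role of _initializeEphemeralMembers.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Region holds persistent state plus an "ephemeral" file handle
// that must not be serialized (mirroring _fpLogSP above).
type Region struct {
	ColumnCount int    `json:"columnCount"`
	LogPath     string `json:"logPath"`
	logFile     *os.File // ephemeral: unexported, so encoding/json skips it
}

// Restore reopens ephemeral resources after deserialization,
// playing the role of _initializeEphemeralMembers.
func (r *Region) Restore() error {
	if r.LogPath == "" {
		return nil
	}
	f, err := os.OpenFile(r.LogPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	r.logFile = f
	return nil
}

func main() {
	r := Region{ColumnCount: 2048, LogPath: "/tmp/sp.log"}
	b, _ := json.Marshal(r) // logFile is never written out
	fmt.Println(string(b))

	var loaded Region
	_ = json.Unmarshal(b, &loaded)
	_ = loaded.Restore() // re-create the ephemeral handle
}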
@@ -97,6 +97,7 @@ func (in *{{.Type}}) GetChaos() *ChaosInstance {
Kind: Kind{{.Type}},
StartTime: in.CreationTimestamp.Time,
Action: "",
+ Status: string(in.GetStatus().Experiment.Phase),
UID: string(in.UID),
}
| 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"text/template"
)
const implTemplate = `
const Kind{{.Type}} = "{{.Type}}"
// IsDeleted returns whether this resource has been deleted
func (in *{{.Type}}) IsDeleted() bool {
return !in.DeletionTimestamp.IsZero()
}
// IsPaused returns whether this resource has been paused
func (in *{{.Type}}) IsPaused() bool {
if in.Annotations == nil || in.Annotations[PauseAnnotationKey] != "true" {
return false
}
return true
}
// GetDuration would return the duration for chaos
func (in *{{.Type}}) GetDuration() (*time.Duration, error) {
if in.Spec.Duration == nil {
return nil, nil
}
duration, err := time.ParseDuration(*in.Spec.Duration)
if err != nil {
return nil, err
}
return &duration, nil
}
func (in *{{.Type}}) GetNextStart() time.Time {
if in.Status.Scheduler.NextStart == nil {
return time.Time{}
}
return in.Status.Scheduler.NextStart.Time
}
func (in *{{.Type}}) SetNextStart(t time.Time) {
if t.IsZero() {
in.Status.Scheduler.NextStart = nil
return
}
if in.Status.Scheduler.NextStart == nil {
in.Status.Scheduler.NextStart = &metav1.Time{}
}
in.Status.Scheduler.NextStart.Time = t
}
func (in *{{.Type}}) GetNextRecover() time.Time {
if in.Status.Scheduler.NextRecover == nil {
return time.Time{}
}
return in.Status.Scheduler.NextRecover.Time
}
func (in *{{.Type}}) SetNextRecover(t time.Time) {
if t.IsZero() {
in.Status.Scheduler.NextRecover = nil
return
}
if in.Status.Scheduler.NextRecover == nil {
in.Status.Scheduler.NextRecover = &metav1.Time{}
}
in.Status.Scheduler.NextRecover.Time = t
}
// GetScheduler would return the scheduler for chaos
func (in *{{.Type}}) GetScheduler() *SchedulerSpec {
return in.Spec.Scheduler
}
// GetChaos would return a record for the chaos
func (in *{{.Type}}) GetChaos() *ChaosInstance {
instance := &ChaosInstance{
Name: in.Name,
Namespace: in.Namespace,
Kind: Kind{{.Type}},
StartTime: in.CreationTimestamp.Time,
Action: "",
UID: string(in.UID),
}
action := reflect.ValueOf(in).Elem().FieldByName("Spec").FieldByName("Action")
if action.IsValid() {
instance.Action = action.String()
}
if in.Spec.Duration != nil {
instance.Duration = *in.Spec.Duration
}
if in.DeletionTimestamp != nil {
instance.EndTime = in.DeletionTimestamp.Time
}
return instance
}
// GetStatus returns the status
func (in *{{.Type}}) GetStatus() *ChaosStatus {
return &in.Status.ChaosStatus
}
// +kubebuilder:object:root=true
// {{.Type}}List contains a list of {{.Type}}
type {{.Type}}List struct {
metav1.TypeMeta ` + "`" + `json:",inline"` + "`" + `
metav1.ListMeta ` + "`" + `json:"metadata,omitempty"` + "`" + `
Items []{{.Type}} ` + "`" + `json:"items"` + "`" + `
}
// ListChaos returns a list of chaos
func (in *{{.Type}}List) ListChaos() []*ChaosInstance {
res := make([]*ChaosInstance, 0, len(in.Items))
for _, item := range in.Items {
res = append(res, item.GetChaos())
}
return res
}
`
func generateImpl(name string) string {
tmpl, err := template.New("impl").Parse(implTemplate)
if err != nil {
log.Error(err, "fail to build template")
return ""
}
buf := new(bytes.Buffer)
err = tmpl.Execute(buf, &metadata{
Type: name,
})
if err != nil {
log.Error(err, "fail to execute template")
return ""
}
return buf.String()
}
| 1 | 18,058 | Why do we need to use the `in.GetStatus()` function here? Can we use `in.Status.xxxx` directly? | chaos-mesh-chaos-mesh | go |
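On the question above: for the generated types, both spellings reach the same field, since the status is an embedded struct that the accessor exposes. A small self-contained sketch with hypothetical stand-in types, assuming the embedding the template implies:

package main

import "fmt"

// Hypothetical stand-ins for the generated chaos types.
type ExperimentStatus struct{ Phase string }
type ChaosStatus struct{ Experiment ExperimentStatus }
type PodChaosStatus struct{ ChaosStatus }
type PodChaos struct{ Status PodChaosStatus }

// GetStatus mirrors the generated accessor in the template.
func (in *PodChaos) GetStatus() *ChaosStatus { return &in.Status.ChaosStatus }

func main() {
	c := PodChaos{}
	c.Status.Experiment.Phase = "Running"

	// Via the accessor, as the patch does:
	fmt.Println(c.GetStatus().Experiment.Phase)
	// Direct field access, as the reviewer suggests:
	fmt.Println(c.Status.Experiment.Phase)
}

The accessor mainly exists so the shared interface can be satisfied; inside generated code for a concrete type, direct field access is equivalent.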
@@ -93,7 +93,10 @@ func RenewManagedCertificates(allowPrompts bool) (err error) {
continue
}
- // this works well because managed certs are only associated with one name per config
+ // This works well because managed certs are only associated with one name per config.
+ // Note that the renewal inside here may not actually occur, and no error will be returned,
+ // due to the renewal lock (i.e. because a renewal is already happening). This lack of an
+ // error is intentional, to force cache invalidation as though the cert had been renewed.
err := cert.Config.RenewCert(allowPrompts)
if err != nil { | 1 | package caddytls
import (
"log"
"time"
"golang.org/x/crypto/ocsp"
)
func init() {
// maintain assets while this package is imported, which is
// always. we don't ever stop it, since we need it running.
go maintainAssets(make(chan struct{}))
}
const (
// RenewInterval is how often to check certificates for renewal.
RenewInterval = 12 * time.Hour
// OCSPInterval is how often to check if OCSP stapling needs updating.
OCSPInterval = 1 * time.Hour
// RenewDurationBefore is how long before expiration to renew certificates.
RenewDurationBefore = (24 * time.Hour) * 30
)
// maintainAssets is a permanently-blocking function
// that loops indefinitely and, on a regular schedule, checks
// certificates for expiration and initiates a renewal of certs
// that are expiring soon. It also updates OCSP stapling and
// performs other maintenance of assets. It should only be
// called once per process.
//
// You must pass in the channel which you'll close when
// maintenance should stop, to allow this goroutine to clean up
// after itself and unblock. (Not that you HAVE to stop it...)
func maintainAssets(stopChan chan struct{}) {
renewalTicker := time.NewTicker(RenewInterval)
ocspTicker := time.NewTicker(OCSPInterval)
for {
select {
case <-renewalTicker.C:
log.Println("[INFO] Scanning for expiring certificates")
RenewManagedCertificates(false)
log.Println("[INFO] Done checking certificates")
case <-ocspTicker.C:
log.Println("[INFO] Scanning for stale OCSP staples")
UpdateOCSPStaples()
log.Println("[INFO] Done checking OCSP staples")
case <-stopChan:
renewalTicker.Stop()
ocspTicker.Stop()
log.Println("[INFO] Stopped background maintenance routine")
return
}
}
}
// RenewManagedCertificates renews managed certificates.
func RenewManagedCertificates(allowPrompts bool) (err error) {
var renewed, deleted []Certificate
visitedNames := make(map[string]struct{})
certCacheMu.RLock()
for name, cert := range certCache {
if !cert.Config.Managed || cert.Config.SelfSigned {
continue
}
// the list of names on this cert should never be empty...
if cert.Names == nil || len(cert.Names) == 0 {
log.Printf("[WARNING] Certificate keyed by '%s' has no names: %v - removing from cache", name, cert.Names)
deleted = append(deleted, cert)
continue
}
// skip names whose certificate we've already renewed
if _, ok := visitedNames[name]; ok {
continue
}
for _, name := range cert.Names {
visitedNames[name] = struct{}{}
}
// if its time is up or ending soon, we need to try to renew it
timeLeft := cert.NotAfter.Sub(time.Now().UTC())
if timeLeft < RenewDurationBefore {
log.Printf("[INFO] Certificate for %v expires in %v; attempting renewal", cert.Names, timeLeft)
if cert.Config == nil {
log.Printf("[ERROR] %s: No associated TLS config; unable to renew", name)
continue
}
// this works well because managed certs are only associated with one name per config
err := cert.Config.RenewCert(allowPrompts)
if err != nil {
if allowPrompts && timeLeft < 0 {
// Certificate renewal failed, the operator is present, and the certificate
// is already expired; we should stop immediately and return the error. Note
// that we used to do this any time a renewal failed at startup. However,
// after discussion in https://github.com/mholt/caddy/issues/642 we decided to
// only stop startup if the certificate is expired. We still log the error
// otherwise.
certCacheMu.RUnlock()
return err
}
log.Printf("[ERROR] %v", err)
if cert.Config.OnDemand {
deleted = append(deleted, cert)
}
} else {
renewed = append(renewed, cert)
}
}
}
certCacheMu.RUnlock()
// Apply changes to the cache
for _, cert := range renewed {
if cert.Names[len(cert.Names)-1] == "" {
// Special case: This is the default certificate. We must
// flush it out of the cache so that we no longer point to
// the old, un-renewed certificate. Otherwise it will be
// renewed on every scan, which is too often. When we cache
// this certificate in a moment, it will be the default again.
certCacheMu.Lock()
delete(certCache, "")
certCacheMu.Unlock()
}
_, err := CacheManagedCertificate(cert.Names[0], cert.Config)
if err != nil {
if allowPrompts {
return err // operator is present, so report error immediately
}
log.Printf("[ERROR] %v", err)
}
}
for _, cert := range deleted {
certCacheMu.Lock()
for _, name := range cert.Names {
delete(certCache, name)
}
certCacheMu.Unlock()
}
return nil
}
// UpdateOCSPStaples updates the OCSP stapling in all
// eligible, cached certificates.
func UpdateOCSPStaples() {
// Create a temporary place to store updates
// until we release the potentially long-lived
// read lock and use a short-lived write lock.
type ocspUpdate struct {
rawBytes []byte
parsed *ocsp.Response
}
updated := make(map[string]ocspUpdate)
// A single SAN certificate maps to multiple names, so we use this
// set to make sure we don't waste cycles checking OCSP for the same
// certificate multiple times.
visited := make(map[string]struct{})
certCacheMu.RLock()
for name, cert := range certCache {
// skip this certificate if we've already visited it,
// and if not, mark all the names as visited
if _, ok := visited[name]; ok {
continue
}
for _, n := range cert.Names {
visited[n] = struct{}{}
}
// no point in updating OCSP for expired certificates
if time.Now().After(cert.NotAfter) {
continue
}
var lastNextUpdate time.Time
if cert.OCSP != nil {
// start checking OCSP staple about halfway through validity period for good measure
lastNextUpdate = cert.OCSP.NextUpdate
refreshTime := cert.OCSP.ThisUpdate.Add(lastNextUpdate.Sub(cert.OCSP.ThisUpdate) / 2)
// since OCSP is already stapled, we need only check if we're in that "refresh window"
if time.Now().Before(refreshTime) {
continue
}
}
err := stapleOCSP(&cert, nil)
if err != nil {
if cert.OCSP != nil {
// if there was no staple before, that's fine; otherwise we should log the error
log.Printf("[ERROR] Checking OCSP for %v: %v", cert.Names, err)
}
continue
}
// By this point, we've obtained the latest OCSP response.
// If there was no staple before, or if the response is updated, make
// sure we apply the update to all names on the certificate.
if lastNextUpdate.IsZero() || lastNextUpdate != cert.OCSP.NextUpdate {
log.Printf("[INFO] Advancing OCSP staple for %v from %s to %s",
cert.Names, lastNextUpdate, cert.OCSP.NextUpdate)
for _, n := range cert.Names {
updated[n] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.OCSP}
}
}
}
certCacheMu.RUnlock()
// This write lock should be brief since we have all the info we need now.
certCacheMu.Lock()
for name, update := range updated {
cert := certCache[name]
cert.OCSP = update.parsed
cert.Certificate.OCSPStaple = update.rawBytes
certCache[name] = cert
}
certCacheMu.Unlock()
}
| 1 | 8,575 | @cretz Just a thought: what if another renewal process updates the certificate between the beginning of this for loop (above on line 67) and actually calling RenewCert? Even though we have a read lock on the certCache, something else could have renewed it by now, and finished, which would cause this certificate to be renewed twice. Could you double-check my thinking here and see if that's a plausible 'race' condition? I wonder if the lock needs to be put over this whole function. (It's still early here; I could be wrong...) | caddyserver-caddy | go |
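To make the race the reviewer describes concrete: between releasing the read lock and calling RenewCert, another goroutine may renew the same name. One common mitigation — a per-name in-flight set so a second caller skips the work — is sketched below with hypothetical helpers, not Caddy's actual code:

package main

import (
	"fmt"
	"sync"
)

// renewalLocks tracks names whose renewal is already in flight,
// so a second caller skips the work instead of renewing twice.
var (
	mu           sync.Mutex
	renewalLocks = map[string]bool{}
)

// tryRenew returns false without renewing if another goroutine
// already holds the in-flight entry for this name.
func tryRenew(name string, renew func() error) (bool, error) {
	mu.Lock()
	if renewalLocks[name] {
		mu.Unlock()
		return false, nil // someone else is renewing; treat as done
	}
	renewalLocks[name] = true
	mu.Unlock()

	defer func() {
		mu.Lock()
		delete(renewalLocks, name)
		mu.Unlock()
	}()
	return true, renew()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			did, _ := tryRenew("example.com", func() error { return nil })
			fmt.Printf("goroutine %d renewed: %v\n", i, did)
		}(i)
	}
	wg.Wait()
}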
@@ -57,13 +57,13 @@ PDEBUG_SYSTEM_OBJECTS g_ExtSystem;
// Queries for all debugger interfaces.
#ifndef FEATURE_PAL
-extern "C" HRESULT
+HRESULT
ExtQuery(PDEBUG_CLIENT client)
{
HRESULT Status;
g_ExtClient = client;
#else
-extern "C" HRESULT
+HRESULT
ExtQuery(ILLDBServices* services)
{
// Initialize the PAL and extension support in one place and only once.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ==++==
//
//
// ==--==
#include "exts.h"
#include "disasm.h"
#ifndef FEATURE_PAL
#define VER_PRODUCTVERSION_W (0x0100)
//
// globals
//
WINDBG_EXTENSION_APIS ExtensionApis;
//
// Valid for the lifetime of the debug session.
//
PDEBUG_CLIENT g_ExtClient;
PDEBUG_DATA_SPACES2 g_ExtData2;
PDEBUG_ADVANCED g_ExtAdvanced;
#else
DebugClient* g_DebugClient;
ILLDBServices* g_ExtServices;
ILLDBServices2* g_ExtServices2;
bool g_palInitialized = false;
#endif // FEATURE_PAL
OnUnloadTask *OnUnloadTask::s_pUnloadTaskList = NULL;
IMachine* g_targetMachine = NULL;
BOOL g_bDacBroken = FALSE;
PDEBUG_CONTROL2 g_ExtControl;
PDEBUG_DATA_SPACES g_ExtData;
PDEBUG_REGISTERS g_ExtRegisters;
PDEBUG_SYMBOLS g_ExtSymbols;
PDEBUG_SYMBOLS2 g_ExtSymbols2;
PDEBUG_SYSTEM_OBJECTS g_ExtSystem;
#define SOS_ExtQueryFailGo(var, riid) \
var = NULL; \
if ((Status = client->QueryInterface(__uuidof(riid), \
(void **)&var)) != S_OK) \
{ \
goto Fail; \
}
// Queries for all debugger interfaces.
#ifndef FEATURE_PAL
extern "C" HRESULT
ExtQuery(PDEBUG_CLIENT client)
{
HRESULT Status;
g_ExtClient = client;
#else
extern "C" HRESULT
ExtQuery(ILLDBServices* services)
{
// Initialize the PAL and extension support in one place and only once.
if (!g_palInitialized)
{
if (PAL_InitializeDLL() != 0)
{
return E_FAIL;
}
g_palInitialized = true;
}
g_ExtServices = services;
HRESULT Status = services->QueryInterface(__uuidof(ILLDBServices2), (void**)&g_ExtServices2);
if (FAILED(Status))
{
g_ExtServices = NULL;
return Status;
}
DebugClient* client = new DebugClient(services, g_ExtServices2);
g_DebugClient = client;
#endif
SOS_ExtQueryFailGo(g_ExtControl, IDebugControl2);
SOS_ExtQueryFailGo(g_ExtData, IDebugDataSpaces);
SOS_ExtQueryFailGo(g_ExtRegisters, IDebugRegisters);
SOS_ExtQueryFailGo(g_ExtSymbols, IDebugSymbols);
SOS_ExtQueryFailGo(g_ExtSymbols2, IDebugSymbols2);
SOS_ExtQueryFailGo(g_ExtSystem, IDebugSystemObjects);
#ifndef FEATURE_PAL
SOS_ExtQueryFailGo(g_ExtData2, IDebugDataSpaces2);
SOS_ExtQueryFailGo(g_ExtAdvanced, IDebugAdvanced);
#endif // FEATURE_PAL
return Status;
Fail:
if (Status == E_OUTOFMEMORY)
ReportOOM();
ExtRelease();
return Status;
}
extern "C" HRESULT
ArchQuery(void)
{
ULONG targetArchitecture;
IMachine* targetMachine = NULL;
g_ExtControl->GetExecutingProcessorType(&targetArchitecture);
#ifdef SOS_TARGET_AMD64
if(targetArchitecture == IMAGE_FILE_MACHINE_AMD64)
{
targetMachine = AMD64Machine::GetInstance();
}
#endif // SOS_TARGET_AMD64
#ifdef SOS_TARGET_X86
if (targetArchitecture == IMAGE_FILE_MACHINE_I386)
{
targetMachine = X86Machine::GetInstance();
}
#endif // SOS_TARGET_X86
#ifdef SOS_TARGET_ARM
switch (targetArchitecture)
{
case IMAGE_FILE_MACHINE_ARM:
case IMAGE_FILE_MACHINE_THUMB:
case IMAGE_FILE_MACHINE_ARMNT:
targetMachine = ARMMachine::GetInstance();
break;
}
#endif // SOS_TARGET_ARM
#ifdef SOS_TARGET_ARM64
if (targetArchitecture == IMAGE_FILE_MACHINE_ARM64)
{
targetMachine = ARM64Machine::GetInstance();
}
#endif // SOS_TARGET_ARM64
if (targetMachine == NULL)
{
g_targetMachine = NULL;
ExtErr("The SOS that is loaded does not support the current target architecture '0x%04x'. A 32 bit target may require a 64 bit debugger or vice versa.\n", targetArchitecture);
return E_FAIL;
}
g_targetMachine = targetMachine;
return S_OK;
}
// Cleans up all debugger interfaces.
void
ExtRelease(void)
{
EXT_RELEASE(g_ExtControl);
EXT_RELEASE(g_ExtData);
EXT_RELEASE(g_ExtRegisters);
EXT_RELEASE(g_ExtSymbols);
EXT_RELEASE(g_ExtSymbols2);
EXT_RELEASE(g_ExtSystem);
#ifndef FEATURE_PAL
EXT_RELEASE(g_ExtData2);
EXT_RELEASE(g_ExtAdvanced);
g_ExtClient = nullptr;
#else
EXT_RELEASE(g_DebugClient);
EXT_RELEASE(g_ExtServices2);
g_ExtServices = nullptr;
#endif // FEATURE_PAL
ReleaseTarget();
}
#ifndef FEATURE_PAL
BOOL IsMiniDumpFileNODAC();
extern HMODULE g_hInstance;
// This function throws an exception that can be caught by the debugger,
// instead of allowing the default CRT behavior of invoking Watson to failfast.
void __cdecl _SOS_invalid_parameter(
const WCHAR * expression,
const WCHAR * function,
const WCHAR * file,
unsigned int line,
uintptr_t pReserved
)
{
ExtErr("\nSOS failure!\n");
throw "SOS failure";
}
bool g_Initialized = false;
bool IsInitializedByDbgEng()
{
return g_Initialized;
}
extern "C"
HRESULT
CALLBACK
DebugExtensionInitialize(PULONG Version, PULONG Flags)
{
HRESULT hr;
*Version = DEBUG_EXTENSION_VERSION(2, 0);
*Flags = 0;
if (g_Initialized)
{
return S_OK;
}
g_Initialized = true;
ReleaseHolder<IDebugClient> debugClient;
if ((hr = DebugCreate(__uuidof(IDebugClient), (void **)&debugClient)) != S_OK)
{
return hr;
}
if ((hr = SOSExtensions::Initialize(debugClient)) != S_OK)
{
return hr;
}
ReleaseHolder<IDebugControl> debugControl;
if ((hr = debugClient->QueryInterface(__uuidof(IDebugControl), (void **)&debugControl)) != S_OK)
{
return hr;
}
ExtensionApis.nSize = sizeof (ExtensionApis);
if ((hr = debugControl->GetWindbgExtensionApis64(&ExtensionApis)) != S_OK)
{
return hr;
}
// Fixes the "Unable to read dynamic function table entries" error messages by disabling the WinDbg security
// feature that prevents the loading of unknown out of proc stack walkers.
debugControl->Execute(DEBUG_OUTCTL_IGNORE, ".settings set EngineInitialization.VerifyFunctionTableCallbacks=false",
DEBUG_EXECUTE_NOT_LOGGED | DEBUG_EXECUTE_NO_REPEAT);
ExtQuery(debugClient);
if (IsMiniDumpFileNODAC())
{
ExtOut (
"----------------------------------------------------------------------------\n"
"The user dump currently examined is a minidump. Consequently, only a subset\n"
"of sos.dll functionality will be available. If needed, attaching to the live\n"
"process or debugging a full dump will allow access to sos.dll's full feature\n"
"set.\n"
"To create a full user dump use the command: .dump /ma <filename>\n"
"----------------------------------------------------------------------------\n");
}
ExtRelease();
#ifndef _ARM_
// Make sure we do not tear down the debugger when a security function fails
// Since we link statically against CRT this will only affect the SOS module.
_set_invalid_parameter_handler(_SOS_invalid_parameter);
#endif
return S_OK;
}
extern "C"
void
CALLBACK
DebugExtensionNotify(ULONG Notify, ULONG64 /*Argument*/)
{
}
extern "C"
void
CALLBACK
DebugExtensionUninitialize(void)
{
// Execute all registered cleanup tasks
OnUnloadTask::Run();
g_pRuntime = nullptr;
g_Initialized = false;
}
BOOL WINAPI
DllMain(HANDLE hInstance, DWORD dwReason, LPVOID lpReserved)
{
if (dwReason == DLL_PROCESS_ATTACH)
{
g_hInstance = (HMODULE) hInstance;
}
return true;
}
#else // FEATURE_PAL
HRESULT
DebugClient::QueryInterface(
REFIID InterfaceId,
PVOID* Interface
)
{
if (InterfaceId == __uuidof(IUnknown) ||
InterfaceId == __uuidof(IDebugControl2) ||
InterfaceId == __uuidof(IDebugControl4) ||
InterfaceId == __uuidof(IDebugDataSpaces) ||
InterfaceId == __uuidof(IDebugSymbols) ||
InterfaceId == __uuidof(IDebugSymbols2) ||
InterfaceId == __uuidof(IDebugSystemObjects) ||
InterfaceId == __uuidof(IDebugRegisters))
{
*Interface = this;
AddRef();
return S_OK;
}
else
{
*Interface = NULL;
return E_NOINTERFACE;
}
}
ULONG
DebugClient::AddRef()
{
LONG ref = InterlockedIncrement(&m_ref);
return ref;
}
ULONG
DebugClient::Release()
{
LONG ref = InterlockedDecrement(&m_ref);
if (ref == 0)
{
delete this;
}
return ref;
}
#endif // FEATURE_PAL
/// <summary>
/// Returns the host instance
///
/// * dotnet-dump - m_pHost has already been set by SOSInitializeByHost by SOS.Hosting
/// * lldb - m_pHost has already been set by SOSInitializeByHost by libsosplugin which gets it via the InitializeHostServices callback
/// * dbgeng - SOS.Extensions provides the instance via the InitializeHostServices callback
/// </summary>
IHost* SOSExtensions::GetHost()
{
if (m_pHost == nullptr)
{
#ifndef FEATURE_PAL
// Initialize the hosting runtime which will call InitializeHostServices and set m_pHost to the host instance
InitializeHosting();
#endif
// Otherwise, use the local host instance (hostimpl.*) that creates a local target instance (targetimpl.*)
if (m_pHost == nullptr)
{
m_pHost = Host::GetInstance();
}
}
return m_pHost;
}
| 1 | 13,472 | We don't need this for the PInvoke? | dotnet-diagnostics | cpp |
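Background for the question above: extern "C" prevents C++ name mangling, and anything that resolves the export by its plain name — P/Invoke, GetProcAddress, or a lookup like the hypothetical Go sketch below — depends on that. Without C linkage, a lookup by the unmangled name fails:

//go:build windows

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Hypothetical: resolve an export by its plain C name.
	// If ExtQuery were compiled with C++ linkage, the exported
	// symbol would be mangled and this lookup would fail.
	dll := syscall.NewLazyDLL("sos.dll")
	proc := dll.NewProc("ExtQuery")
	if err := proc.Find(); err != nil {
		fmt.Println("symbol not found:", err)
		return
	}
	fmt.Println("found unmangled export ExtQuery")
}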
@@ -661,6 +661,10 @@ func (s *scheduler) getMentionedAccounts(ctx context.Context, event model.Notifi
return nil, err
}
+ if ds.GenericDeploymentConfig.DeploymentNotification == nil {
+ return nil, nil
+ }
+
for _, v := range ds.GenericDeploymentConfig.DeploymentNotification.Mentions {
if e := "EVENT_" + v.Event; e == event.String() {
return v.Slack, nil | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"fmt"
"io/ioutil"
"path/filepath"
"time"
"go.uber.org/atomic"
"go.uber.org/zap"
"github.com/pipe-cd/pipe/pkg/app/api/service/pipedservice"
"github.com/pipe-cd/pipe/pkg/app/piped/deploysource"
"github.com/pipe-cd/pipe/pkg/app/piped/executor"
"github.com/pipe-cd/pipe/pkg/app/piped/executor/registry"
"github.com/pipe-cd/pipe/pkg/app/piped/logpersister"
pln "github.com/pipe-cd/pipe/pkg/app/piped/planner"
"github.com/pipe-cd/pipe/pkg/cache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/model"
)
// scheduler is a dedicated object for a specific deployment of a single application.
type scheduler struct {
// Readonly deployment model.
deployment *model.Deployment
envName string
workingDir string
executorRegistry registry.Registry
apiClient apiClient
gitClient gitClient
commandLister commandLister
applicationLister applicationLister
liveResourceLister liveResourceLister
analysisResultStore analysisResultStore
logPersister logpersister.Persister
metadataStore *metadataStore
notifier notifier
secretDecrypter secretDecrypter
pipedConfig *config.PipedSpec
appManifestsCache cache.Cache
logger *zap.Logger
targetDSP deploysource.Provider
runningDSP deploysource.Provider
// Current status of each stages.
// We stores their current statuses into this field
// because the deployment model is readonly to avoid data race.
// We may need a mutex for this field in the future
// when the stages can be executed concurrently.
stageStatuses map[string]model.StageStatus
genericDeploymentConfig config.GenericDeploymentSpec
done atomic.Bool
doneTimestamp time.Time
doneDeploymentStatus model.DeploymentStatus
cancelled bool
cancelledCh chan *model.ReportableCommand
nowFunc func() time.Time
}
func newScheduler(
d *model.Deployment,
envName string,
workingDir string,
apiClient apiClient,
gitClient gitClient,
commandLister commandLister,
applicationLister applicationLister,
liveResourceLister liveResourceLister,
analysisResultStore analysisResultStore,
lp logpersister.Persister,
notifier notifier,
sd secretDecrypter,
pipedConfig *config.PipedSpec,
appManifestsCache cache.Cache,
logger *zap.Logger,
) *scheduler {
logger = logger.Named("scheduler").With(
zap.String("deployment-id", d.Id),
zap.String("app-id", d.ApplicationId),
zap.String("env-id", d.EnvId),
zap.String("project-id", d.ProjectId),
zap.String("app-kind", d.Kind.String()),
zap.String("working-dir", workingDir),
)
s := &scheduler{
deployment: d,
envName: envName,
workingDir: workingDir,
executorRegistry: registry.DefaultRegistry(),
apiClient: apiClient,
gitClient: gitClient,
commandLister: commandLister,
applicationLister: applicationLister,
liveResourceLister: liveResourceLister,
analysisResultStore: analysisResultStore,
logPersister: lp,
metadataStore: NewMetadataStore(apiClient, d),
notifier: notifier,
secretDecrypter: sd,
pipedConfig: pipedConfig,
appManifestsCache: appManifestsCache,
doneDeploymentStatus: d.Status,
cancelledCh: make(chan *model.ReportableCommand, 1),
logger: logger,
nowFunc: time.Now,
}
// Initialize the map of current status of all stages.
s.stageStatuses = make(map[string]model.StageStatus, len(d.Stages))
for _, stage := range d.Stages {
s.stageStatuses[stage.Id] = stage.Status
}
return s
}
// ID returns the id of scheduler.
// This is the same value with deployment ID.
func (s *scheduler) ID() string {
return s.deployment.Id
}
// CommitHash returns the hash value of deploying commit.
func (s *scheduler) CommitHash() string {
return s.deployment.CommitHash()
}
// IsDone tells whether this scheduler is done with its tasks or not.
// Returning true means this scheduler can be removable.
func (s *scheduler) IsDone() bool {
return s.done.Load()
}
// DoneTimestamp returns the time when scheduler has done.
// This can be used only after IsDone() returns true.
func (s *scheduler) DoneTimestamp() time.Time {
if !s.IsDone() {
return time.Now().AddDate(1, 0, 0)
}
return s.doneTimestamp
}
// DoneDeploymentStatus returns the deployment status when scheduler has done.
// This can be used only after IsDone() returns true.
func (s *scheduler) DoneDeploymentStatus() model.DeploymentStatus {
if !s.IsDone() {
return s.deployment.Status
}
return s.doneDeploymentStatus
}
func (s *scheduler) Cancel(cmd model.ReportableCommand) {
if s.cancelled {
return
}
s.cancelled = true
s.cancelledCh <- &cmd
close(s.cancelledCh)
}
// Run starts running the scheduler.
// It determines what stage should be executed next by which executor.
// The returned error does not mean that the pipeline failed,
// but that the scheduler could not finish its job normally.
func (s *scheduler) Run(ctx context.Context) error {
s.logger.Info("start running scheduler")
deploymentStatus := s.deployment.Status
defer func() {
s.doneTimestamp = s.nowFunc()
s.doneDeploymentStatus = deploymentStatus
s.done.Store(true)
}()
// If this deployment is already completed. Do nothing.
if model.IsCompletedDeployment(s.deployment.Status) {
s.logger.Info("this deployment is already completed")
return nil
}
// Update deployment status to RUNNING if needed.
if model.CanUpdateDeploymentStatus(s.deployment.Status, model.DeploymentStatus_DEPLOYMENT_RUNNING) {
err := s.reportDeploymentStatusChanged(ctx, model.DeploymentStatus_DEPLOYMENT_RUNNING, "The piped started handling this deployment")
if err != nil {
return err
}
}
var (
cancelCommand *model.ReportableCommand
cancelCommander string
lastStage *model.PipelineStage
repoID = s.deployment.GitPath.Repo.Id
statusReason = "The deployment was completed successfully"
)
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_SUCCESS
repoCfg, ok := s.pipedConfig.GetRepository(repoID)
if !ok {
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_FAILURE
statusReason = fmt.Sprintf("Repository %q is not found in the piped config", repoID)
s.reportDeploymentCompleted(ctx, deploymentStatus, statusReason, "")
return fmt.Errorf("unable to find %q from the repository list in piped config", repoID)
}
s.targetDSP = deploysource.NewProvider(
filepath.Join(s.workingDir, "target-deploysource"),
deploysource.NewGitSourceCloner(s.gitClient, repoCfg, "target", s.deployment.Trigger.Commit.Hash),
*s.deployment.GitPath,
s.secretDecrypter,
)
if s.deployment.RunningCommitHash != "" {
s.runningDSP = deploysource.NewProvider(
filepath.Join(s.workingDir, "running-deploysource"),
deploysource.NewGitSourceCloner(s.gitClient, repoCfg, "running", s.deployment.RunningCommitHash),
*s.deployment.GitPath,
s.secretDecrypter,
)
}
// We use another deploy source provider to load the deployment configuration at the target commit.
// This provider is configured with a nil secretDecrypter
// because decrypting the sealed secrets is not required.
// We need only the deployment configuration spec.
configDSP := deploysource.NewProvider(
filepath.Join(s.workingDir, "target-config"),
deploysource.NewGitSourceCloner(s.gitClient, repoCfg, "target", s.deployment.Trigger.Commit.Hash),
*s.deployment.GitPath,
nil,
)
ds, err := configDSP.GetReadOnly(ctx, ioutil.Discard)
if err != nil {
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_FAILURE
statusReason = fmt.Sprintf("Unable to prepare deployment configuration source data at target commit (%v)", err)
s.reportDeploymentCompleted(ctx, deploymentStatus, statusReason, "")
return err
}
s.genericDeploymentConfig = ds.GenericDeploymentConfig
timer := time.NewTimer(s.genericDeploymentConfig.Timeout.Duration())
defer timer.Stop()
// Iterate all the stages and execute the uncompleted ones.
for i, ps := range s.deployment.Stages {
lastStage = s.deployment.Stages[i]
if ps.Status == model.StageStatus_STAGE_SUCCESS {
continue
}
if !ps.Visible || ps.Name == model.StageRollback.String() {
continue
}
// This stage is already completed by a previous scheduler.
if ps.Status == model.StageStatus_STAGE_CANCELLED {
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_CANCELLED
statusReason = fmt.Sprintf("Deployment was cancelled while executing stage %s", ps.Id)
break
}
if ps.Status == model.StageStatus_STAGE_FAILURE {
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_FAILURE
statusReason = fmt.Sprintf("Failed while executing stage %s", ps.Id)
break
}
var (
result model.StageStatus
sig, handler = executor.NewStopSignal()
doneCh = make(chan struct{})
)
go func() {
result = s.executeStage(sig, *ps, func(in executor.Input) (executor.Executor, bool) {
return s.executorRegistry.Executor(model.Stage(ps.Name), in)
})
close(doneCh)
}()
select {
case <-ctx.Done():
handler.Terminate()
<-doneCh
case <-timer.C:
handler.Timeout()
<-doneCh
case cmd := <-s.cancelledCh:
if cmd != nil {
cancelCommand = cmd
cancelCommander = cmd.Commander
handler.Cancel()
<-doneCh
}
case <-doneCh:
break
}
// If all operations of the stage were completed successfully
// handle the next stage.
if result == model.StageStatus_STAGE_SUCCESS {
continue
}
// The deployment was cancelled by a web user.
if result == model.StageStatus_STAGE_CANCELLED {
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_CANCELLED
statusReason = fmt.Sprintf("Cancelled by %s while executing stage %s", cancelCommander, ps.Id)
break
}
if result == model.StageStatus_STAGE_FAILURE {
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_FAILURE
// The stage was failed because of timing out.
if sig.Signal() == executor.StopSignalTimeout {
statusReason = fmt.Sprintf("Timed out while executing stage %s", ps.Id)
} else {
statusReason = fmt.Sprintf("Failed while executing stage %s", ps.Id)
}
break
}
// The deployment was cancelled at the previous stage and this stage was terminated before run.
if result == model.StageStatus_STAGE_NOT_STARTED_YET && cancelCommand != nil {
deploymentStatus = model.DeploymentStatus_DEPLOYMENT_CANCELLED
statusReason = fmt.Sprintf("Cancelled by %s while executing the previous stage of %s", cancelCommander, ps.Id)
break
}
s.logger.Info("stop scheduler because of temination signal", zap.String("stage-id", ps.Id))
return nil
}
// When the deployment has completed but not successful,
// we start rollback stage if the auto-rollback option is true.
if deploymentStatus == model.DeploymentStatus_DEPLOYMENT_CANCELLED ||
deploymentStatus == model.DeploymentStatus_DEPLOYMENT_FAILURE {
if stage, ok := s.deployment.FindRollbackStage(); ok {
// Update to change deployment status to ROLLING_BACK.
if err := s.reportDeploymentStatusChanged(ctx, model.DeploymentStatus_DEPLOYMENT_ROLLING_BACK, statusReason); err != nil {
return err
}
// Start running rollback stage.
var (
sig, handler = executor.NewStopSignal()
doneCh = make(chan struct{})
)
go func() {
rbs := *stage
rbs.Requires = []string{lastStage.Id}
s.executeStage(sig, rbs, func(in executor.Input) (executor.Executor, bool) {
return s.executorRegistry.RollbackExecutor(s.deployment.Kind, in)
})
close(doneCh)
}()
select {
case <-ctx.Done():
handler.Terminate()
<-doneCh
return nil
case <-doneCh:
break
}
}
}
if model.IsCompletedDeployment(deploymentStatus) {
err := s.reportDeploymentCompleted(ctx, deploymentStatus, statusReason, cancelCommander)
if err == nil && deploymentStatus == model.DeploymentStatus_DEPLOYMENT_SUCCESS {
s.reportMostRecentlySuccessfulDeployment(ctx)
}
}
if cancelCommand != nil {
if err := cancelCommand.Report(ctx, model.CommandStatus_COMMAND_SUCCEEDED, nil, nil); err != nil {
s.logger.Error("failed to report command status", zap.Error(err))
}
}
return nil
}
// executeStage finds the executor for the given stage and execute.
func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage, executorFactory func(executor.Input) (executor.Executor, bool)) (finalStatus model.StageStatus) {
var (
ctx = sig.Context()
originalStatus = ps.Status
lp = s.logPersister.StageLogPersister(s.deployment.Id, ps.Id)
)
defer func() {
// When the piped has been terminated (PS kill) while the stage is still running
// we should not mark the log persister as completed.
if !model.IsCompletedStage(finalStatus) && sig.Terminated() {
return
}
lp.Complete(time.Minute)
}()
// Update stage status to RUNNING if needed.
if model.CanUpdateStageStatus(ps.Status, model.StageStatus_STAGE_RUNNING) {
if err := s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_RUNNING, ps.Requires); err != nil {
return model.StageStatus_STAGE_FAILURE
}
originalStatus = model.StageStatus_STAGE_RUNNING
}
// Check the existence of the specified cloud provider.
if !s.pipedConfig.HasCloudProvider(s.deployment.CloudProvider, s.deployment.CloudProviderType()) {
lp.Errorf("This piped is not having the specified cloud provider in this deployment: %v", s.deployment.CloudProvider)
if err := s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires); err != nil {
s.logger.Error("failed to report stage status", zap.Error(err))
}
return model.StageStatus_STAGE_FAILURE
}
// Load the stage configuration.
var stageConfig config.PipelineStage
var stageConfigFound bool
if ps.Predefined {
stageConfig, stageConfigFound = pln.GetPredefinedStage(ps.Id)
} else {
stageConfig, stageConfigFound = s.genericDeploymentConfig.GetStage(ps.Index)
}
if !stageConfigFound {
lp.Error("Unable to find the stage configuration")
if err := s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires); err != nil {
s.logger.Error("failed to report stage status", zap.Error(err))
}
return model.StageStatus_STAGE_FAILURE
}
app, ok := s.applicationLister.Get(s.deployment.ApplicationId)
if !ok {
lp.Errorf("Application %s for this deployment was not found (Maybe it was disabled).", s.deployment.ApplicationId)
s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires)
return model.StageStatus_STAGE_FAILURE
}
cmdLister := stageCommandLister{
lister: s.commandLister,
deploymentID: s.deployment.Id,
stageID: ps.Id,
}
alrLister := appLiveResourceLister{
lister: s.liveResourceLister,
cloudProvider: app.CloudProvider,
appID: app.Id,
}
aStore := appAnalysisResultStore{
store: s.analysisResultStore,
applicationID: app.Id,
}
input := executor.Input{
Stage: &ps,
StageConfig: stageConfig,
Deployment: s.deployment,
Application: app,
PipedConfig: s.pipedConfig,
TargetDSP: s.targetDSP,
RunningDSP: s.runningDSP,
CommandLister: cmdLister,
LogPersister: lp,
MetadataStore: s.metadataStore,
AppManifestsCache: s.appManifestsCache,
AppLiveResourceLister: alrLister,
AnalysisResultStore: aStore,
Logger: s.logger,
Notifier: s.notifier,
EnvName: s.envName,
}
// Find the executor for this stage.
ex, ok := executorFactory(input)
if !ok {
err := fmt.Errorf("no registered executor for stage %s", ps.Name)
lp.Error(err.Error())
s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires)
return model.StageStatus_STAGE_FAILURE
}
// Start running executor.
status := ex.Execute(sig)
// Commit the stage status in the following cases:
// - The stage completed successfully.
// - The stage was cancelled while running (cancelled via the control plane).
// - The stage failed, but not because the piped process was terminated.
if status == model.StageStatus_STAGE_SUCCESS ||
status == model.StageStatus_STAGE_CANCELLED ||
(status == model.StageStatus_STAGE_FAILURE && !sig.Terminated()) {
s.reportStageStatus(ctx, ps.Id, status, ps.Requires)
return status
}
// In case the piped process got killed (a termination signal occurred),
// the original stage status is returned.
return originalStatus
}
func (s *scheduler) reportStageStatus(ctx context.Context, stageID string, status model.StageStatus, requires []string) error {
var (
err error
now = s.nowFunc()
req = &pipedservice.ReportStageStatusChangedRequest{
DeploymentId: s.deployment.Id,
StageId: stageID,
Status: status,
Requires: requires,
Visible: true,
CompletedAt: now.Unix(),
}
retry = pipedservice.NewRetry(10)
)
// Update stage status at local.
s.stageStatuses[stageID] = status
// Update stage status on the remote.
for retry.WaitNext(ctx) {
_, err = s.apiClient.ReportStageStatusChanged(ctx, req)
if err == nil {
break
}
err = fmt.Errorf("failed to report stage status to control-plane: %v", err)
}
return err
}
func (s *scheduler) reportDeploymentStatusChanged(ctx context.Context, status model.DeploymentStatus, desc string) error {
var (
err error
retry = pipedservice.NewRetry(10)
req = &pipedservice.ReportDeploymentStatusChangedRequest{
DeploymentId: s.deployment.Id,
Status: status,
StatusReason: desc,
}
)
// Update deployment status on remote.
for retry.WaitNext(ctx) {
if _, err = s.apiClient.ReportDeploymentStatusChanged(ctx, req); err == nil {
return nil
}
err = fmt.Errorf("failed to report deployment status to control-plane: %v", err)
}
return err
}
func (s *scheduler) reportDeploymentCompleted(ctx context.Context, status model.DeploymentStatus, desc, cancelCommander string) error {
var (
err error
now = s.nowFunc()
req = &pipedservice.ReportDeploymentCompletedRequest{
DeploymentId: s.deployment.Id,
Status: status,
StatusReason: desc,
StageStatuses: s.stageStatuses,
CompletedAt: now.Unix(),
}
retry = pipedservice.NewRetry(10)
)
defer func() {
switch status {
case model.DeploymentStatus_DEPLOYMENT_SUCCESS:
accounts, err := s.getMentionedAccounts(ctx, model.NotificationEventType_EVENT_DEPLOYMENT_SUCCEEDED)
if err != nil {
s.logger.Error("failed to get the list of accounts", zap.Error(err))
}
s.notifier.Notify(model.NotificationEvent{
Type: model.NotificationEventType_EVENT_DEPLOYMENT_SUCCEEDED,
Metadata: &model.NotificationEventDeploymentSucceeded{
Deployment: s.deployment,
EnvName: s.envName,
MentionedAccounts: accounts,
},
})
case model.DeploymentStatus_DEPLOYMENT_FAILURE:
accounts, err := s.getMentionedAccounts(ctx, model.NotificationEventType_EVENT_DEPLOYMENT_FAILED)
if err != nil {
s.logger.Error("failed to get the list of accounts", zap.Error(err))
}
s.notifier.Notify(model.NotificationEvent{
Type: model.NotificationEventType_EVENT_DEPLOYMENT_FAILED,
Metadata: &model.NotificationEventDeploymentFailed{
Deployment: s.deployment,
EnvName: s.envName,
Reason: desc,
MentionedAccounts: accounts,
},
})
case model.DeploymentStatus_DEPLOYMENT_CANCELLED:
accounts, err := s.getMentionedAccounts(ctx, model.NotificationEventType_EVENT_DEPLOYMENT_CANCELLED)
if err != nil {
s.logger.Error("failed to get the list of accounts", zap.Error(err))
}
s.notifier.Notify(model.NotificationEvent{
Type: model.NotificationEventType_EVENT_DEPLOYMENT_CANCELLED,
Metadata: &model.NotificationEventDeploymentCancelled{
Deployment: s.deployment,
EnvName: s.envName,
Commander: cancelCommander,
MentionedAccounts: accounts,
},
})
}
}()
// Update deployment status on remote.
for retry.WaitNext(ctx) {
if _, err = s.apiClient.ReportDeploymentCompleted(ctx, req); err == nil {
return nil
}
err = fmt.Errorf("failed to report deployment status to control-plane: %w", err)
}
return err
}
func (s *scheduler) getMentionedAccounts(ctx context.Context, event model.NotificationEventType) ([]string, error) {
if s.targetDSP == nil {
return nil, fmt.Errorf("targetDSP is not configured")
}
ds, err := s.targetDSP.GetReadOnly(ctx, ioutil.Discard)
if err != nil {
err = fmt.Errorf("failed to prepare running deploy source data (%w)", err)
return nil, err
}
for _, v := range ds.GenericDeploymentConfig.DeploymentNotification.Mentions {
if e := "EVENT_" + v.Event; e == event.String() {
return v.Slack, nil
}
}
return nil, nil
}
func (s *scheduler) reportMostRecentlySuccessfulDeployment(ctx context.Context) error {
var (
err error
req = &pipedservice.ReportApplicationMostRecentDeploymentRequest{
ApplicationId: s.deployment.ApplicationId,
Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS,
Deployment: &model.ApplicationDeploymentReference{
DeploymentId: s.deployment.Id,
Trigger: s.deployment.Trigger,
Summary: s.deployment.Summary,
Version: s.deployment.Version,
StartedAt: s.deployment.CreatedAt,
CompletedAt: s.deployment.CompletedAt,
},
}
retry = pipedservice.NewRetry(10)
)
for retry.WaitNext(ctx) {
if _, err = s.apiClient.ReportApplicationMostRecentDeployment(ctx, req); err == nil {
return nil
}
err = fmt.Errorf("failed to report most recent successful deployment: %w", err)
}
return err
}
type stageCommandLister struct {
lister commandLister
deploymentID string
stageID string
}
func (s stageCommandLister) ListCommands() []model.ReportableCommand {
return s.lister.ListStageCommands(s.deploymentID, s.stageID)
}
type appAnalysisResultStore struct {
store analysisResultStore
applicationID string
}
func (a appAnalysisResultStore) GetLatestAnalysisResult(ctx context.Context) (*model.AnalysisResult, error) {
return a.store.GetLatestAnalysisResult(ctx, a.applicationID)
}
func (a appAnalysisResultStore) PutLatestAnalysisResult(ctx context.Context, analysisResult *model.AnalysisResult) error {
return a.store.PutLatestAnalysisResult(ctx, a.applicationID, analysisResult)
}
| 1 | 20,483 | Nit: I think adding a log (using s.logger) to show why this returns with no error would be better. | pipe-cd-pipe | go |
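A sketch of the change the reviewer suggests, using hypothetical stand-in types rather than the real pipe-cd config structs: log the reason before returning the empty, error-free result.

package main

import "go.uber.org/zap"

// Hypothetical stand-ins for the config types involved.
type Mention struct {
	Event string
	Slack []string
}

type DeploymentNotification struct{ Mentions []Mention }

type GenericDeploymentSpec struct{ DeploymentNotification *DeploymentNotification }

// getMentionedAccounts mirrors the patched function: when the
// notification block is absent, log why an empty result is
// returned without an error, as the review suggests.
func getMentionedAccounts(logger *zap.Logger, cfg GenericDeploymentSpec, deploymentID string) []string {
	if cfg.DeploymentNotification == nil {
		logger.Info("skipping mentions: deployment notification is not configured",
			zap.String("deployment-id", deploymentID),
		)
		return nil
	}
	// ... otherwise match the event and return the configured Slack accounts.
	return nil
}

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()
	_ = getMentionedAccounts(logger, GenericDeploymentSpec{}, "deploy-1")
}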
@@ -334,7 +334,10 @@ func (a *WebAPI) getPiped(ctx context.Context, pipedID string) (*model.Piped, er
// It returns an error unless the piped belongs to the project.
func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error {
pid, err := a.pipedProjectCache.Get(pipedID)
- if err == nil && pid == projectID {
+ if err == nil {
+ if pid != projectID {
+ return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in to")
+ }
return nil
}
| 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/google/uuid"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache/memorycache"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/crypto"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/git"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/rpc/rpcauth"
)
type encrypter interface {
Encrypt(text string) (string, error)
}
// WebAPI implements the behaviors for the gRPC definitions of WebAPI.
type WebAPI struct {
applicationStore datastore.ApplicationStore
environmentStore datastore.EnvironmentStore
deploymentStore datastore.DeploymentStore
pipedStore datastore.PipedStore
projectStore datastore.ProjectStore
stageLogStore stagelogstore.Store
applicationLiveStateStore applicationlivestatestore.Store
commandStore commandstore.Store
encrypter encrypter
appProjectCache *memorycache.TTLCache
deploymentProjectCache *memorycache.TTLCache
pipedProjectCache *memorycache.TTLCache
projectsInConfig map[string]config.ControlPlaneProject
logger *zap.Logger
}
// NewWebAPI creates a new WebAPI instance.
func NewWebAPI(
ctx context.Context,
ds datastore.DataStore,
sls stagelogstore.Store,
alss applicationlivestatestore.Store,
cmds commandstore.Store,
projs map[string]config.ControlPlaneProject,
encrypter encrypter,
logger *zap.Logger) *WebAPI {
a := &WebAPI{
applicationStore: datastore.NewApplicationStore(ds),
environmentStore: datastore.NewEnvironmentStore(ds),
deploymentStore: datastore.NewDeploymentStore(ds),
pipedStore: datastore.NewPipedStore(ds),
projectStore: datastore.NewProjectStore(ds),
stageLogStore: sls,
applicationLiveStateStore: alss,
commandStore: cmds,
projectsInConfig: projs,
encrypter: encrypter,
appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour),
logger: logger.Named("web-api"),
}
return a
}
// Register registers all handling of this service into the specified gRPC server.
func (a *WebAPI) Register(server *grpc.Server) {
webservice.RegisterWebServiceServer(server, a)
}
func (a *WebAPI) AddEnvironment(ctx context.Context, req *webservice.AddEnvironmentRequest) (*webservice.AddEnvironmentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
env := model.Environment{
Id: uuid.New().String(),
Name: req.Name,
Desc: req.Desc,
ProjectId: claims.Role.ProjectId,
}
err = a.environmentStore.AddEnvironment(ctx, &env)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The environment already exists")
}
if err != nil {
a.logger.Error("failed to create environment", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create environment")
}
return &webservice.AddEnvironmentResponse{}, nil
}
func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
envs, err := a.environmentStore.ListEnvironments(ctx, opts)
if err != nil {
a.logger.Error("failed to get environments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get environments")
}
return &webservice.ListEnvironmentsResponse{
Environments: envs,
}, nil
}
func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
id := uuid.New().String()
piped := model.Piped{
Id: id,
Name: req.Name,
Desc: req.Desc,
KeyHash: keyHash,
ProjectId: claims.Role.ProjectId,
EnvIds: req.EnvIds,
Status: model.Piped_OFFLINE,
}
err = a.pipedStore.AddPiped(ctx, &piped)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The piped already exists")
}
if err != nil {
a.logger.Error("failed to register piped", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to register piped")
}
return &webservice.RegisterPipedResponse{
Id: id,
Key: key,
}, nil
}
func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) {
key, keyHash, err := model.GeneratePipedKey()
if err != nil {
a.logger.Error("failed to generate piped key", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to generate the piped key")
}
updater := func(ctx context.Context, pipedID string) error {
return a.pipedStore.UpdateKeyHash(ctx, pipedID, keyHash)
}
if err := a.updatePiped(ctx, req.Id, updater); err != nil {
return nil, err
}
return &webservice.RecreatePipedKeyResponse{
Key: key,
}, nil
}
func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil {
return nil, err
}
return &webservice.EnablePipedResponse{}, nil
}
func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) {
if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil {
return nil, err
}
return &webservice.DisablePipedResponse{}, nil
}
func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil {
return err
}
if err := updater(ctx, pipedID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The piped is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the piped",
zap.String("piped-id", pipedID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the piped ")
}
}
return nil
}
// TODO: Consider using piped-stats to decide piped connection status.
func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
opts := datastore.ListOptions{
Filters: []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
},
}
if req.Options != nil {
if req.Options.Enabled != nil {
opts.Filters = append(opts.Filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !req.Options.Enabled.GetValue(),
})
}
}
pipeds, err := a.pipedStore.ListPipeds(ctx, opts)
if err != nil {
a.logger.Error("failed to get pipeds", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get pipeds")
}
// Redact all sensitive data inside piped message before sending to the client.
for i := range pipeds {
pipeds[i].RedactSensitiveData()
}
return &webservice.ListPipedsResponse{
Pipeds: pipeds,
}, nil
}
func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := a.getPiped(ctx, req.PipedId)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
// Redact all sensitive data inside piped message before sending to the client.
piped.RedactSensitiveData()
return &webservice.GetPipedResponse{
Piped: piped,
}, nil
}
func (a *WebAPI) getPiped(ctx context.Context, pipedID string) (*model.Piped, error) {
piped, err := a.pipedStore.GetPiped(ctx, pipedID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "Piped is not found")
}
if err != nil {
a.logger.Error("failed to get piped", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get piped")
}
return piped, nil
}
// validatePipedBelongsToProject checks if the given piped belongs to the given project.
// It returns an error unless the piped belongs to the project.
func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error {
pid, err := a.pipedProjectCache.Get(pipedID)
if err == nil && pid == projectID {
return nil
}
piped, err := a.getPiped(ctx, pipedID)
if err != nil {
return err
}
a.pipedProjectCache.Put(pipedID, piped.ProjectId)
if piped.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in to")
}
return nil
}
// TODO: Validate the specified piped to ensure that it belongs to the specified environment.
func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
// The path to the application directory must be relative.
if strings.HasPrefix(req.GitPath.Path, "/") {
return nil, status.Error(codes.InvalidArgument, "The path must be a relative path")
}
gitpath, err := a.makeGitPath(ctx, req.GitPath.Repo.Id, req.GitPath.Path, req.GitPath.ConfigFilename, req.PipedId, claims.Role.ProjectId)
if err != nil {
return nil, err
}
app := model.Application{
Id: uuid.New().String(),
Name: req.Name,
EnvId: req.EnvId,
PipedId: req.PipedId,
ProjectId: claims.Role.ProjectId,
GitPath: gitpath,
Kind: req.Kind,
CloudProvider: req.CloudProvider,
}
err = a.applicationStore.AddApplication(ctx, &app)
if errors.Is(err, datastore.ErrAlreadyExists) {
return nil, status.Error(codes.AlreadyExists, "The application already exists")
}
if err != nil {
a.logger.Error("failed to create application", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to create application")
}
return &webservice.AddApplicationResponse{
ApplicationId: app.Id,
}, nil
}
// makeGitPath returns an ApplicationGitPath by adding repository info and a GitPath URL to the given args.
func (a *WebAPI) makeGitPath(ctx context.Context, repoID, path, cfgFilename, pipedID, projectID string) (*model.ApplicationGitPath, error) {
piped, err := a.getPiped(ctx, pipedID)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, pipedID, projectID); err != nil {
return nil, err
}
var repo *model.ApplicationGitRepository
for _, r := range piped.Repositories {
if r.Id == repoID {
repo = r
break
}
}
if repo == nil {
a.logger.Error("repository not found",
zap.String("repo-id", repoID),
zap.String("piped-id", pipedID),
zap.Error(err),
)
return nil, status.Error(codes.Internal, "The repository is not found")
}
u, err := git.MakeDirURL(repo.Remote, path, repo.Branch)
if err != nil {
a.logger.Error("failed to make GitPath URL", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to make GitPath URL")
}
return &model.ApplicationGitPath{
Repo: repo,
Path: path,
ConfigFilename: cfgFilename,
Url: u,
}, nil
}
func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, true); err != nil {
return nil, err
}
return &webservice.EnableApplicationResponse{}, nil
}
func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) {
if err := a.updateApplicationEnable(ctx, req.ApplicationId, false); err != nil {
return nil, err
}
return &webservice.DisableApplicationResponse{}, nil
}
func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return err
}
if err := a.validateApplicationBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil {
return err
}
var updater func(context.Context, string) error
if enable {
updater = a.applicationStore.EnableApplication
} else {
updater = a.applicationStore.DisableApplication
}
if err := updater(ctx, appID); err != nil {
switch err {
case datastore.ErrNotFound:
return status.Error(codes.InvalidArgument, "The application is not found")
case datastore.ErrInvalidArgument:
return status.Error(codes.InvalidArgument, "Invalid value for update")
default:
a.logger.Error("failed to update the application",
zap.String("application-id", appID),
zap.Error(err),
)
return status.Error(codes.Internal, "Failed to update the application")
}
}
return nil
}
func (a *WebAPI) ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
if o.Enabled != nil {
filters = append(filters, datastore.ListFilter{
Field: "Disabled",
Operator: "==",
Value: !o.Enabled.GetValue(),
})
}
// Multiple values are accepted so that an IN query can be supported later.
// Currently only the first value is used.
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: "==",
Value: o.Kinds[0],
})
}
if len(o.SyncStatuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "SyncState.Status",
Operator: "==",
Value: o.SyncStatuses[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: "==",
Value: o.EnvIds[0],
})
}
}
apps, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
})
if err != nil {
a.logger.Error("failed to get applications", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get applications")
}
return &webservice.ListApplicationsResponse{
Applications: apps,
}, nil
}
func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := a.getApplication(ctx, req.ApplicationId)
if err != nil {
return nil, err
}
if err := a.validateApplicationBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
commandID := uuid.New().String()
cmd := model.Command{
Id: commandID,
PipedId: app.PipedId,
ApplicationId: app.Id,
Type: model.Command_SYNC_APPLICATION,
Commander: claims.Subject,
SyncApplication: &model.Command_SyncApplication{
ApplicationId: req.ApplicationId,
},
}
if err := a.addCommand(ctx, &cmd); err != nil {
return nil, err
}
return &webservice.SyncApplicationResponse{
CommandId: commandID,
}, nil
}
func (a *WebAPI) addCommand(ctx context.Context, cmd *model.Command) error {
if err := a.commandStore.AddCommand(ctx, cmd); err != nil {
a.logger.Error("failed to create command", zap.Error(err))
return status.Error(codes.Internal, "Failed to create command")
}
return nil
}
func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := a.getApplication(ctx, req.ApplicationId)
if err != nil {
return nil, err
}
if err := a.validateApplicationBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
return &webservice.GetApplicationResponse{
Application: app,
}, nil
}
func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
piped, err := a.getPiped(ctx, req.PipedId)
if err != nil {
return nil, err
}
if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil {
return nil, err
}
sse := piped.SealedSecretEncryption
if sse == nil {
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain the encryption configuration")
}
var enc encrypter
switch model.SealedSecretManagementType(sse.Type) {
case model.SealedSecretManagementSealingKey:
if sse.PublicKey == "" {
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a public key")
}
enc, err = crypto.NewHybridEncrypter(sse.PublicKey)
if err != nil {
a.logger.Error("failed to initialize the crypter", zap.Error(err))
return nil, status.Error(codes.FailedPrecondition, "Failed to initialize the encrypter")
}
default:
return nil, status.Error(codes.FailedPrecondition, "The piped does not contain a valid encryption type")
}
encryptedText, err := enc.Encrypt(req.Data)
if err != nil {
a.logger.Error("failed to encrypt the secret", zap.Error(err))
return nil, status.Error(codes.FailedPrecondition, "Failed to encrypt the secret")
}
return &webservice.GenerateApplicationSealedSecretResponse{
Data: encryptedText,
}, nil
}
func (a *WebAPI) getApplication(ctx context.Context, appID string) (*model.Application, error) {
app, err := a.applicationStore.GetApplication(ctx, appID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The application is not found")
}
if err != nil {
a.logger.Error("failed to get application", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get application")
}
return app, nil
}
// validateApplicationBelongsToProject checks if the given application belongs to the given project.
// It returns an error unless the application belongs to the project.
func (a *WebAPI) validateApplicationBelongsToProject(ctx context.Context, appID, projectID string) error {
pid, err := a.appProjectCache.Get(appID)
if err == nil && pid == projectID {
return nil
}
app, err := a.getApplication(ctx, appID)
if err != nil {
return err
}
a.appProjectCache.Put(appID, app.ProjectId)
if app.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you are logged in to")
}
return nil
}
func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
orders := []datastore.Order{
{
Field: "UpdatedAt",
Direction: datastore.Desc,
},
}
filters := []datastore.ListFilter{
{
Field: "ProjectId",
Operator: "==",
Value: claims.Role.ProjectId,
},
}
if o := req.Options; o != nil {
// Multiple values are accepted so that an IN query can be supported later.
// Currently only the first value is used.
if len(o.Statuses) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Status",
Operator: "==",
Value: o.Statuses[0],
})
}
if len(o.Kinds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "Kind",
Operator: "==",
Value: o.Kinds[0],
})
}
if len(o.ApplicationIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "ApplicationId",
Operator: "==",
Value: o.ApplicationIds[0],
})
}
if len(o.EnvIds) > 0 {
filters = append(filters, datastore.ListFilter{
Field: "EnvId",
Operator: "==",
Value: o.EnvIds[0],
})
}
if o.MaxUpdatedAt != 0 {
filters = append(filters, datastore.ListFilter{
Field: "UpdatedAt",
Operator: "<=",
Value: o.MaxUpdatedAt,
})
}
}
deployments, err := a.deploymentStore.ListDeployments(ctx, datastore.ListOptions{
Filters: filters,
Orders: orders,
PageSize: int(req.PageSize),
})
if err != nil {
a.logger.Error("failed to get deployments", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get deployments")
}
return &webservice.ListDeploymentsResponse{
Deployments: deployments,
}, nil
}
func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := a.getDeployment(ctx, req.DeploymentId)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
return &webservice.GetDeploymentResponse{
Deployment: deployment,
}, nil
}
func (a *WebAPI) getDeployment(ctx context.Context, deploymentID string) (*model.Deployment, error) {
deployment, err := a.deploymentStore.GetDeployment(ctx, deploymentID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The deployment is not found")
}
if err != nil {
a.logger.Error("failed to get deployment", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get deployment")
}
return deployment, nil
}
// validateDeploymentBelongsToProject checks if the given deployment belongs to the given project.
// It returns an error unless the deployment belongs to the project.
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error {
pid, err := a.deploymentProjectCache.Get(deploymentID)
if err == nil && pid == projectID {
return nil
}
deployment, err := a.getDeployment(ctx, deploymentID)
if err != nil {
return err
}
a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId)
if deployment.ProjectId != projectID {
return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you are logged in to")
}
return nil
}
func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex)
if errors.Is(err, stagelogstore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The stage log is not found")
}
if err != nil {
a.logger.Error("failed to get stage logs", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get stage logs")
}
return &webservice.GetStageLogResponse{
Blocks: blocks,
Completed: completed,
}, nil
}
func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := a.getDeployment(ctx, req.DeploymentId)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
if model.IsCompletedDeployment(deployment.Status) {
return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed")
}
commandID := uuid.New().String()
cmd := model.Command{
Id: commandID,
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
DeploymentId: req.DeploymentId,
Type: model.Command_CANCEL_DEPLOYMENT,
Commander: claims.Subject,
CancelDeployment: &model.Command_CancelDeployment{
DeploymentId: req.DeploymentId,
ForceRollback: req.ForceRollback,
ForceNoRollback: req.ForceNoRollback,
},
}
if err := a.addCommand(ctx, &cmd); err != nil {
return nil, err
}
return &webservice.CancelDeploymentResponse{
CommandId: commandID,
}, nil
}
func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
deployment, err := a.getDeployment(ctx, req.DeploymentId)
if err != nil {
return nil, err
}
if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil {
return nil, err
}
stage, ok := deployment.StageStatusMap()[req.StageId]
if !ok {
return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment")
}
if model.IsCompletedStage(stage) {
return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already completed")
}
commandID := uuid.New().String()
cmd := model.Command{
Id: commandID,
PipedId: deployment.PipedId,
ApplicationId: deployment.ApplicationId,
DeploymentId: req.DeploymentId,
StageId: req.StageId,
Type: model.Command_APPROVE_STAGE,
Commander: claims.Subject,
ApproveStage: &model.Command_ApproveStage{
DeploymentId: req.DeploymentId,
StageId: req.StageId,
},
}
if err := a.addCommand(ctx, &cmd); err != nil {
return nil, err
}
return &webservice.ApproveStageResponse{
CommandId: commandID,
}, nil
}
func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if err := a.validateApplicationBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId)
if err != nil {
a.logger.Error("failed to get application live state", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get application live state")
}
return &webservice.GetApplicationLiveStateResponse{
Snapshot: snapshot,
}, nil
}
// GetProject gets the specified project without sensitive data.
func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
project, err := a.getProject(ctx, claims.Role.ProjectId)
if err != nil {
return nil, err
}
// Redact all sensitive data inside project message before sending to the client.
project.RedactSensitiveData()
return &webservice.GetProjectResponse{
Project: project,
}, nil
}
func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) {
if p, ok := a.projectsInConfig[projectID]; ok {
return &model.Project{
Id: p.Id,
Desc: p.Desc,
StaticAdmin: &model.ProjectStaticUser{
Username: p.StaticAdmin.Username,
PasswordHash: p.StaticAdmin.PasswordHash,
},
}, nil
}
project, err := a.projectStore.GetProject(ctx, projectID)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The project is not found")
}
if err != nil {
a.logger.Error("failed to get project", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to get project")
}
return project, nil
}
// UpdateProjectStaticAdmin updates the static admin user settings.
func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil {
a.logger.Error("failed to update static admin", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update static admin")
}
return &webservice.UpdateProjectStaticAdminResponse{}, nil
}
// EnableStaticAdmin enables static admin login.
func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to enable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to enable static admin login")
}
return &webservice.EnableStaticAdminResponse{}, nil
}
// DisableStaticAdmin disables static admin login.
func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil {
a.logger.Error("failed to disenable static admin login", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to disenable static admin login")
}
return &webservice.DisableStaticAdminResponse{}, nil
}
// UpdateProjectSSOConfig updates the sso settings.
func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := req.Sso.Encrypt(a.encrypter); err != nil {
a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations")
}
if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil {
a.logger.Error("failed to update project single sign on settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
}
return &webservice.UpdateProjectSSOConfigResponse{}, nil
}
// UpdateProjectRBACConfig updates the RBAC settings.
func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok {
return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration")
}
if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil {
a.logger.Error("failed to update project single sign on settings", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to update project single sign on settings")
}
return &webservice.UpdateProjectRBACConfigResponse{}, nil
}
// GetMe gets information about the current user.
func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
return &webservice.GetMeResponse{
Subject: claims.Subject,
AvatarUrl: claims.AvatarURL,
ProjectId: claims.Role.ProjectId,
ProjectRole: claims.Role.ProjectRole,
}, nil
}
func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) {
cmd, err := a.commandStore.GetCommand(ctx, req.CommandId)
if errors.Is(err, datastore.ErrNotFound) {
return nil, status.Error(codes.NotFound, "The command is not found")
}
if err != nil {
return nil, status.Error(codes.Internal, "Failed to get command")
}
// TODO: Add check if requested command belongs to logged-in project, after adding project id field to model.Command.
return &webservice.GetCommandResponse{
Command: cmd,
}, nil
}
func (a *WebAPI) ListDeploymentConfigTemplates(ctx context.Context, req *webservice.ListDeploymentConfigTemplatesRequest) (*webservice.ListDeploymentConfigTemplatesResponse, error) {
claims, err := rpcauth.ExtractClaims(ctx)
if err != nil {
a.logger.Error("failed to authenticate the current user", zap.Error(err))
return nil, err
}
app, err := a.getApplication(ctx, req.ApplicationId)
if err != nil {
return nil, err
}
if err := a.validateApplicationBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil {
return nil, err
}
var templates []*webservice.DeploymentConfigTemplate
switch app.Kind {
case model.ApplicationKind_KUBERNETES:
templates = k8sDeploymentConfigTemplates
case model.ApplicationKind_TERRAFORM:
templates = terraformDeploymentConfigTemplates
case model.ApplicationKind_CROSSPLANE:
templates = crossplaneDeploymentConfigTemplates
case model.ApplicationKind_LAMBDA:
templates = lambdaDeploymentConfigTemplates
case model.ApplicationKind_CLOUDRUN:
templates = cloudrunDeploymentConfigTemplates
default:
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Unknown application kind %v", app.Kind))
}
for _, t := range templates {
g := app.GetGitPath()
filename := g.ConfigFilename
if filename == "" {
filename = ".pipe.yaml"
}
t.FileCreationUrl, err = git.MakeFileCreationURL(g.Repo.Remote, g.Path, g.Repo.Branch, filename, t.Content)
if err != nil {
a.logger.Error("failed to make a link to creat a file", zap.Error(err))
return nil, status.Error(codes.Internal, "Failed to make a link to creat a file")
}
}
if len(req.Labels) == 0 {
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: templates}, nil
}
filtered := filterDeploymentConfigTemplates(templates, req.Labels)
return &webservice.ListDeploymentConfigTemplatesResponse{Templates: filtered}, nil
}
// Returns the templates from the given list that carry all the specified labels.
func filterDeploymentConfigTemplates(templates []*webservice.DeploymentConfigTemplate, labels []webservice.DeploymentConfigTemplateLabel) []*webservice.DeploymentConfigTemplate {
filtered := make([]*webservice.DeploymentConfigTemplate, 0, len(templates))
L:
for _, template := range templates {
for _, l := range labels {
if !template.HasLabel(l) {
continue L
}
}
filtered = append(filtered, template)
}
return filtered
}
| 1 | 11,134 | Btw, it would be nice if we had some tests for those validation functions. | pipe-cd-pipe | go |
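Following up on the review note above, a minimal table-driven test sketch for the ownership validators could look like the following. The standalone validateBelongsToProject helper and the fake lookup are assumptions for illustration only; they mirror the cache-free core of validatePipedBelongsToProject rather than exercising the real WebAPI and its stores.

package webapi_test

import (
	"context"
	"testing"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// validateBelongsToProject mirrors the ownership check shared by the validators above.
func validateBelongsToProject(ctx context.Context, lookup func(context.Context, string) (string, error), id, projectID string) error {
	owner, err := lookup(ctx, id)
	if err != nil {
		return err
	}
	if owner != projectID {
		return status.Error(codes.PermissionDenied, "resource doesn't belong to the project")
	}
	return nil
}

func TestValidateBelongsToProject(t *testing.T) {
	// Fake datastore lookup: piped-1 belongs to project-a, everything else is unknown.
	lookup := func(_ context.Context, id string) (string, error) {
		if id == "piped-1" {
			return "project-a", nil
		}
		return "", status.Error(codes.NotFound, "Piped is not found")
	}
	cases := []struct {
		name      string
		id        string
		projectID string
		wantCode  codes.Code
	}{
		{"owned by the project", "piped-1", "project-a", codes.OK},
		{"owned by another project", "piped-1", "project-b", codes.PermissionDenied},
		{"unknown piped", "piped-2", "project-a", codes.NotFound},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			err := validateBelongsToProject(context.Background(), lookup, c.id, c.projectID)
			// status.Code(nil) is codes.OK, so the success case needs no special handling.
			if got := status.Code(err); got != c.wantCode {
				t.Errorf("got %v, want %v", got, c.wantCode)
			}
		})
	}
}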
@@ -23,6 +23,8 @@ public abstract class PathTemplateCheckView {
public abstract String paramName();
+ public abstract String methodName();
+
public static Builder newBuilder() {
return new AutoValue_PathTemplateCheckView.Builder();
} | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel;
import com.google.auto.value.AutoValue;
@AutoValue
public abstract class PathTemplateCheckView {
public abstract String pathTemplateName();
public abstract String paramName();
public static Builder newBuilder() {
return new AutoValue_PathTemplateCheckView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder pathTemplateName(String val);
public abstract Builder paramName(String val);
public abstract PathTemplateCheckView build();
}
}
| 1 | 16,382 | Should this be called something that indicates its function, rather than its content? validationMessagePrefix, or similar? | googleapis-gapic-generator | java |
@@ -3,7 +3,8 @@
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
-#Copyright (C) 2010-2015 NV Access Limited, Mesar Hameed
+#Copyright (C) 2010-2019 NV Access Limited, Mesar Hameed, Takuya Nishimoto
+#Copyright (C) 2018-2019 Takuya Nishimoto
"""Utilities related to NVDA Key Commands documents.
""" | 1 | # -*- coding: UTF-8 -*-
#keyCommandsDoc.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2010-2015 NV Access Limited, Mesar Hameed
"""Utilities related to NVDA Key Commands documents.
"""
import os
import codecs
import re
import txt2tags
LINE_END = u"\r\n"
class KeyCommandsError(Exception):
"""Raised due to an error encountered in the User Guide related to generation of the Key Commands document.
"""
class KeyCommandsMaker(object):
"""Generates the Key Commands document from the User Guide.
To generate a Key Commands document, create an instance and then call L{make} on it.
Generation of the Key Commands document requires certain commands to be included in the user guide.
These commands must begin at the start of the line and take the form::
%kc:command: arg
The kc:title command must appear first and specifies the title of the Key Commands document. For example::
%kc:title: NVDA Key Commands
The kc:includeconf command allows you to insert a txt2tags includeconf command in the Key Commands document. For example::
%kc:includeconf: ../ar.t2tconf
You may use multiple kc:includeconf commands, but they must appear before any of the other commands below.
The rest of these commands are used to include key commands into the document.
Appropriate headings from the User Guide will be included implicitly.
The kc:beginInclude command begins a block of text which should be included verbatim.
The block ends at the kc:endInclude command.
For example::
%kc:beginInclude
|| Name | Desktop command | Laptop command | Description |
...
%kc:endInclude
The kc:settingsSection command indicates the beginning of a section documenting individual settings.
It specifies the header row for a table summarising the settings indicated by the kc:setting command (see below).
In order, it must consist of a name column, a column for each keyboard layout and a description column.
For example::
%kc:settingsSection: || Name | Desktop command | Laptop command | Description |
The kc:setting command indicates a section for an individual setting.
It must be followed by:
* A heading containing the name of the setting;
* A table row for each keyboard layout, or if the key is common to all layouts, a single line of text specifying the key after a colon;
* A blank line; and
* A line describing the setting.
For example::
%kc:setting
==== Braille Tethered To ====
| Desktop command | NVDA+control+t |
| Laptop Command | NVDA+control+t |
This option allows you to choose whether the braille display will follow the system focus, or whether it follows the navigator object / review cursor.
"""
t2tRe = None
RE_COMMAND = re.compile(r"^%kc:(?P<cmd>[^:\s]+)(?:: (?P<arg>.*))?$")
KCSECT_HEADER = 0
KCSECT_CONFIG = 1
KCSECT_BODY = 2
@classmethod
def _initClass(cls):
if cls.t2tRe:
return
# Only fetch this once.
cls.t2tRe = txt2tags.getRegexes()
def __init__(self, userGuideFilename, keyCommandsFileName):
"""Constructor.
@param userGuideFilename: The file name of the User Guide to be used as input.
@type userGuideFilename: str
@param keyCommandsFileName: The file name of the key commands file to be output.
@type keyCommandsFileName: str
"""
self._initClass()
self.ugFn = userGuideFilename
#: The file name of the Key Commands document that will be generated.
#: This will be in the same directory as the User Guide.
self.kcFn = keyCommandsFileName
#: The current section of the key commands file.
self._kcSect = self.KCSECT_HEADER
#: The current stack of headings.
self._headings = []
#: The 0 based level of the last heading in L{_headings} written to the key commands file.
self._kcLastHeadingLevel = -1
#: Whether lines which aren't commands should be written to the key commands file as is.
self._kcInclude = False
#: The header row for settings sections.
self._settingsHeaderRow = None
#: The number of layouts for settings in a settings section.
self._settingsNumLayouts = 0
#: The current line number being processed, used to present location of syntax errors
self._lineNum = 0
def make(self):
"""Generate the Key Commands document.
@postcondition: If the User Guide contains appropriate commands, the Key Commands document will be generated and saved as L{kcFn}.
Otherwise, no file will be generated.
@return: C{True} if a document was generated, C{False} otherwise.
@rtype: bool
@raise IOError:
@raise KeyCommandsError:
"""
tKcFn=self.kcFn+'__'
self._ug = codecs.open(self.ugFn, "r", "utf-8-sig")
self._kc = codecs.open(tKcFn, "w", "utf-8-sig")
success=False
with self._ug, self._kc:
self._make()
success=self._kc.tell() > 0
if success:
os.rename(tKcFn,self.kcFn)
else:
os.remove(tKcFn)
return success
def _make(self):
for line in self._ug:
self._lineNum += 1
line = line.rstrip()
m = self.RE_COMMAND.match(line)
if m:
self._command(**m.groupdict())
continue
m = self.t2tRe["numtitle"].match(line)
if m:
self._heading(m)
continue
if self._kcInclude:
self._kc.write(line + LINE_END)
def _command(self, cmd=None, arg=None):
# Handle header commands.
if cmd == "title":
if self._kcSect > self.KCSECT_HEADER:
raise KeyCommandsError("%d, title command is not valid here" % self._lineNum)
# Write the title and two blank lines to complete the txt2tags header section.
self._kc.write(arg + LINE_END * 3)
self._kcSect = self.KCSECT_CONFIG
self._kc.write("%%!includeconf: ../global.t2tconf%s" % LINE_END)
return
elif self._kcSect == self.KCSECT_HEADER:
raise KeyCommandsError("%d, title must be the first command" % self._lineNum)
elif cmd == "includeconf":
if self._kcSect > self.KCSECT_CONFIG:
raise KeyCommandsError("%d, includeconf command is not valid here" % self._lineNum)
self._kc.write("%%!includeconf: %s%s" % (arg, LINE_END))
return
elif self._kcSect == self.KCSECT_CONFIG:
self._kc.write(LINE_END)
self._kcSect = self.KCSECT_BODY
if cmd == "beginInclude":
self._writeHeadings()
self._kcInclude = True
elif cmd == "endInclude":
self._kcInclude = False
self._kc.write(LINE_END)
elif cmd == "settingsSection":
# The argument is the table header row for the settings section.
self._settingsHeaderRow = arg
# There are name and description columns.
# Each of the remaining columns provides keystrokes for one layout.
# There's one less delimiter than there are columns, hence subtracting 1 instead of 2.
self._settingsNumLayouts = arg.strip("|").count("|") - 1
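# Worked example (illustrative): for the header row
# "|| Name | Desktop command | Laptop command | Description |",
# strip("|") leaves " Name | Desktop command | Laptop command | Description ",
# which still contains 3 delimiters, so 3 - 1 = 2 keyboard layouts.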
elif cmd == "setting":
self._handleSetting()
else:
raise KeyCommandsError("%d, Invalid command %s" % (self._lineNum, cmd))
def _areHeadingsPending(self):
return self._kcLastHeadingLevel < len(self._headings) - 1
def _writeHeadings(self):
level = self._kcLastHeadingLevel + 1
# Only write headings we haven't yet written.
for level, heading in enumerate(self._headings[level:], level):
# We don't want numbered headings in the output.
label=heading.group("label")
headingText = u"{id}{txt}{id}{label}".format(
id="=" * len(heading.group("id")),
txt=heading.group("txt"),
label="[%s]" % label if label else "")
# Write the heading and a blank line.
self._kc.write(headingText + LINE_END * 2)
self._kcLastHeadingLevel = level
def _heading(self, m):
# We work with 0 based heading levels.
level = len(m.group("id")) - 1
try:
del self._headings[level:]
except IndexError:
pass
self._headings.append(m)
self._kcLastHeadingLevel = min(self._kcLastHeadingLevel, level - 1)
RE_SETTING_SINGLE_KEY = re.compile(ur"^[^|]+?[::]\s*(.+?)\s*$")
def _handleSetting(self):
if not self._settingsHeaderRow:
raise KeyCommandsError("%d, setting command cannot be used before settingsSection command" % self._lineNum)
if self._areHeadingsPending():
# There are new headings to write.
# If there was a previous settings table, it ends here, so write a blank line.
self._kc.write(LINE_END)
self._writeHeadings()
# New headings were written, so we need to output the header row.
self._kc.write(self._settingsHeaderRow + LINE_END)
# The next line should be a heading which is the name of the setting.
line = next(self._ug)
self._lineNum += 1
m = self.t2tRe["title"].match(line)
if not m:
raise KeyCommandsError("%d, setting command must be followed by heading" % self._lineNum)
name = m.group("txt")
# The next few lines should be table rows for each layout.
# Alternatively, if the key is common to all layouts, there will be a single line of text specifying the key after a colon.
keys = []
for layout in xrange(self._settingsNumLayouts):
line = next(self._ug).strip()
self._lineNum += 1
m = self.RE_SETTING_SINGLE_KEY.match(line)
if m:
keys.append(m.group(1))
break
elif not self.t2tRe["table"].match(line):
raise KeyCommandsError("%d, setting command: There must be one table row for each keyboard layout" % self._lineNum)
# This is a table row.
# The key will be the second column.
# TODO: Error checking.
key = line.strip("|").split("|")[1].strip()
keys.append(key)
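# Note the chained comparison below: it is true only when exactly one key
# was given while the section defines more than one layout.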
if 1 == len(keys) < self._settingsNumLayouts:
# The key has only been specified once, so it is the same in all layouts.
key = keys[0]
keys[1:] = (key for layout in xrange(self._settingsNumLayouts - 1))
# There should now be a blank line.
line = next(self._ug).strip()
self._lineNum += 1
if line:
raise KeyCommandsError("%d, setting command: The keyboard shortcuts must be followed by a blank line. Multiple keys must be included in a table. Erroneous key: %s" % (self._lineNum, key))
# Finally, the next line should be the description.
desc = next(self._ug).strip()
self._lineNum += 1
self._kc.write(u"| {name} | {keys} | {desc} |{lineEnd}".format(
name=name,
keys=u" | ".join(keys),
desc=desc, lineEnd=LINE_END))
def remove(self):
"""Remove the generated Key Commands document.
"""
try:
os.remove(self.kcFn)
except OSError:
pass
| 1 | 25,409 | Please remove this line | nvaccess-nvda | py |
@@ -599,7 +599,7 @@ void CBOINCGUIApp::OnInitCmdLine(wxCmdLineParser &parser) {
#if (defined(__WXMAC__) && defined(_DEBUG))
parser.AddLongOption("NSDocumentRevisionsDebugMode", _("Not used: workaround for bug in XCode 4.2"));
#endif
- parser.AddSwitch("nd", "no-daemon", _("Not run the daemon"));
+ parser.AddSwitch("nd", "no-daemon", _("Don't run the client"));
}
| 1 | // This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2017 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
#if defined(__GNUG__) && !defined(__APPLE__)
#pragma implementation "BOINCGUIApp.h"
#endif
#ifdef __WXMAC__
#include <Carbon/Carbon.h>
#include "filesys.h"
#include "util.h"
#include "mac_util.h"
#include "sandbox.h"
#endif
#include "stdwx.h"
#include "diagnostics.h"
#include "network.h"
#include "util.h"
#include "mfile.h"
#include "miofile.h"
#include "parse.h"
#include "idlemon.h"
#include "Events.h"
#include "LogBOINC.h"
#include "BOINCGUIApp.h"
#include "SkinManager.h"
#include "MainDocument.h"
#include "BOINCClientManager.h"
#include "BOINCTaskBar.h"
#include "BOINCBaseFrame.h"
#include "AdvancedFrame.h"
#include "DlgExitMessage.h"
#include "DlgEventLog.h"
#include "procinfo.h"
#include "sg_BoincSimpleFrame.h"
bool s_bSkipExitConfirmation = false;
DEFINE_EVENT_TYPE(wxEVT_RPC_FINISHED)
IMPLEMENT_APP(CBOINCGUIApp)
IMPLEMENT_DYNAMIC_CLASS(CBOINCGUIApp, wxApp)
BEGIN_EVENT_TABLE (CBOINCGUIApp, wxApp)
EVT_ACTIVATE_APP(CBOINCGUIApp::OnActivateApp)
EVT_RPC_FINISHED(CBOINCGUIApp::OnRPCFinished)
#ifndef __WXMAC__
EVT_END_SESSION(CBOINCGUIApp::OnEndSession)
#endif
END_EVENT_TABLE ()
bool CBOINCGUIApp::OnInit() {
// Initialize globals
#ifdef SANDBOX
g_use_sandbox = true;
#else
g_use_sandbox = false;
#endif
s_bSkipExitConfirmation = false;
m_bFilterEvents = false;
m_bAboutDialogIsOpen = false;
// Initialize class variables
m_pInstanceChecker = NULL;
m_pLocale = NULL;
m_pSkinManager = NULL;
m_pFrame = NULL;
m_pDocument = NULL;
m_pTaskBarIcon = NULL;
m_pEventLog = NULL;
m_bEventLogWasActive = false;
m_bProcessingActivateAppEvent = false;
#ifdef __WXMAC__
m_pMacDockIcon = NULL;
#endif
m_strBOINCMGRExecutableName = wxEmptyString;
m_strBOINCMGRRootDirectory = wxEmptyString;
m_strBOINCMGRDataDirectory = wxEmptyString;
m_strHostNameArg = wxEmptyString;
m_strPasswordArg = wxEmptyString;
m_iRPCPortArg = GUI_RPC_PORT;
m_strBOINCArguments = wxEmptyString;
m_strISOLanguageCode = wxEmptyString;
m_bGUIVisible = true;
m_bDebugSkins = false;
m_bMultipleInstancesOK = false;
m_bBOINCMGRAutoStarted = false;
m_iBOINCMGRDisableAutoStart = 0;
m_iShutdownCoreClient = 0;
m_iDisplayExitDialog = 1;
m_iDisplayShutdownConnectedClientDialog = 1;
m_iGUISelected = BOINC_SIMPLEGUI;
m_bSafeMessageBoxDisplayed = 0;
m_bRunDaemon = true;
m_bNeedRunDaemon = true;
// Initialize local variables
int iErrorCode = 0;
int iDesiredLanguageCode = 0;
bool bOpenEventLog = false;
wxString strDesiredSkinName = wxEmptyString;
#ifdef SANDBOX
wxString strDialogMessage = wxEmptyString;
#endif
bool success = false;
// Configure wxWidgets platform specific code
#ifdef __WXMSW__
wxSystemOptions::SetOption(wxT("msw.staticbox.optimized-paint"), 0);
#endif
#ifdef __WXMAC__
// In wxMac-2.8.7, default wxListCtrl::RefreshItem() does not work
// so use traditional generic implementation.
// This has been fixed in wxMac-2.8.8, but the Mac native implementation:
// - takes 3 times the CPU time as the Mac generic version.
// - seems to always redraw entire control even if asked to refresh only one row.
// - causes major flicker of progress bars, (probably due to full redraws.)
wxSystemOptions::SetOption(wxT("mac.listctrl.always_use_generic"), 1);
AEInstallEventHandler( kCoreEventClass, kAEQuitApplication, NewAEEventHandlerUPP((AEEventHandlerProcPtr)QuitAppleEventHandler), 0, false );
#endif
// Commandline parsing is done in wxApp::OnInit()
if (!wxApp::OnInit()) {
return false;
}
if (g_use_sandbox) {
wxCHANGE_UMASK(2); // Set file creation mask to be writable by both user and group
// Our umask will be inherited by all our child processes
}
// Setup application and company information
SetAppName(wxT("BOINC Manager"));
SetVendorName(wxT("Space Sciences Laboratory, U.C. Berkeley"));
// Initialize the configuration storage module
m_pConfig = new wxConfig(GetAppName());
wxConfigBase::Set(m_pConfig);
wxASSERT(m_pConfig);
// Restore Application State
m_pConfig->SetPath(wxT("/"));
m_pConfig->Read(wxT("AutomaticallyShutdownClient"), &m_iShutdownCoreClient, 0L);
m_pConfig->Read(wxT("DisplayShutdownClientDialog"), &m_iDisplayExitDialog, 1L);
m_pConfig->Read(wxT("DisplayShutdownConnectedClientDialog"), &m_iDisplayShutdownConnectedClientDialog, 1L);
m_pConfig->Read(wxT("DisableAutoStart"), &m_iBOINCMGRDisableAutoStart, 0L);
m_pConfig->Read(wxT("LanguageISO"), &m_strISOLanguageCode, wxT(""));
m_pConfig->Read(wxT("GUISelection"), &m_iGUISelected, BOINC_SIMPLEGUI);
m_pConfig->Read(wxT("EventLogOpen"), &bOpenEventLog);
m_pConfig->Read(wxT("RunDaemon"), &m_bRunDaemon, 1L);
// Detect if the daemon should be launched
m_bNeedRunDaemon = m_bNeedRunDaemon && m_bRunDaemon;
// Should we abort the BOINC Manager startup process?
if (m_bBOINCMGRAutoStarted && m_iBOINCMGRDisableAutoStart) {
return false;
}
// Detect the BOINC Manager executable name.
DetectExecutableName();
// Detect where BOINC Manager was installed to.
DetectRootDirectory();
// Detect where the BOINC Data files are.
DetectDataDirectory();
// Switch the current directory to the BOINC Data directory
if (!GetDataDirectory().IsEmpty()) {
success = wxSetWorkingDirectory(GetDataDirectory());
if (!success) {
if (!g_use_sandbox) {
if (!wxDirExists(GetDataDirectory())) {
success = wxMkdir(GetDataDirectory(), 0777); // Does nothing if dir exists
}
}
}
}
if (!success) iErrorCode = -1016;
// Initialize the BOINC Diagnostics Framework
int dwDiagnosticsFlags =
#ifdef _DEBUG
BOINC_DIAG_HEAPCHECKENABLED |
BOINC_DIAG_MEMORYLEAKCHECKENABLED |
#endif
BOINC_DIAG_DUMPCALLSTACKENABLED |
BOINC_DIAG_PERUSERLOGFILES |
BOINC_DIAG_REDIRECTSTDERR |
BOINC_DIAG_REDIRECTSTDOUT |
BOINC_DIAG_TRACETOSTDOUT;
diagnostics_init(dwDiagnosticsFlags, "stdoutgui", "stderrgui");
// Enable Logging and Trace Masks
m_pLog = new wxLogBOINC();
wxLog::SetActiveTarget(m_pLog);
m_pLog->AddTraceMask(wxT("Function Start/End"));
m_pLog->AddTraceMask(wxT("Function Status"));
// Initialize the internationalization module
#ifdef __WXMSW__
// On Windows, set all locales for this thread on a per-thread basis
_configthreadlocale(_ENABLE_PER_THREAD_LOCALE);
#endif
m_pLocale = new wxLocale();
wxASSERT(m_pLocale);
//
if (m_strISOLanguageCode.IsEmpty()) {
iDesiredLanguageCode = wxLANGUAGE_DEFAULT;
m_pLocale->Init(iDesiredLanguageCode);
m_strISOLanguageCode = m_pLocale->GetCanonicalName();
} else {
m_pLocale->Init(wxLocale::FindLanguageInfo(m_strISOLanguageCode)->Language);
}
// Look for the localization files by absolute and relative locations.
// preference given to the absolute location.
if (!m_strBOINCMGRRootDirectory.IsEmpty()) {
m_pLocale->AddCatalogLookupPathPrefix(
wxString(m_strBOINCMGRRootDirectory + wxT("locale"))
);
}
m_pLocale->AddCatalogLookupPathPrefix(wxT("locale"));
m_pLocale->AddCatalog(wxT("BOINC-Manager"));
m_pLocale->AddCatalog(wxT("BOINC-Client"));
m_pLocale->AddCatalog(wxT("BOINC-Web"));
InitSupportedLanguages();
// Note: JAWS for Windows will only speak the context-sensitive
// help if you use this help provider:
wxHelpProvider::Set(new wxHelpControllerHelpProvider());
// Enable known image types
wxInitAllImageHandlers();
// Initialize the skin manager
m_pSkinManager = new CSkinManager(m_bDebugSkins);
wxASSERT(m_pSkinManager);
// Load desired manager skin
m_pConfig->Read(wxT("Skin"), &strDesiredSkinName, m_pSkinManager->GetDefaultSkinName());
m_pSkinManager->ReloadSkin(strDesiredSkinName);
#ifdef SANDBOX
// Make sure owners, groups and permissions are correct for the current setting of g_use_sandbox
//
// NOTE: GDB and LLDB can't attach to applications which are running as
// a different user or group.
// Normally, the Mac Development (Debug) builds do not define SANDBOX, so
// check_security() is never called. However, it is possible to use GDB
// or LLDB on sandbox-specific code, as long as the code is run as the
// current user (i.e., not as boinc_master or boinc_project), and the
// current user is a member of both groups boinc_master and boinc_project.
// However, this has not been thoroughly tested. Please see the comments
// in SetupSecurity.cpp and check_security.cpp for more details.
//
char path_to_error[MAXPATHLEN];
path_to_error[0] = '\0';
if (!iErrorCode) {
iErrorCode = check_security(
g_use_sandbox, true, path_to_error, sizeof(path_to_error)
);
}
if (iErrorCode) {
ShowApplication(true);
if (iErrorCode == -1099) {
#if (defined(__WXMAC__) && defined (_DEBUG))
strDialogMessage.Printf(
"To debug with sandbox security enabled, the current user\n"
"must be a member of both groups boinc_master and boinc_project."
);
#else // ! (defined(__WXMAC__) && defined (_DEBUG))
strDialogMessage.Printf(
_("You currently are not authorized to manage %s.\n\nTo run %s as this user, please:\n- reinstall %s answering \"Yes\" to the question about non-administrative users\n or\n- contact your administrator to add you to the 'boinc_master' user group."),
m_pSkinManager->GetAdvanced()->GetApplicationShortName().c_str(),
m_pSkinManager->GetAdvanced()->GetApplicationShortName().c_str(),
m_pSkinManager->GetAdvanced()->GetApplicationShortName().c_str()
);
#endif // ! (defined(__WXMAC__) && defined (_DEBUG))
} else {
strDialogMessage.Printf(
_("%s ownership or permissions are not set properly; please reinstall %s.\n(Error code %d"),
m_pSkinManager->GetAdvanced()->GetApplicationShortName().c_str(),
m_pSkinManager->GetAdvanced()->GetApplicationShortName().c_str(),
iErrorCode
);
if (path_to_error[0]) {
strDialogMessage += _(" at ");
strDialogMessage += wxString::FromUTF8(path_to_error);
}
strDialogMessage += _(")");
fprintf(stderr, "%s\n", (const char*)strDialogMessage.utf8_str());
}
wxMessageDialog* pDlg = new wxMessageDialog(
NULL,
strDialogMessage,
m_pSkinManager->GetAdvanced()->GetApplicationName(),
wxOK
);
pDlg->ShowModal();
if (pDlg)
pDlg->Destroy();
return false;
}
#endif // SANDBOX
#ifdef __WXMSW__
// Perform any last minute checks that should keep the manager
// from starting up.
wxString strRebootPendingFile =
GetRootDirectory() + wxFileName::GetPathSeparator() + wxT("RebootPending.txt");
if (wxFile::Exists(strRebootPendingFile)) {
wxMessageDialog dialog(
NULL,
_("A reboot is required in order for BOINC to run properly.\nPlease reboot your computer and try again."),
_("BOINC Manager"),
wxOK|wxICON_ERROR
);
dialog.ShowModal();
return false;
}
#endif
#ifdef __WXMAC__
// Prevent a situation where wxSingleInstanceChecker lock file
// from last login auto start (with same pid) was not deleted.
// This path must match that in DetectDuplicateInstance()
wxString lockFilePath = wxString(wxFileName::GetHomeDir() +
"/Library/Application Support/BOINC/" +
wxTheApp->GetAppName() +
'-' + wxGetUserId()
);
if (WasFileModifiedBeforeSystemBoot((char *)(const char*)lockFilePath.utf8_str())) {
boinc_delete_file(lockFilePath.utf8_str());
}
#endif
// Detect if BOINC Manager is already running, if so, bring it into the
// foreground and then exit.
if (DetectDuplicateInstance()) {
return false;
}
// Initialize the main document
m_pDocument = new CMainDocument();
wxASSERT(m_pDocument);
m_pDocument->OnInit();
// Is there a condition in which the Simple GUI should not be used?
if (BOINC_SIMPLEGUI == m_iGUISelected) {
// Screen too small?
if (wxGetDisplaySize().GetHeight() < 600) {
m_iGUISelected = BOINC_ADVANCEDGUI;
}
}
// Initialize the task bar icon
m_pTaskBarIcon = new CTaskBarIcon(
m_pSkinManager->GetAdvanced()->GetApplicationName(),
m_pSkinManager->GetAdvanced()->GetApplicationIcon(),
m_pSkinManager->GetAdvanced()->GetApplicationDisconnectedIcon(),
m_pSkinManager->GetAdvanced()->GetApplicationSnoozeIcon()
#ifdef __WXMAC__
, wxTBI_CUSTOM_STATUSITEM
#endif
);
wxASSERT(m_pTaskBarIcon);
#ifdef __WXMAC__
m_pMacDockIcon = new CTaskBarIcon(
m_pSkinManager->GetAdvanced()->GetApplicationName(),
m_pSkinManager->GetAdvanced()->GetApplicationIcon(),
m_pSkinManager->GetAdvanced()->GetApplicationDisconnectedIcon(),
m_pSkinManager->GetAdvanced()->GetApplicationSnoozeIcon()
, wxTBI_DOCK
);
wxASSERT(m_pMacDockIcon);
#endif
// Start up the System Idle Detection code
IdleTrackerAttach();
#ifdef __WXMAC__
// Don't open main window if we were started automatically at login
// We are launched hidden if started from our login item (except if
// we had windows open at logout, the system "restores" them.)
m_bGUIVisible = IsApplicationVisible();
if (getTimeSinceBoot() < 30.) {
// If the system was just started, we usually get a "Connection
// failed" error if we try to connect too soon, so delay a bit.
sleep(10);
}
#endif
// Show the UI
SetActiveGUI(m_iGUISelected, m_bGUIVisible);
if (!m_bGUIVisible) {
ShowApplication(false);
}
if (bOpenEventLog) {
DisplayEventLog(m_bGUIVisible);
if (m_bGUIVisible && m_pFrame) {
m_pFrame->Raise();
}
}
return true;
}
#ifdef __WXMAC__
// We can "show" (unhide) the main window when the
// application is hidden and it won't be visible.
// If we don't do this under wxCocoa 3.0, the Dock
// icon will bounce (as in notification) when we
// click on our menu bar icon.
// But wxFrame::Show(true) makes the application
// visible again, so we instead call
// m_pFrame->wxWindow::Show() here.
//
// We need to call HideThisApp() after the event
// loop is running, so this is called from
// CBOINCBaseFrame::OnPeriodicRPC() at the first
// firing of ID_PERIODICRPCTIMER.
//
void CBOINCGUIApp::OnFinishInit() {
if (!m_bGUIVisible) {
HideThisApp();
m_pFrame->wxWindow::Show();
if (m_pEventLog) {
m_pEventLog->wxWindow::Show();
}
}
}
#endif
int CBOINCGUIApp::OnExit() {
// Shutdown the System Idle Detection code
IdleTrackerDetach();
// Under wxWidgets 2.8.0, the task bar icons
// must be deleted for the app to exit its main loop
#ifdef __WXMAC__
if (m_pMacDockIcon) {
delete m_pMacDockIcon;
}
m_pMacDockIcon = NULL;
#endif
if (m_pTaskBarIcon) {
delete m_pTaskBarIcon;
}
m_pTaskBarIcon = NULL;
if (m_pDocument) {
m_pDocument->OnExit();
delete m_pDocument;
m_pDocument = NULL;
}
// Save Application State
SaveState();
if (m_pSkinManager) {
delete m_pSkinManager;
m_pSkinManager = NULL;
}
if (m_pLocale) {
delete m_pLocale;
m_pLocale = NULL;
}
if (m_pEventLog) {
m_pEventLog->Destroy();
m_pEventLog = NULL;
}
if (m_pInstanceChecker) {
delete m_pInstanceChecker;
m_pInstanceChecker = NULL;
}
diagnostics_finish();
return wxApp::OnExit();
}
#ifndef __WXMAC__
// Ensure we shut down gracefully on Windows logout or shutdown
void CBOINCGUIApp::OnEndSession(wxCloseEvent& ) {
s_bSkipExitConfirmation = true;
// On Windows Vista with UAC turned on, we have to spawn a new process to change the
// state of a service. When Windows is shutting down it'll prevent new processes from
// being created. Sometimes it'll present a crash dialog for the newly spawned application.
//
// So, we will just let the OS shutdown the service via the service control manager.
//
if (m_iShutdownCoreClient && m_pDocument->m_pClientManager->IsBOINCConfiguredAsDaemon()) {
m_iShutdownCoreClient = false;
}
CBOINCBaseFrame* pFrame = wxGetApp().GetFrame();
wxCommandEvent evt(wxEVT_COMMAND_MENU_SELECTED, wxID_EXIT);
// The event loop has already been stopped,
// so we must call OnExit directly
pFrame->OnExit(evt);
OnExit();
}
#endif
void CBOINCGUIApp::SaveState() {
// Save Application State
m_pConfig->SetPath(wxT("/"));
if (m_pSkinManager) {
m_pConfig->Write(wxT("Skin"), m_pSkinManager->GetSelectedSkin());
}
m_pConfig->Write(wxT("LanguageISO"), m_strISOLanguageCode);
m_pConfig->Write(wxT("AutomaticallyShutdownClient"), m_iShutdownCoreClient);
m_pConfig->Write(wxT("DisplayShutdownClientDialog"), m_iDisplayExitDialog);
m_pConfig->Write(wxT("DisplayShutdownConnectedClientDialog"), m_iDisplayShutdownConnectedClientDialog);
m_pConfig->Write(wxT("DisableAutoStart"), m_iBOINCMGRDisableAutoStart);
m_pConfig->Write(wxT("RunDaemon"), m_bRunDaemon);
}
///
/// Pass the command line parameters and descriptions to wxWidgets for display.
///
void CBOINCGUIApp::OnInitCmdLine(wxCmdLineParser &parser) {
wxApp::OnInitCmdLine(parser);
parser.AddSwitch("a", "autostart", _("BOINC Manager was started by the operating system automatically"));
#if defined(__WXMSW__) || defined(__WXMAC__)
parser.AddSwitch("s", "systray", _("Startup BOINC so only the system tray icon is visible"));
#else
parser.AddOption("e", "clientdir", _("Directory containing the BOINC Client executable"));
parser.AddOption("d", "datadir", _("BOINC data directory"));
#endif
parser.AddOption("n", "namehost", _("Host name or IP address"));
parser.AddOption("g", "gui_rpc_port", _("GUI RPC port number"));
parser.AddOption("p", "password", _("Password"));
parser.AddOption("b", "boincargs", _("Startup BOINC with these optional arguments"));
parser.AddSwitch("i","insecure", _("disable BOINC security users and permissions"));
parser.AddSwitch("c", "checkskins", _("set skin debugging mode to enable skin manager error messages"));
parser.AddSwitch("m", "multiple", _("multiple instances of BOINC Manager allowed"));
#if (defined(__WXMAC__) && defined(_DEBUG))
parser.AddLongOption("NSDocumentRevisionsDebugMode", _("Not used: workaround for bug in XCode 4.2"));
#endif
parser.AddSwitch("nd", "no-daemon", _("Not run the daemon"));
}
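// Illustrative invocations (executable name and values are examples only;
// on Windows and Mac the binary is typically launched from the installed bundle):
//   boincmgr --systray                      start with only the system tray icon visible
//   boincmgr -n 192.168.1.5 -g 31416 -p x   manage a remote client over GUI RPC
//   boincmgr --no-daemon                    start the Manager without launching the client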
///
/// Parse command line parameters.
///
bool CBOINCGUIApp::OnCmdLineParsed(wxCmdLineParser &parser) {
// Give default processing (-?, --help and --verbose) the chance to do something.
wxApp::OnCmdLineParsed(parser);
wxString portNum = wxEmptyString;
long longPort;
bool hostNameSpecified = false;
bool passwordSpecified = false;
parser.Found(wxT("boincargs"), &m_strBOINCArguments);
if (parser.Found(wxT("autostart"))) {
m_bBOINCMGRAutoStarted = true;
}
#if defined(__WXMSW__) || defined(__WXMAC__)
if (parser.Found(wxT("systray"))) {
m_bGUIVisible = false;
}
#endif
if (parser.Found(wxT("insecure"))) {
g_use_sandbox = false;
}
if (parser.Found(wxT("checkskins"))) {
m_bDebugSkins = true;
}
if (parser.Found(wxT("multiple"))) {
m_bMultipleInstancesOK = true;
}
#if !(defined(__WXMSW__) || defined(__WXMAC__))
if (!parser.Found(wxT("clientdir"), &m_strBOINCMGRRootDirectory)) {
m_strBOINCMGRRootDirectory = ::wxGetCwd();
}
if (m_strBOINCMGRRootDirectory.Last() != '/') {
m_strBOINCMGRRootDirectory.Append('/');
}
if (!parser.Found(wxT("datadir"), &m_strBOINCMGRDataDirectory)) {
m_strBOINCMGRDataDirectory = m_strBOINCMGRRootDirectory;
}
if (m_strBOINCMGRDataDirectory.Last() != '/') {
m_strBOINCMGRDataDirectory.Append('/');
}
#endif
if (parser.Found(wxT("namehost"), &m_strHostNameArg)) {
hostNameSpecified = true;
} else {
m_strHostNameArg = wxT("localhost");
}
if (parser.Found(wxT("gui_rpc_port"), &portNum)) {
if (portNum.ToLong(&longPort)) {
m_iRPCPortArg = longPort;
} else {
m_iRPCPortArg = GUI_RPC_PORT; // conversion failed
}
} else {
m_iRPCPortArg = GUI_RPC_PORT;
}
if (parser.Found(wxT("password"), &m_strPasswordArg)) {
passwordSpecified = true;
} else {
m_strPasswordArg = wxEmptyString;
}
if (hostNameSpecified && passwordSpecified) {
m_bMultipleInstancesOK = true;
}
if (parser.Found(wxT("no-daemon"))) {
m_bNeedRunDaemon = false;
}
return true;
}
///
/// Detect if another instance of this application is running.
// Returns true if there is and it is forbidden, otherwise false
//
// We must initialize m_pInstanceChecker even if m_bMultipleInstancesOK
// is true so CMainDocument::OnPoll() can call IsMgrMultipleInstance().
///
bool CBOINCGUIApp::DetectDuplicateInstance() {
#ifdef __WXMAC__
m_pInstanceChecker = new wxSingleInstanceChecker(
wxTheApp->GetAppName() + '-' + wxGetUserId(),
wxFileName::GetHomeDir() + "/Library/Application Support/BOINC"
);
#else
m_pInstanceChecker = new wxSingleInstanceChecker();
#endif
if (m_pInstanceChecker->IsAnotherRunning()) {
if (m_bMultipleInstancesOK) return false;
#ifdef __WXMSW__
CTaskBarIcon::FireAppRestore();
#endif
return true;
}
return false;
}
///
/// Determines the name of the BOINC Manager executable.
///
void CBOINCGUIApp::DetectExecutableName() {
#ifdef __WXMSW__
TCHAR szPath[MAX_PATH-1];
// get the full path of the executable
GetModuleFileName(NULL, szPath, (sizeof(szPath)/sizeof(TCHAR)));
TCHAR *pszProg = _tcsrchr(szPath, '\\');
if (pszProg) {
pszProg++;
}
// Store the executable name for later use.
m_strBOINCMGRExecutableName = pszProg;
#elif defined(__WXGTK__)
char path[PATH_MAX];
if (!get_real_executable_path(path, PATH_MAX)) {
// find filename component
char* name = strrchr(path, '/');
if (name) {
name++;
m_strBOINCMGRExecutableName = name;
}
}
#endif
}
///
/// Determines where the BOINC Manager is executing from.
///
void CBOINCGUIApp::DetectRootDirectory() {
#ifdef __WXMSW__
TCHAR szPath[MAX_PATH-1];
// get the full path of the executable
GetModuleFileName(NULL, szPath, (sizeof(szPath)/sizeof(TCHAR)));
TCHAR *pszProg = _tcsrchr(szPath, '\\');
if (pszProg) {
szPath[pszProg - szPath + 1] = 0;
}
// Store the root directory for later use.
m_strBOINCMGRRootDirectory = szPath;
#elif defined(__WXGTK__)
char path[PATH_MAX];
if (!get_real_executable_path(path, PATH_MAX)) {
// find path component
char* name = strrchr(path, '/');
if (name) {
name++;
*name = '\0';
m_strBOINCMGRRootDirectory = path;
}
}
#endif
}
///
/// Determines where the BOINC data directory is.
///
void CBOINCGUIApp::DetectDataDirectory() {
#ifdef __WXMSW__
//
// Determine BOINCMgr Data Directory
//
LONG lReturnValue;
HKEY hkSetupHive = NULL;
TCHAR szPath[MAX_PATH];
LPTSTR lpszValue = NULL;
LPTSTR lpszExpandedValue = NULL;
DWORD dwValueType = REG_EXPAND_SZ;
DWORD dwSize = 0;
// look up the BOINC data directory in the registry if it exists
lReturnValue = RegOpenKeyEx(
HKEY_LOCAL_MACHINE,
_T("SOFTWARE\\Space Sciences Laboratory, U.C. Berkeley\\BOINC Setup"),
0,
KEY_READ,
&hkSetupHive
);
if (lReturnValue == ERROR_SUCCESS) {
// How large does our buffer need to be?
lReturnValue = RegQueryValueEx(
hkSetupHive,
_T("DATADIR"),
NULL,
&dwValueType,
NULL,
&dwSize
);
if (lReturnValue != ERROR_FILE_NOT_FOUND) {
// Allocate the buffer space.
lpszValue = (LPTSTR) malloc(dwSize);
(*lpszValue) = _T('\0');
// Now get the data
lReturnValue = RegQueryValueEx(
hkSetupHive,
_T("DATADIR"),
NULL,
&dwValueType,
(LPBYTE)lpszValue,
&dwSize
);
// Expand the Strings
// We need to get the size of the buffer needed
dwSize = 0;
lReturnValue = ExpandEnvironmentStrings(lpszValue, NULL, dwSize);
if (lReturnValue) {
// Make the buffer big enough for the expanded string
lpszExpandedValue = (LPTSTR) malloc(lReturnValue*sizeof(TCHAR));
(*lpszExpandedValue) = _T('\0');
dwSize = lReturnValue;
ExpandEnvironmentStrings(lpszValue, lpszExpandedValue, dwSize);
// Store the data directory for later use.
m_strBOINCMGRDataDirectory = lpszExpandedValue;
}
}
} else {
if (SUCCEEDED(SHGetFolderPath(NULL, CSIDL_COMMON_APPDATA|CSIDL_FLAG_CREATE, NULL, SHGFP_TYPE_CURRENT, szPath))) {
_tcsncat(szPath, _T("\\boinc"), ((sizeof(szPath)/sizeof(TCHAR)) - _tcslen(szPath)));
if (wxDir::Exists(szPath)) {
// Store the data directory for later use.
m_strBOINCMGRDataDirectory = szPath;
}
}
}
// Cleanup
if (hkSetupHive) RegCloseKey(hkSetupHive);
if (lpszValue) free(lpszValue);
if (lpszExpandedValue) free(lpszExpandedValue);
#endif
#ifdef __WXMAC__
m_strBOINCMGRDataDirectory = wxT("/Library/Application Support/BOINC Data");
#endif
}
void CBOINCGUIApp::InitSupportedLanguages() {
wxInt32 iIndex = 0;
const wxLanguageInfo* liLanguage = NULL;
// Prepare the array
m_astrLanguages.Insert(wxEmptyString, 0, wxLANGUAGE_USER_DEFINED+1);
// These are just special tags so deal with them in a special way
m_astrLanguages[wxLANGUAGE_DEFAULT] = _("(Automatic Detection)");
m_astrLanguages[wxLANGUAGE_UNKNOWN] = _("(Unknown)");
m_astrLanguages[wxLANGUAGE_USER_DEFINED] = _("(User Defined)");
for (iIndex = 0; iIndex <= wxLANGUAGE_USER_DEFINED; iIndex++) {
liLanguage = wxLocale::GetLanguageInfo(iIndex);
if (liLanguage) {
m_astrLanguages[iIndex] = liLanguage->Description;
}
}
}
int CBOINCGUIApp::IdleTrackerAttach() {
#ifdef __WXMSW__
::attach_idle_monitor();
#endif
return 0;
}
int CBOINCGUIApp::IdleTrackerDetach() {
#ifdef __WXMSW__
::detach_idle_monitor();
#endif
return 0;
}
void CBOINCGUIApp::OnActivateApp(wxActivateEvent& event) {
m_bProcessingActivateAppEvent = true;
if (event.GetActive()) {
#ifdef __WXMAC__
ShowInterface();
#else
#ifdef __WXGTK__
// Linux allows the Event Log to be brought forward and made active
// even if we have a modal dialog displayed (associated with our
// main frame.) This test is needed to allow bringing the modal
// dialog forward again by clicking on its title bar.
if (!IsModalDialogDisplayed())
#endif
{
bool keepEventLogInFront = m_bEventLogWasActive;
if (m_pEventLog && !m_pEventLog->IsIconized() && !keepEventLogInFront) {
m_pEventLog->Raise();
}
if (m_pFrame) {
m_pFrame->Raise();
}
if (m_pEventLog && !m_pEventLog->IsIconized() && keepEventLogInFront) {
m_pEventLog->Raise();
}
}
#endif
}
event.Skip();
m_bProcessingActivateAppEvent = false;
}
void CBOINCGUIApp::OnRPCFinished( CRPCFinishedEvent& event ) {
CMainDocument* pDoc = wxGetApp().GetDocument();
wxASSERT(pDoc);
wxASSERT(wxDynamicCast(pDoc, CMainDocument));
pDoc->OnRPCComplete(event);
}
int CBOINCGUIApp::UpdateSystemIdleDetection() {
#ifdef __WXMSW__
return get_idle_tick_count();
#else
return TRUE;
#endif
}
int CBOINCGUIApp::StartBOINCScreensaverTest() {
#ifdef __WXMSW__
wxString strExecute = wxEmptyString;
wxChar szExecutableDirectory[4096];
memset(szExecutableDirectory, 0, sizeof(szExecutableDirectory));
// On Windows the screensaver is located in the Windows directory.
GetWindowsDirectory(
szExecutableDirectory,
(sizeof(szExecutableDirectory) / sizeof(wxChar))
);
// Append boinc.scr to the end of the strExecute string and get ready to rock
strExecute = wxT("\"") + wxString(szExecutableDirectory) + wxT("\\boinc.scr\" /t");
::wxExecute(strExecute);
#endif
return 0;
}
int CBOINCGUIApp::StartBOINCDefaultScreensaverTest() {
#ifdef __WXMSW__
wxString strExecute = wxEmptyString;
strExecute = wxT("\"") + m_strBOINCMGRRootDirectory + wxT("\\boincscr.exe\" --test");
::wxExecute(strExecute);
#endif
return 0;
}
// Display the Event Log; it is a modeless dialog not owned by
// any other UI element.
// To work around a Linux bug in wxWidgets 3.0 which prevents
// bringing the main frame forward on top of a modeless dialog,
// the Event Log is now a wxFrame on Linux only.
void CBOINCGUIApp::DisplayEventLog(bool bShowWindow) {
if (m_pEventLog) {
if (bShowWindow) {
if (m_pEventLog->IsIconized()) {
m_pEventLog->Iconize(false);
}
m_pEventLog->Raise();
}
} else {
m_pEventLog = new CDlgEventLog();
if (m_pEventLog) {
m_pEventLog->Show(bShowWindow);
if (bShowWindow) {
m_pEventLog->Raise();
}
if (m_pFrame) {
m_pFrame->UpdateRefreshTimerInterval();
}
}
}
}
void CBOINCGUIApp::OnEventLogClose() {
m_pEventLog = NULL;
if (m_pFrame) {
m_pFrame->UpdateRefreshTimerInterval();
}
}
// The skin has changed and all UI elements need to reload their bitmaps.
//
void CBOINCGUIApp::FireReloadSkin() {
if (m_pFrame) {
m_pFrame->FireReloadSkin();
}
if (m_pTaskBarIcon) {
m_pTaskBarIcon->FireReloadSkin();
}
}
bool CBOINCGUIApp::SetActiveGUI(int iGUISelection, bool bShowWindow) {
wxLogTrace(wxT("Function Start/End"), wxT("CBOINCGUIApp::SetActiveGUI - Function Begin"));
wxLogTrace(wxT("Function Start/End"), wxT("CBOINCGUIApp::SetActiveGUI - GUI Selection: '%d', Show: %d'"), iGUISelection, (int)bShowWindow);
CBOINCBaseFrame* pNewFrame = NULL;
CBOINCBaseFrame* pOldFrame = m_pFrame;
wxInt32 iTop = 0;
wxInt32 iLeft = 0;
wxInt32 iHeight = 0;
wxInt32 iWidth = 0;
bool bWindowMaximized = false;
// Create the new window
if ((iGUISelection != m_iGUISelected) || !m_pFrame) {
// Retrieve the desired window state before creating the
// desired frames
if (BOINC_ADVANCEDGUI == iGUISelection) {
m_pConfig->SetPath(wxT("/"));
m_pConfig->Read(wxT("YPos"), &iTop, 30);
m_pConfig->Read(wxT("XPos"), &iLeft, 30);
m_pConfig->Read(wxT("Width"), &iWidth, 800);
m_pConfig->Read(wxT("Height"), &iHeight, 600);
m_pConfig->Read(wxT("WindowMaximized"), &bWindowMaximized, false);
// Guard against a rare situation where registry values are zero
if (iWidth < 50) iWidth = 800;
if (iHeight < 50) iHeight = 600;
} else {
m_pConfig->SetPath(wxT("/Simple"));
m_pConfig->Read(wxT("YPos"), &iTop, 30);
m_pConfig->Read(wxT("XPos"), &iLeft, 30);
// We don't save Simple View's width & height since its
// window is not resizable, so don't try to read them
#ifdef __WXMAC__
// m_pConfig->Read(wxT("Width"), &iWidth, 409);
// m_pConfig->Read(wxT("Height"), &iHeight, 561);
iWidth = 409;
iHeight = 561;
#else
// m_pConfig->Read(wxT("Width"), &iWidth, 416);
// m_pConfig->Read(wxT("Height"), &iHeight, 570);
iWidth = 416;
iHeight = 570;
#endif
}
// Make sure that the new window is going to be visible
// on a screen
#ifdef __WXMAC__
if (!IsWindowOnScreen(iLeft, iTop, iWidth, iHeight)) {
iTop = iLeft = 30;
}
#else
// If either coordinate is less than 0, set it to 30 to ensure
// the window displays on the screen.
if ( iLeft < 0 ) iLeft = 30;
if ( iTop < 0 ) iTop = 30;
// Read the size of the screen
wxInt32 iMaxWidth = wxSystemSettings::GetMetric( wxSYS_SCREEN_X );
wxInt32 iMaxHeight = wxSystemSettings::GetMetric( wxSYS_SCREEN_Y );
// Make sure that it doesn't go off to the right or bottom
if ( iLeft + iWidth > iMaxWidth ) iLeft = iMaxWidth - iWidth;
if ( iTop + iHeight > iMaxHeight ) iTop = iMaxHeight - iHeight;
#endif
// Create the main window
//
if (BOINC_ADVANCEDGUI == iGUISelection) {
// Initialize the advanced gui window
pNewFrame = new CAdvancedFrame(
m_pSkinManager->GetAdvanced()->GetApplicationName(),
m_pSkinManager->GetAdvanced()->GetApplicationIcon(),
wxPoint(iLeft, iTop),
wxSize(iWidth, iHeight)
);
} else {
// Initialize the simple gui window
pNewFrame = new CSimpleFrame(
m_pSkinManager->GetAdvanced()->GetApplicationName(),
m_pSkinManager->GetAdvanced()->GetApplicationIcon(),
wxPoint(iLeft, iTop),
wxSize(iWidth, iHeight)
);
}
wxASSERT(pNewFrame);
if (pNewFrame) {
SetTopWindow(pNewFrame);
// Store the new frame for future use
m_pFrame = pNewFrame;
// Hide the old one if it exists. We must do this
// after updating m_pFrame to prevent Mac OSX from
// hiding the application
if (pOldFrame) pOldFrame->Hide();
// Delete the old one if it exists
if (pOldFrame) pOldFrame->Destroy();
if (iGUISelection != m_iGUISelected) {
m_iGUISelected = iGUISelection;
m_pConfig->SetPath(wxT("/"));
m_pConfig->Write(wxT("GUISelection"), iGUISelection);
m_pConfig->Flush();
}
}
}
// Show the new frame if needed
if (!m_bProcessingActivateAppEvent) {
if (m_pFrame && bShowWindow) {
if (m_pEventLog && !m_pEventLog->IsIconized()) {
m_pEventLog->Show();
m_pEventLog->Raise();
#ifdef __WXMSW__
::SetForegroundWindow((HWND)m_pEventLog->GetHWND());
#endif
}
if (!m_pFrame->IsShown()) {
m_pFrame->Show();
}
if (m_pFrame->IsIconized()) {
m_pFrame->Maximize(false);
}
else if (BOINC_ADVANCEDGUI == iGUISelection && bWindowMaximized) {
m_pFrame->Maximize();
}
m_pFrame->Raise();
#ifdef __WXMSW__
::SetForegroundWindow((HWND)m_pFrame->GetHWND());
#endif
}
}
wxLogTrace(wxT("Function Start/End"), wxT("CBOINCGUIApp::SetActiveGUI - Function End"));
return true;
}
int CBOINCGUIApp::ConfirmExit() {
CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced();
CMainDocument* pDoc = wxGetApp().GetDocument();
wxString strConnectedComputer = wxEmptyString;
bool bWasVisible;
int retval = 0;
wxASSERT(pDoc);
wxASSERT(pSkinAdvanced);
wxASSERT(wxDynamicCast(pDoc, CMainDocument));
wxASSERT(wxDynamicCast(pSkinAdvanced, CSkinAdvanced));
pDoc->GetConnectedComputerName(strConnectedComputer);
if (!pDoc->IsComputerNameLocal(strConnectedComputer)) {
// Don't shut down remote clients on Manager exit
return 1;
}
// Don't run confirmation dialog if logging out or shutting down Mac,
// or if emergency exit from AsyncRPCDlg
if (s_bSkipExitConfirmation) return 1;
// Don't run confirmation dialog if second instance of Manager
if (IsMgrMultipleInstance()) return 1;
if (!m_iDisplayExitDialog) {
// Mac: User doesn't want to display the dialog and just wants to use their previous value.
// Win & Linux: User doesn't want to display the dialog and wants to shutdown the client.
return 1;
}
bWasVisible = IsApplicationVisible();
ShowApplication(true);
CDlgExitMessage dlg(NULL);
if (!pSkinAdvanced->GetExitMessage().IsEmpty()) {
dlg.m_DialogExitMessage->SetLabel(pSkinAdvanced->GetExitMessage());
}
#ifdef __WXMSW__
if (m_iShutdownCoreClient) {
dlg.m_DialogShutdownCoreClient->SetValue(TRUE);
}
#endif
if (m_iDisplayExitDialog) {
dlg.m_DialogDisplay->SetValue(FALSE);
}
dlg.Fit();
dlg.Centre();
if (wxID_OK == dlg.ShowModal()) {
#ifdef __WXMAC__
s_bSkipExitConfirmation = true; // Don't ask twice (only affects Mac)
#else
m_iShutdownCoreClient = dlg.m_DialogShutdownCoreClient->GetValue();
#endif
m_iDisplayExitDialog = !dlg.m_DialogDisplay->GetValue();
retval = true;
}
if (!bWasVisible) {
ShowApplication(false);
}
return retval; // 0 if the user cancelled exit
}
// Use this instead of wxMessageBox from all tab Views to suppress
// Periodic RPCs. See comment in CMainDocument::RunPeriodicRPCs()
// for a fuller explanation.
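// Illustrative call from a tab view (the arguments shown are hypothetical):
//   wxGetApp().SafeMessageBox(_("Example message"), _("Example"), wxOK, this, -1, -1);
// The m_bSafeMessageBoxDisplayed counter incremented below is what
// IsModalDialogDisplayed() checks so periodic RPCs can be suppressed.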
int CBOINCGUIApp::SafeMessageBox(const wxString& message, const wxString& caption, long style,
wxWindow *parent, int x, int y )
{
int retval;
m_bSafeMessageBoxDisplayed++;
retval = wxMessageBox(message, caption, style, parent, x, y);
m_bSafeMessageBoxDisplayed--;
return retval;
}
#ifndef __WXMAC__
// See clientgui/mac/BOINCGUIApp.mm for the Mac versions.
///
/// Determines if the current process is visible.
///
/// @return
/// true if the current process is visible, otherwise false.
///
bool CBOINCGUIApp::IsApplicationVisible() {
return false;
}
///
/// Shows or hides the current process.
///
/// @param bShow
/// true will show the process, false will hide the process.
///
void CBOINCGUIApp::ShowApplication(bool) {
}
#endif
bool CBOINCGUIApp::ShowInterface() {
ShowApplication(true);
return SetActiveGUI(m_iGUISelected, true);
}
bool CBOINCGUIApp::ShowNotifications() {
bool retval = false;
retval = SetActiveGUI(m_iGUISelected, true);
if (retval) {
GetFrame()->FireNotification();
GetDocument()->UpdateUnreadNoticeState();
}
return retval;
}
bool CBOINCGUIApp::IsModalDialogDisplayed() {
if (m_bSafeMessageBoxDisplayed) return true;
// Search for the dialog by ID since all of BOINC Manager's
// dialog IDs are 10000.
if (wxDynamicCast(wxWindow::FindWindowById(ID_ANYDIALOG), wxDialog)) {
return true;
}
if (m_pDocument) {
if (m_pDocument->WaitingForRPC()) {
return true;
}
}
return false;
}
// Prevent recursive entry of CMainDocument::RequestRPC()
int CBOINCGUIApp::FilterEvent(wxEvent &event) {
int theEventType;
wxDialog* theRPCWaitDialog;
wxObject* theObject;
if (!m_pDocument) return -1;
theEventType = event.GetEventType();
if (m_pDocument->WaitingForRPC()) {
// If in RPC Please Wait dialog, reject all command
// and timer events except:
// - RPC Finished
// - those for that dialog or its children
// - Open Manager menu item from system tray icon
if ((theEventType == wxEVT_COMMAND_MENU_SELECTED) && (event.GetId() == wxID_OPEN)) {
return -1;
}
theRPCWaitDialog = m_pDocument->GetRPCWaitDialog();
theObject = event.GetEventObject();
while (theObject) {
if (!theObject->IsKindOf(CLASSINFO(wxWindow))) break;
if (theObject == theRPCWaitDialog) return -1;
theObject = ((wxWindow*)theObject)->GetParent();
}
// Continue with rest of filtering below
} else {
// Do limited filtering if shutting down to allow RPC
// completion events but not events which start new RPCs
if (!m_bFilterEvents) return -1;
}
// Allow all except Command, Timer and Mouse Moved events
if (event.IsCommandEvent()) {
return false;
}
if (theEventType == wxEVT_TIMER) {
return false;
}
#ifdef __WXMSW__
if (theEventType == wxEVT_TASKBAR_MOVE) {
return false;
}
#endif
return -1;
}
| 1 | 9,156 | You could change the name of the command line switch too. Short options are typically one character after hyphen. wxWidgets' command line parser seems to handle `-nd` without confusing it with `-n` or `-d` but I'm not sure if that's by design or by accident. I'd remove the short option. `--no-daemon` is with hyphen but the rest of Manager and client uses underscore. Please change that. | BOINC-boinc | php |
@@ -69,8 +69,8 @@ func AddFlags(flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing")
flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
- flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "use DIR to server side copy flies from.")
- flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Compare dest to DIR also.")
+ flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "Include additional server-side path during comparison.")
+ flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Implies --compare-dest but also copies files from path into destination.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix to add to changed files.")
flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.") | 1 | // Package configflags defines the flags used by rclone. It is
// decoupled into a separate package so it can be replaced.
package configflags
// Options set by command line flags
import (
"log"
"net"
"path/filepath"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/flags"
fsLog "github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/rc"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
)
var (
// these will get interpreted into fs.Config via SetFlags() below
verbose int
quiet bool
dumpHeaders bool
dumpBodies bool
deleteBefore bool
deleteDuring bool
deleteAfter bool
bindAddr string
disableFeatures string
)
// AddFlags adds the non filing system specific flags to the command
func AddFlags(flagSet *pflag.FlagSet) {
rc.AddOption("main", fs.Config)
// NB defaults which aren't the zero for the type should be set in fs/config.go NewConfig
flags.CountVarP(flagSet, &verbose, "verbose", "v", "Print lots more stuff (repeat for more)")
flags.BoolVarP(flagSet, &quiet, "quiet", "q", false, "Print as little stuff as possible")
flags.DurationVarP(flagSet, &fs.Config.ModifyWindow, "modify-window", "", fs.Config.ModifyWindow, "Max time diff to be considered the same")
flags.IntVarP(flagSet, &fs.Config.Checkers, "checkers", "", fs.Config.Checkers, "Number of checkers to run in parallel.")
flags.IntVarP(flagSet, &fs.Config.Transfers, "transfers", "", fs.Config.Transfers, "Number of file transfers to run in parallel.")
flags.StringVarP(flagSet, &config.ConfigPath, "config", "", config.ConfigPath, "Config file.")
flags.StringVarP(flagSet, &config.CacheDir, "cache-dir", "", config.CacheDir, "Directory rclone will use for caching.")
flags.BoolVarP(flagSet, &fs.Config.CheckSum, "checksum", "c", fs.Config.CheckSum, "Skip based on checksum (if available) & size, not mod-time & size")
flags.BoolVarP(flagSet, &fs.Config.SizeOnly, "size-only", "", fs.Config.SizeOnly, "Skip based on size only, not mod-time or checksum")
flags.BoolVarP(flagSet, &fs.Config.IgnoreTimes, "ignore-times", "I", fs.Config.IgnoreTimes, "Don't skip files that match size and time - transfer all files")
flags.BoolVarP(flagSet, &fs.Config.IgnoreExisting, "ignore-existing", "", fs.Config.IgnoreExisting, "Skip all files that exist on destination")
flags.BoolVarP(flagSet, &fs.Config.IgnoreErrors, "ignore-errors", "", fs.Config.IgnoreErrors, "delete even if there are I/O errors")
flags.BoolVarP(flagSet, &fs.Config.DryRun, "dry-run", "n", fs.Config.DryRun, "Do a trial run with no permanent changes")
flags.DurationVarP(flagSet, &fs.Config.ConnectTimeout, "contimeout", "", fs.Config.ConnectTimeout, "Connect timeout")
flags.DurationVarP(flagSet, &fs.Config.Timeout, "timeout", "", fs.Config.Timeout, "IO idle timeout")
flags.BoolVarP(flagSet, &dumpHeaders, "dump-headers", "", false, "Dump HTTP headers - may contain sensitive info")
flags.BoolVarP(flagSet, &dumpBodies, "dump-bodies", "", false, "Dump HTTP headers and bodies - may contain sensitive info")
flags.BoolVarP(flagSet, &fs.Config.InsecureSkipVerify, "no-check-certificate", "", fs.Config.InsecureSkipVerify, "Do not verify the server SSL certificate. Insecure.")
flags.BoolVarP(flagSet, &fs.Config.AskPassword, "ask-password", "", fs.Config.AskPassword, "Allow prompt for password for encrypted configuration.")
flags.BoolVarP(flagSet, &deleteBefore, "delete-before", "", false, "When synchronizing, delete files on destination before transferring")
flags.BoolVarP(flagSet, &deleteDuring, "delete-during", "", false, "When synchronizing, delete files during transfer")
flags.BoolVarP(flagSet, &deleteAfter, "delete-after", "", false, "When synchronizing, delete files on destination after transferring (default)")
flags.Int64VarP(flagSet, &fs.Config.MaxDelete, "max-delete", "", -1, "When synchronizing, limit the number of deletes")
flags.BoolVarP(flagSet, &fs.Config.TrackRenames, "track-renames", "", fs.Config.TrackRenames, "When synchronizing, track file renames and do a server side move if possible")
flags.IntVarP(flagSet, &fs.Config.LowLevelRetries, "low-level-retries", "", fs.Config.LowLevelRetries, "Number of low level retries to do.")
flags.BoolVarP(flagSet, &fs.Config.UpdateOlder, "update", "u", fs.Config.UpdateOlder, "Skip files that are newer on the destination.")
flags.BoolVarP(flagSet, &fs.Config.UseServerModTime, "use-server-modtime", "", fs.Config.UseServerModTime, "Use server modified time instead of object metadata")
flags.BoolVarP(flagSet, &fs.Config.NoGzip, "no-gzip-encoding", "", fs.Config.NoGzip, "Don't set Accept-Encoding: gzip.")
flags.IntVarP(flagSet, &fs.Config.MaxDepth, "max-depth", "", fs.Config.MaxDepth, "If set limits the recursion depth to this.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreSize, "ignore-size", "", false, "Ignore size when skipping; use mod-time or checksum.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreChecksum, "ignore-checksum", "", fs.Config.IgnoreChecksum, "Skip post copy check of checksums.")
flags.BoolVarP(flagSet, &fs.Config.IgnoreCaseSync, "ignore-case-sync", "", fs.Config.IgnoreCaseSync, "Ignore case when synchronizing")
flags.BoolVarP(flagSet, &fs.Config.NoTraverse, "no-traverse", "", fs.Config.NoTraverse, "Don't traverse destination file system on copy.")
flags.BoolVarP(flagSet, &fs.Config.NoUpdateModTime, "no-update-modtime", "", fs.Config.NoUpdateModTime, "Don't update destination mod-time if files identical.")
flags.StringVarP(flagSet, &fs.Config.CompareDest, "compare-dest", "", fs.Config.CompareDest, "use DIR to server side copy flies from.")
flags.StringVarP(flagSet, &fs.Config.CopyDest, "copy-dest", "", fs.Config.CopyDest, "Compare dest to DIR also.")
flags.StringVarP(flagSet, &fs.Config.BackupDir, "backup-dir", "", fs.Config.BackupDir, "Make backups into hierarchy based in DIR.")
flags.StringVarP(flagSet, &fs.Config.Suffix, "suffix", "", fs.Config.Suffix, "Suffix to add to changed files.")
flags.BoolVarP(flagSet, &fs.Config.SuffixKeepExtension, "suffix-keep-extension", "", fs.Config.SuffixKeepExtension, "Preserve the extension when using --suffix.")
flags.BoolVarP(flagSet, &fs.Config.UseListR, "fast-list", "", fs.Config.UseListR, "Use recursive list if available. Uses more memory but fewer transactions.")
flags.Float64VarP(flagSet, &fs.Config.TPSLimit, "tpslimit", "", fs.Config.TPSLimit, "Limit HTTP transactions per second to this.")
flags.IntVarP(flagSet, &fs.Config.TPSLimitBurst, "tpslimit-burst", "", fs.Config.TPSLimitBurst, "Max burst of transactions for --tpslimit.")
flags.StringVarP(flagSet, &bindAddr, "bind", "", "", "Local address to bind to for outgoing connections, IPv4, IPv6 or name.")
flags.StringVarP(flagSet, &disableFeatures, "disable", "", "", "Disable a comma separated list of features. Use help to see a list.")
flags.StringVarP(flagSet, &fs.Config.UserAgent, "user-agent", "", fs.Config.UserAgent, "Set the user-agent to a specified string. The default is rclone/ version")
flags.BoolVarP(flagSet, &fs.Config.Immutable, "immutable", "", fs.Config.Immutable, "Do not modify files. Fail if existing files have been modified.")
flags.BoolVarP(flagSet, &fs.Config.AutoConfirm, "auto-confirm", "", fs.Config.AutoConfirm, "If enabled, do not request console confirmation.")
flags.IntVarP(flagSet, &fs.Config.StatsFileNameLength, "stats-file-name-length", "", fs.Config.StatsFileNameLength, "Max file name length in stats. 0 for no limit")
flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
flags.IntVarP(flagSet, &fs.Config.MaxStatsGroups, "max-stats-groups", "", fs.Config.MaxStatsGroups, "Maximum number of stats groups to keep in memory. On max oldest is discarded.")
flags.BoolVarP(flagSet, &fs.Config.StatsOneLine, "stats-one-line", "", fs.Config.StatsOneLine, "Make the stats fit on one line.")
flags.BoolVarP(flagSet, &fs.Config.StatsOneLineDate, "stats-one-line-date", "", fs.Config.StatsOneLineDate, "Enables --stats-one-line and add current date/time prefix.")
flags.StringVarP(flagSet, &fs.Config.StatsOneLineDateFormat, "stats-one-line-date-format", "", fs.Config.StatsOneLineDateFormat, "Enables --stats-one-line-date and uses custom formatted date. Enclose date string in double quotes (\"). See https://golang.org/pkg/time/#Time.Format")
flags.BoolVarP(flagSet, &fs.Config.Progress, "progress", "P", fs.Config.Progress, "Show progress during transfer.")
flags.BoolVarP(flagSet, &fs.Config.Cookie, "use-cookies", "", fs.Config.Cookie, "Enable session cookiejar.")
flags.BoolVarP(flagSet, &fs.Config.UseMmap, "use-mmap", "", fs.Config.UseMmap, "Use mmap allocator (see docs).")
flags.StringVarP(flagSet, &fs.Config.CaCert, "ca-cert", "", fs.Config.CaCert, "CA certificate used to verify servers")
flags.StringVarP(flagSet, &fs.Config.ClientCert, "client-cert", "", fs.Config.ClientCert, "Client SSL certificate (PEM) for mutual TLS auth")
flags.StringVarP(flagSet, &fs.Config.ClientKey, "client-key", "", fs.Config.ClientKey, "Client SSL private key (PEM) for mutual TLS auth")
flags.FVarP(flagSet, &fs.Config.MultiThreadCutoff, "multi-thread-cutoff", "", "Use multi-thread downloads for files above this size.")
flags.IntVarP(flagSet, &fs.Config.MultiThreadStreams, "multi-thread-streams", "", fs.Config.MultiThreadStreams, "Max number of streams to use for multi-thread downloads.")
flags.BoolVarP(flagSet, &fs.Config.UseJSONLog, "use-json-log", "", fs.Config.UseJSONLog, "Use json log format.")
}
// SetFlags converts into config any flags which weren't straightforward
func SetFlags() {
if verbose >= 2 {
fs.Config.LogLevel = fs.LogLevelDebug
} else if verbose >= 1 {
fs.Config.LogLevel = fs.LogLevelInfo
}
if quiet {
if verbose > 0 {
log.Fatalf("Can't set -v and -q")
}
fs.Config.LogLevel = fs.LogLevelError
}
logLevelFlag := pflag.Lookup("log-level")
if logLevelFlag != nil && logLevelFlag.Changed {
if verbose > 0 {
log.Fatalf("Can't set -v and --log-level")
}
if quiet {
log.Fatalf("Can't set -q and --log-level")
}
}
if fs.Config.UseJSONLog {
logrus.AddHook(fsLog.NewCallerHook())
logrus.SetFormatter(&logrus.JSONFormatter{
TimestampFormat: "2006-01-02T15:04:05.999999-07:00",
})
logrus.SetLevel(logrus.DebugLevel)
switch fs.Config.LogLevel {
case fs.LogLevelEmergency, fs.LogLevelAlert:
logrus.SetLevel(logrus.PanicLevel)
case fs.LogLevelCritical:
logrus.SetLevel(logrus.FatalLevel)
case fs.LogLevelError:
logrus.SetLevel(logrus.ErrorLevel)
case fs.LogLevelWarning, fs.LogLevelNotice:
logrus.SetLevel(logrus.WarnLevel)
case fs.LogLevelInfo:
logrus.SetLevel(logrus.InfoLevel)
case fs.LogLevelDebug:
logrus.SetLevel(logrus.DebugLevel)
}
}
if dumpHeaders {
fs.Config.Dump |= fs.DumpHeaders
fs.Logf(nil, "--dump-headers is obsolete - please use --dump headers instead")
}
if dumpBodies {
fs.Config.Dump |= fs.DumpBodies
fs.Logf(nil, "--dump-bodies is obsolete - please use --dump bodies instead")
}
switch {
case deleteBefore && (deleteDuring || deleteAfter),
deleteDuring && deleteAfter:
log.Fatalf(`Only one of --delete-before, --delete-during or --delete-after can be used.`)
case deleteBefore:
fs.Config.DeleteMode = fs.DeleteModeBefore
case deleteDuring:
fs.Config.DeleteMode = fs.DeleteModeDuring
case deleteAfter:
fs.Config.DeleteMode = fs.DeleteModeAfter
default:
fs.Config.DeleteMode = fs.DeleteModeDefault
}
if fs.Config.CompareDest != "" && fs.Config.CopyDest != "" {
log.Fatalf(`Can't use --compare-dest with --copy-dest.`)
}
switch {
case len(fs.Config.StatsOneLineDateFormat) > 0:
fs.Config.StatsOneLineDate = true
fs.Config.StatsOneLine = true
case fs.Config.StatsOneLineDate:
fs.Config.StatsOneLineDateFormat = "2006/01/02 15:04:05 - "
fs.Config.StatsOneLine = true
}
if bindAddr != "" {
addrs, err := net.LookupIP(bindAddr)
if err != nil {
log.Fatalf("--bind: Failed to parse %q as IP address: %v", bindAddr, err)
}
if len(addrs) != 1 {
log.Fatalf("--bind: Expecting 1 IP address for %q but got %d", bindAddr, len(addrs))
}
fs.Config.BindAddr = addrs[0]
}
if disableFeatures != "" {
if disableFeatures == "help" {
log.Fatalf("Possible backend features are: %s\n", strings.Join(new(fs.Features).List(), ", "))
}
fs.Config.DisableFeatures = strings.Split(disableFeatures, ",")
}
// Make the config file absolute
configPath, err := filepath.Abs(config.ConfigPath)
if err == nil {
config.ConfigPath = configPath
}
// Set whether multi-thread-streams was set
multiThreadStreamsFlag := pflag.Lookup("multi-thread-streams")
fs.Config.MultiThreadSet = multiThreadStreamsFlag != nil && multiThreadStreamsFlag.Changed
}
| 1 | 9,853 | File is not `goimports`-ed (from `goimports`) | rclone-rclone | go |
@@ -439,7 +439,7 @@ func (r *RpmRepoCloner) clonePackage(baseArgs []string, enabledRepoOrder ...stri
logger.Log.Debugf("stderr: %s", stderr)
if err != nil {
- logger.Log.Errorf("tdnf error (will continue if the only errors are toybox conflicts):\n '%s'", stderr)
+ logger.Log.Debugf("tdnf error (will continue if the only errors are toybox conflicts):\n '%s'", stderr)
}
// ============== TDNF SPECIFIC IMPLEMENTATION ============== | 1 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package rpmrepocloner
import (
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"microsoft.com/pkggen/internal/buildpipeline"
"microsoft.com/pkggen/internal/logger"
"microsoft.com/pkggen/internal/packagerepo/repocloner"
"microsoft.com/pkggen/internal/packagerepo/repomanager/rpmrepomanager"
"microsoft.com/pkggen/internal/pkgjson"
"microsoft.com/pkggen/internal/safechroot"
"microsoft.com/pkggen/internal/shell"
)
const (
squashChrootRunErrors = false
chrootDownloadDir = "/outputrpms"
leaveChrootFilesOnDisk = false
updateRepoID = "mariner-official-update"
fetcherRepoID = "fetcher-cloned-repo"
)
var (
// Every valid line will be of the form: <package>-<version>.<arch> : <Description>
packageLookupNameMatchRegex = regexp.MustCompile(`^\s*([^:]+(x86_64|aarch64|noarch))\s*:`)
// Every valid line will be of the form: <package_name>.<architecture> <version>.<dist> fetcher-cloned-repo
listedPackageRegex = regexp.MustCompile(`^\s*(?P<Name>[a-zA-Z0-9_+-]+)\.(?P<Arch>[a-zA-Z0-9_+-]+)\s*(?P<Version>[a-zA-Z0-9._+-]+)\.(?P<Dist>[a-zA-Z0-9_+-]+)\s*fetcher-cloned-repo`)
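// Hypothetical examples of lines these regexes are intended to match
// (the package names are illustrative only, not from a real repo listing):
//   packageLookupNameMatchRegex: "examplepkg-1.0-1.cm1.x86_64 : An example package"
//   listedPackageRegex:          "examplepkg.x86_64 1.0-1.cm1 fetcher-cloned-repo"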
)
const (
listMatchSubString = iota
listPackageName = iota
listPackageArch = iota
listPackageVersion = iota
listPackageDist = iota
listMaxMatchLen = iota
)
// RpmRepoCloner represents an RPM repository cloner.
type RpmRepoCloner struct {
chroot *safechroot.Chroot
useUpdateRepo bool
cloneDir string
}
// New creates a new RpmRepoCloner
func New() *RpmRepoCloner {
return &RpmRepoCloner{}
}
// Initialize initializes rpmrepocloner, enabling Clone() to be called.
// - destinationDir is the directory to save RPMs
// - tmpDir is the directory to create a chroot
// - workerTar is the path to the worker tar used to seed the chroot
// - existingRpmsDir is the directory with prebuilt RPMs
// - useUpdateRepo if set, the upstream update repository will be used.
// - repoDefinitions is a list of repo files to use when cloning RPMs
func (r *RpmRepoCloner) Initialize(destinationDir, tmpDir, workerTar, existingRpmsDir string, useUpdateRepo bool, repoDefinitions []string) (err error) {
const (
isExistingDir = false
bindFsType = ""
bindData = ""
chrootLocalRpmsDir = "/localrpms"
overlayWorkDirectory = "/overlaywork/workdir"
overlayUpperDirectory = "/overlaywork/upper"
overlaySource = "overlay"
)
r.useUpdateRepo = useUpdateRepo
if !useUpdateRepo {
logger.Log.Warnf("Disabling update repo")
}
// Ensure that if initialization fails, the chroot is closed
defer func() {
if err != nil {
logger.Log.Warnf("Failed to initialize cloner. Error: %s", err)
if r.chroot != nil {
closeErr := r.chroot.Close(leaveChrootFilesOnDisk)
if closeErr != nil {
logger.Log.Panicf("Unable to close chroot on failed initialization. Error: %s", closeErr)
}
}
}
}()
// Create the directory to download into
err = os.MkdirAll(destinationDir, os.ModePerm)
if err != nil {
logger.Log.Warnf("Could not create download directory (%s)", destinationDir)
return
}
// Setup the chroot
logger.Log.Infof("Creating cloning environment to populate (%s)", destinationDir)
r.chroot = safechroot.NewChroot(tmpDir, isExistingDir)
r.cloneDir = destinationDir
// Setup mount points for the chroot.
//
// 1) Mount the provided directory of existing RPMs into the chroot as an overlay,
// ensuring the chroot can read the files, but not alter the actual directory outside
// the chroot.
//
// 2) Mount the directory to download RPMs into as a bind, allowing the chroot to write
// files into it.
overlayMount, overlayExtraDirs := safechroot.NewOverlayMountPoint(r.chroot.RootDir(), overlaySource, chrootLocalRpmsDir, existingRpmsDir, overlayUpperDirectory, overlayWorkDirectory)
extraMountPoints := []*safechroot.MountPoint{
overlayMount,
safechroot.NewMountPoint(destinationDir, chrootDownloadDir, bindFsType, safechroot.BindMountPointFlags, bindData),
}
// Also request that /overlaywork is created before any chroot mounts happen so the overlay can
// be created successfully
err = r.chroot.Initialize(workerTar, overlayExtraDirs, extraMountPoints)
if err != nil {
r.chroot = nil
return
}
logger.Log.Info("Initializing local RPM repository")
err = r.initializeMountedChrootRepo(chrootLocalRpmsDir)
if err != nil {
return
}
logger.Log.Info("Initializing repository configurations")
err = r.initializeRepoDefinitions(repoDefinitions)
if err != nil {
return
}
return
}
// AddNetworkFiles adds files needed for networking capabilities into the cloner.
// tlsClientCert and tlsClientKey are optional.
func (r *RpmRepoCloner) AddNetworkFiles(tlsClientCert, tlsClientKey string) (err error) {
files := []safechroot.FileToCopy{
{Src: "/etc/resolv.conf", Dest: "/etc/resolv.conf"},
}
if tlsClientCert != "" && tlsClientKey != "" {
tlsFiles := []safechroot.FileToCopy{
{Src: tlsClientCert, Dest: "/etc/tdnf/mariner_user.crt"},
{Src: tlsClientKey, Dest: "/etc/tdnf/mariner_user.key"},
}
files = append(files, tlsFiles...)
}
err = r.chroot.AddFiles(files...)
return
}
// initializeRepoDefinitions will configure the chroot's repo files to match those
// provided by the caller.
func (r *RpmRepoCloner) initializeRepoDefinitions(repoDefinitions []string) (err error) {
// ============== TDNF SPECIFIC IMPLEMENTATION ==============
// Unlike some other package managers, TDNF has no notion of repository priority.
// It reads the repo files using `readdir`, which should be assumed to be random ordering.
//
// In order to simulate repository priority, concatenate all requested repofiles into a single file.
// TDNF will read the file top-down. It will then parse the results into a linked list, meaning
// the first repo entry in the file is the first to be checked.
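//
// For example (the repo IDs and URLs below are purely illustrative), if the
// caller passes "local.repo" before "upstream.repo", the concatenated
// allrepos.repo would look like:
//
//   [local-repo]
//   name=Local build repo
//   baseurl=file:///localrpms
//
//   [upstream-base]
//   name=Upstream base repo
//   baseurl=https://example.invalid/base
//
// so TDNF checks [local-repo] first.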
const chrootRepoFile = "/etc/yum.repos.d/allrepos.repo"
fullRepoFilePath := filepath.Join(r.chroot.RootDir(), chrootRepoFile)
// Create the directory for the repo file
err = os.MkdirAll(filepath.Dir(fullRepoFilePath), os.ModePerm)
if err != nil {
logger.Log.Warnf("Could not create directory for chroot repo file (%s)", fullRepoFilePath)
return
}
dstFile, err := os.OpenFile(fullRepoFilePath, os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
return
}
defer dstFile.Close()
// Append all repo files together into a single repo file.
// Assume the order of repoDefinitions indicates their relative priority.
for _, repoFilePath := range repoDefinitions {
err = appendRepoFile(repoFilePath, dstFile)
if err != nil {
return
}
}
return
}
func appendRepoFile(repoFilePath string, dstFile *os.File) (err error) {
repoFile, err := os.Open(repoFilePath)
if err != nil {
return
}
defer repoFile.Close()
_, err = io.Copy(dstFile, repoFile)
if err != nil {
return
}
// Append a new line
_, err = dstFile.WriteString("\n")
return
}
// initializeMountedChrootRepo will initialize a local RPM repository inside the chroot.
func (r *RpmRepoCloner) initializeMountedChrootRepo(repoDir string) (err error) {
return r.chroot.Run(func() (err error) {
return rpmrepomanager.CreateRepo(repoDir)
})
}
// Clone clones the provided list of packages.
// If cloneDeps is set, package dependencies will also be cloned.
// It will automatically resolve packages that describe a provide or file from a package.
func (r *RpmRepoCloner) Clone(cloneDeps bool, packagesToClone ...*pkgjson.PackageVer) (err error) {
const (
strictComparisonOperator = "="
lessThanOrEqualComparisonOperator = "<="
versionSuffixFormat = "-%s"
builtRepoID = "local-repo"
cachedRepoID = "upstream-cache-repo"
allRepoIDs = "*"
)
for _, pkg := range packagesToClone {
builder := strings.Builder{}
builder.WriteString(pkg.Name)
// Treat <= as =
// Treat > and >= as "latest"
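// e.g. a hypothetical {Name: "examplepkg", Condition: "<=", Version: "1.0"}
// is cloned as "examplepkg-1.0", while {Name: "examplepkg", Condition: ">="}
// clones the latest available "examplepkg".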
if pkg.Condition == strictComparisonOperator || pkg.Condition == lessThanOrEqualComparisonOperator {
builder.WriteString(fmt.Sprintf(versionSuffixFormat, pkg.Version))
}
pkgName := builder.String()
logger.Log.Debugf("Cloning: %s", pkgName)
args := []string{
"--destdir",
chrootDownloadDir,
pkgName,
}
if cloneDeps {
args = append([]string{"download", "--alldeps"}, args...)
} else {
args = append([]string{"download-nodeps"}, args...)
}
err = r.chroot.Run(func() (err error) {
// Consider the built RPMs first, then the already cached (e.g. toolchain), and finally all remote packages.
repoOrderList := []string{builtRepoID, cachedRepoID, allRepoIDs}
return r.clonePackage(args, repoOrderList...)
})
if err != nil {
return
}
}
return
}
// SearchAndClone attempts to find a package which supplies the requested file or package. It
// wraps Clone() to acquire the requested package once found.
func (r *RpmRepoCloner) SearchAndClone(cloneDeps bool, singlePackageToClone *pkgjson.PackageVer) (err error) {
var (
pkgName string
stderr string
)
err = r.chroot.Run(func() (err error) {
args := []string{
"provides",
singlePackageToClone.Name,
}
if !r.useUpdateRepo {
args = append(args, fmt.Sprintf("--disablerepo=%s", updateRepoID))
}
stdout, stderr, err := shell.Execute("tdnf", args...)
logger.Log.Debugf("tdnf search for dependency '%s':\n%s", singlePackageToClone.Name, stdout)
if err != nil {
logger.Log.Errorf("Failed to lookup dependency '%s', tdnf error: '%s'", singlePackageToClone.Name, stderr)
return
}
splitStdout := strings.Split(stdout, "\n")
for _, line := range splitStdout {
matches := packageLookupNameMatchRegex.FindStringSubmatch(line)
if len(matches) == 0 {
continue
}
// Local sources are listed last, keep searching for the last possible match
pkgName = matches[1]
logger.Log.Debugf("'%s' is available from package '%s'", singlePackageToClone.Name, pkgName)
}
return
})
if err != nil {
logger.Log.Error(stderr)
return
}
logger.Log.Warnf("Translated '%s' to package '%s'", singlePackageToClone.Name, pkgName)
err = r.Clone(cloneDeps, &pkgjson.PackageVer{Name: pkgName})
return
}
// ConvertDownloadedPackagesIntoRepo initializes the downloaded RPMs into an RPM repository.
func (r *RpmRepoCloner) ConvertDownloadedPackagesIntoRepo() (err error) {
fullRpmDownloadDir := buildpipeline.GetRpmsDir(r.chroot.RootDir(), chrootDownloadDir)
err = rpmrepomanager.OrganizePackagesByArch(fullRpmDownloadDir, fullRpmDownloadDir)
if err != nil {
return
}
err = r.initializeMountedChrootRepo(chrootDownloadDir)
return
}
// ClonedRepoContents returns the packages contained in the cloned repository.
func (r *RpmRepoCloner) ClonedRepoContents() (repoContents *repocloner.RepoContents, err error) {
repoContents = &repocloner.RepoContents{}
onStdout := func(args ...interface{}) {
if len(args) == 0 {
return
}
line := args[0].(string)
matches := listedPackageRegex.FindStringSubmatch(line)
if len(matches) != listMaxMatchLen {
return
}
pkg := &repocloner.RepoPackage{
Name: matches[listPackageName],
Version: matches[listPackageVersion],
Architecture: matches[listPackageArch],
Distribution: matches[listPackageDist],
}
repoContents.Repo = append(repoContents.Repo, pkg)
}
err = r.chroot.Run(func() (err error) {
// Disable all repositories except the fetcher repository (the repository with the cloned packages)
tdnfArgs := []string{
"list",
"ALL",
"--disablerepo=*",
fmt.Sprintf("--enablerepo=%s", fetcherRepoID),
}
return shell.ExecuteLiveWithCallback(onStdout, logger.Log.Warn, "tdnf", tdnfArgs...)
})
return
}
// CloneDirectory returns the directory where cloned packages are saved.
func (r *RpmRepoCloner) CloneDirectory() string {
return r.cloneDir
}
// Close closes the given RpmRepoCloner.
func (r *RpmRepoCloner) Close() error {
return r.chroot.Close(leaveChrootFilesOnDisk)
}
// clonePackage clones a given package using prepopulated arguments.
// It will gradually enable more repos to consider using enabledRepoOrder until the package is found.
func (r *RpmRepoCloner) clonePackage(baseArgs []string, enabledRepoOrder ...string) (err error) {
const (
unresolvedOutputPrefix = "No package"
toyboxConflictsPrefix = "toybox conflicts"
unresolvedOutputPostfix = "available"
)
if len(enabledRepoOrder) == 0 {
return fmt.Errorf("enabledRepoOrder cannot be empty")
}
// Disable all repos first so we can gradually enable them below.
// TDNF processes enable/disable repo requests in the order that they are passed.
// So if `--disablerepo=foo` and then `--enablerepo=foo` are passed, `foo` will be enabled.
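// Illustrative final argument ordering for enabledRepoOrder = [A, B]
// (the repo IDs A and B are hypothetical):
//   tdnf ... --disablerepo=* --enablerepo=A --enablerepo=B --disablerepo=fetcher-cloned-repo
// leaving only A and B enabled for this attempt.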
baseArgs = append(baseArgs, "--disablerepo=*")
var enabledRepoArgs []string
for _, repoID := range enabledRepoOrder {
logger.Log.Debugf("Enabling repo ID: %s", repoID)
// Gradually increase the scope of allowed repos. Keep repos already considered enabled
// as packages from one repo may depend on another.
// e.g. packages in upstream update repo may require packages in upstream base repo.
enabledRepoArgs = append(enabledRepoArgs, fmt.Sprintf("--enablerepo=%s", repoID))
args := append(baseArgs, enabledRepoArgs...)
// Do not enable the fetcher's own repo as it is only used for listing cloned files
// and will not be initialized until ConvertDownloadedPackagesIntoRepo is called on it
// when all cloning is complete.
args = append(args, fmt.Sprintf("--disablerepo=%s", fetcherRepoID))
// Explicitly disable the update repo if it is turned off.
if !r.useUpdateRepo {
args = append(args, fmt.Sprintf("--disablerepo=%s", updateRepoID))
}
var (
stdout string
stderr string
)
stdout, stderr, err = shell.Execute("tdnf", args...)
logger.Log.Debugf("stdout: %s", stdout)
logger.Log.Debugf("stderr: %s", stderr)
if err != nil {
logger.Log.Errorf("tdnf error (will continue if the only errors are toybox conflicts):\n '%s'", stderr)
}
// ============== TDNF SPECIFIC IMPLEMENTATION ==============
// Check if TDNF could not resolve a given package. If TDNF does not find a requested package,
// it will not error. Instead it will print a message to stdout. Check for this message.
//
// *NOTE*: TDNF will attempt best effort. If N packages are requested, and 1 cannot be found,
// it will still download N-1 packages while also printing the message.
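// A hypothetical unresolved package line looks like:
//   "No package examplepkg available"
// which is what the prefix/suffix checks below detect.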
splitStdout := strings.Split(stdout, "\n")
for _, line := range splitStdout {
trimmedLine := strings.TrimSpace(line)
// Toybox conflicts are a known issue, reset the err value if encountered
if strings.HasPrefix(trimmedLine, toyboxConflictsPrefix) {
logger.Log.Warn("Ignoring known toybox conflict")
err = nil
continue
}
// If a package was not available, update err
if strings.HasPrefix(trimmedLine, unresolvedOutputPrefix) && strings.HasSuffix(trimmedLine, unresolvedOutputPostfix) {
err = fmt.Errorf("%s", trimmedLine)
break
}
}
if err == nil {
break
}
}
return
}
| 1 | 12,174 | Why are we mentioning toybox in this message? AND it still says "tdnf error". What's the actual error? Should it be resolved instead of flagged? | microsoft-CBL-Mariner | go |
@@ -473,6 +473,7 @@ describe('GridFS Stream', function () {
// Fail if user tries to abort an aborted stream
uploadStream.abort().then(null, function (error) {
expect(error.toString()).to.equal(
+ // TODO(NODE-3405): Replace with MongoStreamClosedError
'MongoDriverError: Cannot call abort() on a stream twice'
);
client.close(done); | 1 | 'use strict';
const { Double } = require('bson');
const stream = require('stream');
const { EJSON } = require('bson');
const fs = require('fs');
const { setupDatabase, withClient } = require('./shared');
const { expect } = require('chai');
const { GridFSBucket, ObjectId } = require('../../src');
describe('GridFS Stream', function () {
before(function () {
return setupDatabase(this.configuration);
});
/**
* Correctly stream a file from disk into GridFS using openUploadStream
*
* @example-class GridFSBucket
* @example-method openUploadStream
*/
it('should upload from file stream', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
db.dropDatabase(function (error) {
expect(error).to.not.exist;
const bucket = new GridFSBucket(db);
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const license = fs.readFileSync('./LICENSE.md');
const id = uploadStream.id;
// Wait for stream to finish
uploadStream.once('finish', function () {
const chunksColl = db.collection('fs.chunks');
const chunksQuery = chunksColl.find({ files_id: id });
// Get all the chunks
chunksQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(1);
expect(docs[0].data.toString('hex')).to.equal(license.toString('hex'));
const filesColl = db.collection('fs.files');
const filesQuery = filesColl.find({ _id: id });
filesQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(1);
expect(docs[0]).to.not.have.property('md5');
// make sure we created indexes
filesColl.listIndexes().toArray(function (error, indexes) {
expect(error).to.not.exist;
expect(indexes.length).to.equal(2);
expect(indexes[1].name).to.equal('filename_1_uploadDate_1');
chunksColl.listIndexes().toArray(function (error, indexes) {
expect(error).to.not.exist;
expect(indexes.length).to.equal(2);
expect(indexes[1].name).to.equal('files_id_1_n_1');
client.close(done);
});
});
});
});
});
readStream.pipe(uploadStream);
});
});
}
});
it('destroy publishes provided error', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
db.dropDatabase(function (error) {
expect(error).to.not.exist;
const bucket = new GridFSBucket(db);
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const errorMessage = 'error';
uploadStream.once('error', function (e) {
expect(e).to.equal(errorMessage);
client.close(done);
});
uploadStream.once('finish', function () {
uploadStream.destroy(errorMessage);
});
readStream.pipe(uploadStream);
});
});
}
});
/**
* Correctly stream a file from disk into GridFS using openUploadStream
*
* @example-class GridFSBucket
* @example-method openUploadStreamWithId
*/
it('should upload from file stream with custom id', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
db.dropDatabase(function (error) {
expect(error).to.not.exist;
const bucket = new GridFSBucket(db);
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStreamWithId(1, 'test.dat');
const license = fs.readFileSync('./LICENSE.md');
const id = uploadStream.id;
expect(id).to.equal(1);
// Wait for stream to finish
uploadStream.once('finish', function () {
const chunksColl = db.collection('fs.chunks');
const chunksQuery = chunksColl.find({ files_id: id });
// Get all the chunks
chunksQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(1);
expect(docs[0].data.toString('hex')).to.equal(license.toString('hex'));
const filesColl = db.collection('fs.files');
const filesQuery = filesColl.find({ _id: id });
filesQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(1);
expect(docs[0]).to.not.have.property('md5');
// make sure we created indexes
filesColl.listIndexes().toArray(function (error, indexes) {
expect(error).to.not.exist;
expect(indexes.length).to.equal(2);
expect(indexes[1].name).to.equal('filename_1_uploadDate_1');
chunksColl.listIndexes().toArray(function (error, indexes) {
expect(error).to.not.exist;
expect(indexes.length).to.equal(2);
expect(indexes[1].name).to.equal('files_id_1_n_1');
client.close(done);
});
});
});
});
});
readStream.pipe(uploadStream);
});
});
}
});
/**
* Correctly upload a file to GridFS and then retrieve it as a stream
*
* @example-class GridFSBucket
* @example-method openDownloadStream
*/
it('should download to upload stream', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
const CHUNKS_COLL = 'gridfsdownload.chunks';
const FILES_COLL = 'gridfsdownload.files';
const readStream = fs.createReadStream('./LICENSE.md');
let uploadStream = bucket.openUploadStream('test.dat');
const license = fs.readFileSync('./LICENSE.md');
let id = uploadStream.id;
uploadStream.once('finish', function () {
const downloadStream = bucket.openDownloadStream(id);
uploadStream = bucket.openUploadStream('test2.dat');
id = uploadStream.id;
downloadStream.pipe(uploadStream).once('finish', function () {
const chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
chunksQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(1);
expect(docs[0].data.toString('hex')).to.equal(license.toString('hex'));
const filesQuery = db.collection(FILES_COLL).find({ _id: id });
filesQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(1);
expect(docs[0]).to.not.have.property('md5');
client.close(done);
});
});
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Correctly return file not found error
*/
it('should fail to locate gridfs stream', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
// Get an unknown file
const downloadStream = bucket.openDownloadStream(new ObjectId());
downloadStream.on('data', function () {});
downloadStream.on('error', function (err) {
expect(err.code).to.equal('ENOENT');
client.close(done);
});
});
}
});
/**
* Correctly download a GridFS file by name
*
* @example-class GridFSBucket
* @example-method openDownloadStreamByName
*/
it('openDownloadStreamByName', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
uploadStream.once('finish', function () {
const downloadStream = bucket.openDownloadStreamByName('test.dat');
let gotData = false;
downloadStream.on('data', function (data) {
expect(gotData).to.equal(false);
gotData = true;
expect(data.toString('utf8').indexOf('TERMS AND CONDITIONS') !== -1).to.equal(true);
});
downloadStream.on('end', function () {
expect(gotData).to.equal(true);
client.close(done);
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Provide start and end parameters for file download to skip ahead x bytes and limit the total amount of bytes read to n
*
* @example-class GridFSBucket
* @example-method openDownloadStream
*/
it('start/end options for openDownloadStream', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, {
bucketName: 'gridfsdownload',
chunkSizeBytes: 2
});
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('teststart.dat');
uploadStream.once('finish', function () {
const downloadStream = bucket
.openDownloadStreamByName('teststart.dat', { start: 1 })
.end(6);
downloadStream.on('error', function (error) {
expect(error).to.not.exist;
});
let gotData = 0;
let str = '';
downloadStream.on('data', function (data) {
++gotData;
str += data.toString('utf8');
});
downloadStream.on('end', function () {
// Depending on different versions of node, we may get
// different amounts of 'data' events. node 0.10 gives 2,
// node >= 0.12 gives 3. Either is correct, but we just
// care that we got between 1 and 3, and got the right result
expect(gotData >= 1 && gotData <= 3).to.equal(true);
expect(str).to.equal('pache');
client.close(done);
});
});
readStream.pipe(uploadStream);
});
}
});
it('should emit close after all chunks are received', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect((err, client) => {
expect(err).to.not.exist;
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, {
bucketName: 'gridfsdownload',
chunkSizeBytes: 6000
});
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('teststart.dat');
uploadStream.once('finish', function () {
const downloadStream = bucket.openDownloadStreamByName('teststart.dat');
const events = [];
downloadStream.on('data', () => events.push('data'));
downloadStream.on('close', () => events.push('close'));
downloadStream.on('end', () => {
expect(events).to.eql(['data', 'data', 'close']);
client.close(done);
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Deleting a file from GridFS
*
* @example-class GridFSBucket
* @example-method delete
*/
it('Deleting a file', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
const CHUNKS_COLL = 'gridfsdownload.chunks';
const FILES_COLL = 'gridfsdownload.files';
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
uploadStream.once('finish', function () {
bucket.delete(id, function (err) {
expect(err).to.not.exist;
const chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
chunksQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
const filesQuery = db.collection(FILES_COLL).find({ _id: id });
filesQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
client.close(done);
});
});
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Aborting an upload
*
* @example-class GridFSBucketWriteStream
* @example-method abort
*/
it('Aborting an upload', {
metadata: { requires: { topology: ['single'], node: '>12.0.0' } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsabort', chunkSizeBytes: 1 });
const CHUNKS_COLL = 'gridfsabort.chunks';
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
const query = { files_id: id };
uploadStream.write('a', 'utf8', function (error) {
expect(error).to.not.exist;
db.collection(CHUNKS_COLL).count(query, function (error, c) {
expect(error).to.not.exist;
expect(c).to.equal(1);
uploadStream.abort(function (error) {
expect(error).to.not.exist;
db.collection(CHUNKS_COLL).count(query, function (error, c) {
expect(error).to.not.exist;
expect(c).to.equal(0);
uploadStream.write('b', 'utf8', function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: this stream has been aborted'
);
uploadStream.end('c', 'utf8', function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: this stream has been aborted'
);
// Fail if user tries to abort an aborted stream
uploadStream.abort().then(null, function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: Cannot call abort() on a stream twice'
);
client.close(done);
});
});
});
});
});
});
});
});
}
});
/**
* Aborting an upload
*/
it('Destroy an upload', {
metadata: { requires: { topology: ['single'], node: '>12.0.0' } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsabort', chunkSizeBytes: 1 });
const CHUNKS_COLL = 'gridfsabort.chunks';
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
const query = { files_id: id };
uploadStream.write('a', 'utf8', function (error) {
expect(error).to.not.exist;
db.collection(CHUNKS_COLL).count(query, function (error, c) {
expect(error).to.not.exist;
expect(c).to.equal(1);
uploadStream.abort(function (error) {
expect(error).to.not.exist;
db.collection(CHUNKS_COLL).count(query, function (error, c) {
expect(error).to.not.exist;
expect(c).to.equal(0);
uploadStream.write('b', 'utf8', function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: this stream has been aborted'
);
uploadStream.end('c', 'utf8', function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: this stream has been aborted'
);
// Fail if user tries to abort an aborted stream
uploadStream.abort().then(null, function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: Cannot call abort() on a stream twice'
);
client.close(done);
});
});
});
});
});
});
});
});
}
});
/**
* Calling abort() on a GridFSBucketReadStream
*
* @example-class GridFSBucketReadStream
* @example-method abort
*/
it('Destroying a download stream', {
metadata: { requires: { topology: ['single'], apiVersion: false } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdestroy', chunkSizeBytes: 10 });
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
// Wait for stream to finish
uploadStream.once('finish', function () {
const id = uploadStream.id;
const downloadStream = bucket.openDownloadStream(id);
const finished = {};
downloadStream.on('data', function () {
expect.fail('Should be unreachable');
});
downloadStream.on('error', function () {
expect.fail('Should be unreachable');
});
downloadStream.on('end', function () {
expect(downloadStream.s.cursor).to.not.exist;
if (finished.close) {
client.close(done);
return;
}
finished.end = true;
});
downloadStream.on('close', function () {
if (finished.end) {
client.close(done);
return;
}
finished.close = true;
});
downloadStream.abort(function (error) {
expect(error).to.not.exist;
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Deleting a file from GridFS using promises
*
* @example-class GridFSBucket
* @example-method delete
*/
it('Deleting a file using promises', {
metadata: {
requires: { topology: ['single'], node: '>12.0.0', sessions: { skipLeakTests: true } }
},
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
const CHUNKS_COLL = 'gridfsdownload.chunks';
const FILES_COLL = 'gridfsdownload.files';
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
uploadStream.once('finish', function () {
bucket.delete(id).then(function () {
const chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
chunksQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
const filesQuery = db.collection(FILES_COLL).find({ _id: id });
filesQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
client.close(done);
});
});
});
});
readStream.pipe(uploadStream);
});
}
});
it('find()', {
metadata: { requires: { topology: ['single'], sessions: { skipLeakTests: true } } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'fs' });
// We're only making sure this doesn't throw
bucket.find({
batchSize: 1,
limit: 2,
maxTimeMS: 3,
noCursorTimeout: true,
skip: 4,
sort: { _id: 1 }
});
client.close(done);
});
}
});
/**
* Drop an entire bucket's files and chunks
*
* @example-class GridFSBucket
* @example-method drop
*/
it('drop example', {
metadata: { requires: { topology: ['single'], sessions: { skipLeakTests: true } } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
const CHUNKS_COLL = 'gridfsdownload.chunks';
const FILES_COLL = 'gridfsdownload.files';
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
uploadStream.once('finish', function () {
bucket.drop(function (err) {
expect(err).to.not.exist;
const chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
chunksQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
const filesQuery = db.collection(FILES_COLL).find({ _id: id });
filesQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
client.close(done);
});
});
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Drop an entire bucket's files and chunks using promises
*
* @example-class GridFSBucket
* @example-method drop
*/
it('drop using promises', {
metadata: { requires: { topology: ['single'], node: '>12.0.0' } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
const CHUNKS_COLL = 'gridfsdownload.chunks';
const FILES_COLL = 'gridfsdownload.files';
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
uploadStream.once('finish', function () {
bucket.drop().then(function () {
const chunksQuery = db.collection(CHUNKS_COLL).find({ files_id: id });
chunksQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
const filesQuery = db.collection(FILES_COLL).find({ _id: id });
filesQuery.toArray(function (error, docs) {
expect(error).to.not.exist;
expect(docs.length).to.equal(0);
client.close(done);
});
});
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Find all files associated with a bucket
*
* @example-class GridFSBucket
* @example-method find
*/
it('find example', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload_2' });
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
uploadStream.once('finish', function () {
bucket.find({}, { batchSize: 1 }).toArray(function (err, files) {
expect(err).to.not.exist;
expect(1).to.equal(files.length);
client.close(done);
});
});
readStream.pipe(uploadStream);
});
}
});
/**
* Rename a file
*
* @example-class GridFSBucket
* @example-method rename
*/
it('rename example', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload_3' });
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
uploadStream.once('finish', function () {
// Rename the file
bucket.rename(id, 'renamed_it.dat', function (err) {
expect(err).to.not.exist;
client.close(done);
});
});
readStream.pipe(uploadStream);
});
}
});
it('download empty doc', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'fs' });
db.collection('fs.files').insertMany([{ length: 0 }], function (error, result) {
expect(error).to.not.exist;
expect(Object.keys(result.insertedIds).length).to.equal(1);
const id = result.insertedIds[0];
const stream = bucket.openDownloadStream(id);
stream.on('error', function (error) {
expect(error).to.not.exist;
});
stream.on('data', function () {
expect.fail('Should be unreachable');
});
stream.on('end', function () {
// As per spec, make sure we didn't actually fire a query
// because the document length is 0
expect(stream.s.cursor).to.not.exist;
client.close(done);
});
});
});
}
});
it('should use chunkSize for download', {
metadata: { requires: { topology: ['single'] } },
test(done) {
if (typeof stream.pipeline !== 'function') {
this.skip();
}
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfs' });
const uploadStream = bucket.openUploadStream('test');
uploadStream.end(Buffer.alloc(40 * 1024 * 1024), err => {
expect(err).to.not.exist;
const range = {
start: 35191617,
end: 35192831
};
const downloadStream = bucket.openDownloadStreamByName('test', range);
const outputStream = fs.createWriteStream('output');
stream.pipeline(downloadStream, outputStream, err => {
expect(err).to.not.exist;
client.close(() => {
fs.stat('output', (err, stats) => {
expect(err).to.not.exist;
expect(range.end - range.start).to.equal(stats.size);
done();
});
});
});
});
});
}
});
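// The remaining tests are generated from the GridFS spec JSON files: each
// spec entry supplies an "act" (the upload or download to perform) and an
// "assert" (the expected collection contents, stream output, or error).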
const UPLOAD_SPEC = require('../spec/gridfs/gridfs-upload.json');
UPLOAD_SPEC.tests.forEach(function (specTest) {
(function (testSpec) {
it(testSpec.description, {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
const db = client.db(configuration.db);
db.dropDatabase(function (error) {
expect(error).to.not.exist;
const bucket = new GridFSBucket(db, { bucketName: 'expected' });
const res = bucket.openUploadStream(
testSpec.act.arguments.filename,
testSpec.act.arguments.options
);
const buf = Buffer.from(testSpec.act.arguments.source.$hex, 'hex');
res.on('error', function (err) {
expect(err).to.not.exist;
});
res.on('finish', function () {
const data = testSpec.assert.data;
let num = data.length;
data.forEach(function (data) {
const collection = data.insert;
db.collection(collection)
.find({})
.toArray(function (error, docs) {
expect(data.documents.length).to.equal(docs.length);
for (let i = 0; i < docs.length; ++i) {
testResultDoc(data.documents[i], docs[i], res.id);
}
if (--num === 0) {
client.close(done);
}
});
});
});
res.write(buf);
res.end();
});
});
}
});
})(specTest);
});
const DOWNLOAD_SPEC = require('../spec/gridfs/gridfs-download.json');
DOWNLOAD_SPEC.tests.forEach(function (specTest) {
(function (testSpec) {
it(testSpec.description, {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), {
maxPoolSize: 1
});
client.connect(function (err, client) {
const db = client.db(configuration.db);
db.dropDatabase(function (err) {
expect(err).to.not.exist;
const BUCKET_NAME = 'fs';
const _runTest = function () {
const bucket = new GridFSBucket(db, { bucketName: BUCKET_NAME });
let res = Buffer.alloc(0);
const download = bucket.openDownloadStream(
EJSON.parse(JSON.stringify(testSpec.act.arguments.id), { relaxed: true })
);
download.on('data', function (chunk) {
res = Buffer.concat([res, chunk]);
});
let errorReported = false;
download.on('error', function (error) {
errorReported = true;
if (!testSpec.assert.error) {
expect.fail('Should be unreachable');
// We need to abort in order to close the underlying cursor,
// and by extension the implicit session used for the cursor.
// This is only necessary if the cursor is not exhausted
download.abort();
client.close(done);
}
expect(error.toString().indexOf(testSpec.assert.error) !== -1).to.equal(true);
// We need to abort in order to close the underlying cursor,
// and by extension the implicit session used for the cursor.
// This is only necessary if the cursor is not exhausted
download.abort();
client.close(done);
});
download.on('end', function () {
const result = testSpec.assert.result;
if (!result) {
if (errorReported) {
return;
}
// We need to abort in order to close the underlying cursor,
// and by extension the implicit session used for the cursor.
// This is only necessary if the cursor is not exhausted
download.abort();
client.close(done);
expect.fail('errorReported should be set');
}
expect(res.toString('hex')).to.equal(result.$hex);
// We need to abort in order to close the underlying cursor,
// and by extension the implicit session used for the cursor.
// This is only necessary if the cursor is not exhausted
download.abort();
client.close(done);
});
};
const keys = Object.keys(DOWNLOAD_SPEC.data);
let numCollections = Object.keys(DOWNLOAD_SPEC.data).length;
keys.forEach(function (collection) {
const data = DOWNLOAD_SPEC.data[collection].map(function (v) {
return deflateTestDoc(v);
});
db.collection(BUCKET_NAME + '.' + collection).insertMany(data, function (error) {
expect(error).to.not.exist;
if (--numCollections === 0) {
if (testSpec.arrange) {
// only support 1 arrange op for now
expect(testSpec.arrange.data.length).to.equal(1);
applyArrange(db, deflateTestDoc(testSpec.arrange.data[0]), function (error) {
expect(error).to.not.exist;
_runTest();
});
} else {
_runTest();
}
}
});
});
});
});
}
});
})(specTest);
});
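// Compares a result document against a spec document, resolving the spec's
// placeholders: '*actual' matches any present value, '*result' matches the
// uploaded file id, and { $hex: ... } values match binary data by hex encoding.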
function testResultDoc(specDoc, resDoc, result) {
const specKeys = Object.keys(specDoc)
.filter(key => key !== 'md5')
.sort();
const resKeys = Object.keys(resDoc).sort();
expect(specKeys.length === resKeys.length).to.equal(true);
for (let i = 0; i < specKeys.length; ++i) {
const key = specKeys[i];
expect(specKeys[i]).to.equal(resKeys[i]);
if (specDoc[key] === '*actual') {
expect(resDoc[key]).to.exist;
} else if (specDoc[key] === '*result') {
expect(resDoc[key].toString()).to.equal(result.toString());
} else if (specDoc[key].$hex) {
expect(resDoc[key]._bsontype === 'Binary').to.equal(true);
expect(resDoc[key].toString('hex')).to.equal(specDoc[key].$hex);
} else {
if (typeof specDoc[key] === 'object') {
expect(specDoc[key]).to.deep.equal(resDoc[key]);
} else {
expect(specDoc[key]).to.equal(resDoc[key]);
}
}
}
}
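// Parses a spec document through EJSON and converts embedded { $hex: ... }
// values into Buffers so the document can be inserted directly.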
function deflateTestDoc(doc) {
const ret = EJSON.parse(JSON.stringify(doc), { relaxed: true });
convert$hexToBuffer(ret);
return ret;
}
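// Recursively replaces { $hex: ... } placeholders in a document with the
// corresponding Buffer values.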
function convert$hexToBuffer(doc) {
const keys = Object.keys(doc);
keys.forEach(function (key) {
if (doc[key] && typeof doc[key] === 'object') {
if (doc[key].$hex != null) {
doc[key] = Buffer.from(doc[key].$hex, 'hex');
} else {
convert$hexToBuffer(doc[key]);
}
}
});
}
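// Applies a spec "arrange" command (a single delete, insert or update) to
// the database before a download test runs.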
function applyArrange(db, command, callback) {
// Don't count on commands being there since we need to test on 2.2 and 2.4
if (command.delete) {
if (command.deletes.length !== 1) {
return callback(new Error('can only arrange with 1 delete'));
}
if (command.deletes[0].limit !== 1) {
return callback(new Error('can only arrange with delete limit 1'));
}
db.collection(command.delete).deleteOne(command.deletes[0].q, callback);
} else if (command.insert) {
db.collection(command.insert).insertMany(command.documents, callback);
} else if (command.update) {
const bulk = [];
for (let i = 0; i < command.updates.length; ++i) {
bulk.push({
updateOne: {
filter: command.updates[i].q,
update: command.updates[i].u
}
});
}
db.collection(command.update).bulkWrite(bulk, callback);
} else {
const msg = 'Command not recognized: ' + require('util').inspect(command);
callback(new Error(msg));
}
}
/**
* NODE-822 GridFSBucketWriteStream end method does not handle optional parameters
*/
it('should correctly handle calling end function with only a callback', {
metadata: { requires: { topology: ['single'], node: '>4.0.0' } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsabort', chunkSizeBytes: 1 });
const CHUNKS_COLL = 'gridfsabort.chunks';
const uploadStream = bucket.openUploadStream('test.dat');
const id = uploadStream.id;
const query = { files_id: id };
uploadStream.write('a', 'utf8', function (error) {
expect(error).to.not.exist;
db.collection(CHUNKS_COLL).count(query, function (error, c) {
expect(error).to.not.exist;
expect(c).to.equal(1);
uploadStream.abort(function (error) {
expect(error).to.not.exist;
db.collection(CHUNKS_COLL).count(query, function (error, c) {
expect(error).to.not.exist;
expect(c).to.equal(0);
uploadStream.write('b', 'utf8', function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: this stream has been aborted'
);
uploadStream.end(function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: this stream has been aborted'
);
// Fail if user tries to abort an aborted stream
uploadStream.abort().then(null, function (error) {
expect(error.toString()).to.equal(
'MongoDriverError: Cannot call abort() on a stream twice'
);
client.close(done);
});
});
});
});
});
});
});
});
}
});
/**
* Provide start and end parameters for a file download to skip ahead x bytes and limit the total number of bytes read to n
*
* @example-class GridFSBucket
* @example-method openDownloadStream
*/
it('NODE-829 start/end options for openDownloadStream where start-end is < size of chunk', {
metadata: { requires: { topology: ['single'] } },
test(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.writeConcernMax(), { maxPoolSize: 1 });
client.connect(function (err, client) {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, {
bucketName: 'gridfsdownload',
chunkSizeBytes: 20
});
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('teststart.dat');
uploadStream.once('finish', function () {
const downloadStream = bucket
.openDownloadStreamByName('teststart.dat', { start: 1 })
.end(6);
downloadStream.on('error', function (error) {
expect(error).to.not.exist;
});
let gotData = 0;
let str = '';
downloadStream.on('data', function (data) {
++gotData;
str += data.toString('utf8');
});
downloadStream.on('end', function () {
// Depending on different versions of node, we may get
// different numbers of 'data' events. node 0.10 gives 2,
// node >= 0.12 gives 3. Either is correct, but we just
// care that we got between 1 and 3, and got the right result
expect(gotData >= 1 && gotData <= 3).to.equal(true);
expect(str).to.equal('pache');
client.close(done);
});
});
readStream.pipe(uploadStream);
});
}
});
it('should correctly handle indexes created with BSON.Double', function (done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
const db = client.db(configuration.db);
const col = db.collection('fs.files');
col.createIndex({ filename: new Double(1.0), uploadDate: new Double(1.0) }, err => {
expect(err).to.not.exist;
col.listIndexes().toArray((err, indexes) => {
expect(err).to.not.exist;
const names = indexes.map(i => i.name);
expect(names).to.eql(['_id_', 'filename_1_uploadDate_1']);
client.close();
done();
});
});
});
});
it('NODE-2623 downloadStream should emit error on end > size', function () {
const configuration = this.configuration;
return withClient.bind(this)((client, done) => {
const db = client.db(configuration.db);
const bucket = new GridFSBucket(db, { bucketName: 'gridfsdownload' });
const readStream = fs.createReadStream('./LICENSE.md');
const uploadStream = bucket.openUploadStream('test.dat');
const actualSize = fs.statSync('./LICENSE.md').size;
const wrongExpectedSize = Math.floor(actualSize * 1.1);
const id = uploadStream.id;
uploadStream.once('finish', function () {
const downloadStream = bucket.openDownloadStream(id, { end: wrongExpectedSize });
downloadStream.on('data', function () {});
downloadStream.on('error', function (err) {
expect(err.message).to.equal(
`Stream end (${wrongExpectedSize}) must not be more than the length of the file (${actualSize})`
);
done();
});
});
readStream.pipe(uploadStream);
});
});
});
| 1 | 20,706 | You can remove these if they've been resolved in NODE-3405 and this isn't depending on it | mongodb-node-mongodb-native | js |
@@ -1,7 +1,7 @@
<%# locals: { phase } %>
<div class="row" id="<%= dom_id(phase) %>">
<div class="col-xs-12">
- <h4>
+ <h4 class=''>
<%= _('Instructions') %>
<a href="<%= edit_plan_path(plan_id, phase_id: phase.id) %>" class="btn btn-default pull-right"><%= _('Write plan') %></a>
</h4> | 1 | <%# locals: { phase } %>
<div class="row" id="<%= dom_id(phase) %>">
<div class="col-xs-12">
<h4>
<%= _('Instructions') %>
<a href="<%= edit_plan_path(plan_id, phase_id: phase.id) %>" class="btn btn-default pull-right"><%= _('Write plan') %></a>
</h4>
<div class="text-justify">
<div class="display-readonly-textarea-content">
<%= sanitize(phase.description) %>
</div>
</div>
</div>
<div class="col-xs-12">
<ul class="list-unstyled">
<% phase.sections.order(:number).each do |s| %>
<li class="mb-5">
<%= s.title %>
<ul>
<% s.questions.each do |q| %>
<li>
<div class="display-readonly-textarea-content">
<%= sanitize(q.text) %>
</div>
</li>
<% end %>
</ul>
</li>
<% end %>
</ul>
</div>
</div>
| 1 | 18,894 | don't need the class here if its empty | DMPRoadmap-roadmap | rb |
@@ -4,11 +4,15 @@ import Ember from 'ember';
import moment from 'moment';
import { translationMacro as t } from 'ember-i18n';
+const { computed } = Ember;
+
export default AppointmentIndexRoute.extend(DateFormat, {
editReturn: 'appointments.search',
filterParams: ['appointmentType', 'provider', 'status'],
modelName: 'appointment',
- pageTitle: t('appointments.searchTitle'),
+ pageTitle: computed('i18n', () => {
+ return t('appointments.searchTitle');
+ }),
queryParams: {
appointmentType: { refreshModel: true }, | 1 | import AppointmentIndexRoute from 'hospitalrun/appointments/index/route';
import DateFormat from 'hospitalrun/mixins/date-format';
import Ember from 'ember';
import moment from 'moment';
import { translationMacro as t } from 'ember-i18n';
export default AppointmentIndexRoute.extend(DateFormat, {
editReturn: 'appointments.search',
filterParams: ['appointmentType', 'provider', 'status'],
modelName: 'appointment',
pageTitle: t('appointments.searchTitle'),
queryParams: {
appointmentType: { refreshModel: true },
provider: { refreshModel: true },
status: { refreshModel: true },
startDate: { refreshModel: true },
startKey: { refreshModel: true }
},
_modelQueryParams(params) {
let { startDate } = params;
let maxValue = this.get('maxValue');
if (Ember.isEmpty(startDate)) {
startDate = moment();
} else {
startDate = moment(parseInt(startDate));
}
let startOfDay = startDate.startOf('day').toDate().getTime();
let searchOptions = {
startkey: [startOfDay, null, 'appointment_'],
endkey: [maxValue, maxValue, `appointment_${maxValue}`]
};
return {
options: searchOptions,
mapReduce: 'appointments_by_date'
};
},
model(params) {
return this._super(params).then(function(model) {
model.setProperties({
selectedAppointmentType: params.appointmentType,
selectedProvider: params.provider,
selectedStatus: params.status
});
let { startDate } = params;
startDate = new Date();
if (!Ember.isEmpty(params.startDate)) {
startDate.setTime(params.startDate);
}
model.set('selectedStartingDate', startDate);
model.set('display_selectedStartingDate', this._dateFormat(startDate));
return model;
}.bind(this));
}
});
| 1 | 13,691 | This should be computed('i18n.locale'.... | HospitalRun-hospitalrun-frontend | js |
@@ -348,8 +348,8 @@ class BigQueryClient:
status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute(num_retries=10)
if status['status']['state'] == 'DONE':
if status['status'].get('errorResult'):
- raise Exception('BigQuery job failed: {}'.format(status['status']['errorResult']))
- return
+ raise BigQueryExecutionError(job_id, status['status']['errorResult'])
+ return job_id
logger.info('Waiting for job %s:%s to complete...', project_id, job_id)
time.sleep(5) | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import logging
import luigi.target
import time
from luigi.contrib import gcp
logger = logging.getLogger('luigi-interface')
try:
from googleapiclient import discovery
from googleapiclient import http
except ImportError:
logger.warning('BigQuery module imported, but google-api-python-client is '
'not installed. Any BigQuery task will fail')
class CreateDisposition:
CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
CREATE_NEVER = 'CREATE_NEVER'
class WriteDisposition:
WRITE_TRUNCATE = 'WRITE_TRUNCATE'
WRITE_APPEND = 'WRITE_APPEND'
WRITE_EMPTY = 'WRITE_EMPTY'
class QueryMode:
INTERACTIVE = 'INTERACTIVE'
BATCH = 'BATCH'
class SourceFormat:
AVRO = 'AVRO'
CSV = 'CSV'
DATASTORE_BACKUP = 'DATASTORE_BACKUP'
NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
class FieldDelimiter:
"""
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character.
To use a character in the range 128-255, you must encode the character as UTF8.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary state.
BigQuery also supports the escape sequence "\t" to specify a tab separator.
The default value is a comma (',').
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
"""
COMMA = ',' # Default
TAB = "\t"
PIPE = "|"
class PrintHeader:
TRUE = True
FALSE = False
class DestinationFormat:
AVRO = 'AVRO'
CSV = 'CSV'
NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
class Compression:
GZIP = 'GZIP'
NONE = 'NONE'
class Encoding:
"""
[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.
BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
"""
UTF_8 = 'UTF-8'
ISO_8859_1 = 'ISO-8859-1'
BQDataset = collections.namedtuple('BQDataset', 'project_id dataset_id location')
class BQTable(collections.namedtuple('BQTable', 'project_id dataset_id table_id location')):
@property
def dataset(self):
return BQDataset(project_id=self.project_id, dataset_id=self.dataset_id, location=self.location)
@property
def uri(self):
return "bq://" + self.project_id + "/" + \
self.dataset.dataset_id + "/" + self.table_id
class BigQueryClient:
"""A client for Google BigQuery.
For details of how authentication and the descriptor work, see the
documentation for the GCS client. The descriptor URL for BigQuery is
https://www.googleapis.com/discovery/v1/apis/bigquery/v2/rest
"""
def __init__(self, oauth_credentials=None, descriptor='', http_=None):
authenticate_kwargs = gcp.get_authenticate_kwargs(oauth_credentials, http_)
if descriptor:
self.client = discovery.build_from_document(descriptor, **authenticate_kwargs)
else:
self.client = discovery.build('bigquery', 'v2', cache_discovery=False, **authenticate_kwargs)
def dataset_exists(self, dataset):
"""Returns whether the given dataset exists.
If regional location is specified for the dataset, that is also checked
to be compatible with the remote dataset, otherwise an exception is thrown.
:param dataset:
:type dataset: BQDataset
"""
try:
response = self.client.datasets().get(projectId=dataset.project_id,
datasetId=dataset.dataset_id).execute()
if dataset.location is not None:
fetched_location = response.get('location')
if dataset.location != fetched_location:
raise Exception('''Dataset already exists with regional location {}. Can't use {}.'''.format(
fetched_location if fetched_location is not None else 'unspecified',
dataset.location))
except http.HttpError as ex:
if ex.resp.status == 404:
return False
raise
return True
def table_exists(self, table):
"""Returns whether the given table exists.
:param table:
:type table: BQTable
"""
if not self.dataset_exists(table.dataset):
return False
try:
self.client.tables().get(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id).execute()
except http.HttpError as ex:
if ex.resp.status == 404:
return False
raise
return True
def make_dataset(self, dataset, raise_if_exists=False, body=None):
"""Creates a new dataset with the default permissions.
:param dataset:
:type dataset: BQDataset
:param raise_if_exists: whether to raise an exception if the dataset already exists.
:raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
"""
if body is None:
body = {}
try:
# Construct a message body in the format required by
# https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.datasets.html#insert
body['datasetReference'] = {
'projectId': dataset.project_id,
'datasetId': dataset.dataset_id
}
if dataset.location is not None:
body['location'] = dataset.location
self.client.datasets().insert(projectId=dataset.project_id, body=body).execute()
except http.HttpError as ex:
if ex.resp.status == 409:
if raise_if_exists:
raise luigi.target.FileAlreadyExists()
else:
raise
def delete_dataset(self, dataset, delete_nonempty=True):
"""Deletes a dataset (and optionally any tables in it), if it exists.
:param dataset:
:type dataset: BQDataset
:param delete_nonempty: if true, will delete any tables before deleting the dataset
"""
if not self.dataset_exists(dataset):
return
self.client.datasets().delete(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
deleteContents=delete_nonempty).execute()
def delete_table(self, table):
"""Deletes a table, if it exists.
:param table:
:type table: BQTable
"""
if not self.table_exists(table):
return
self.client.tables().delete(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id).execute()
def list_datasets(self, project_id):
"""Returns the list of datasets in a given project.
:param project_id:
:type project_id: str
"""
request = self.client.datasets().list(projectId=project_id,
maxResults=1000)
response = request.execute()
while response is not None:
for ds in response.get('datasets', []):
yield ds['datasetReference']['datasetId']
request = self.client.datasets().list_next(request, response)
if request is None:
break
response = request.execute()
def list_tables(self, dataset):
"""Returns the list of tables in a given dataset.
:param dataset:
:type dataset: BQDataset
"""
request = self.client.tables().list(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
maxResults=1000)
response = request.execute()
while response is not None:
for t in response.get('tables', []):
yield t['tableReference']['tableId']
request = self.client.tables().list_next(request, response)
if request is None:
break
response = request.execute()
def get_view(self, table):
"""Returns the SQL query for a view, or None if it doesn't exist or is not a view.
:param table: The table containing the view.
:type table: BQTable
"""
request = self.client.tables().get(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id)
try:
response = request.execute()
except http.HttpError as ex:
if ex.resp.status == 404:
return None
raise
return response['view']['query'] if 'view' in response else None
def update_view(self, table, view):
"""Updates the SQL query for a view.
If the output table exists, it is replaced with the supplied view query. Otherwise a new
table is created with this view.
:param table: The table to contain the view.
:type table: BQTable
:param view: The SQL query for the view.
:type view: str
"""
body = {
'tableReference': {
'projectId': table.project_id,
'datasetId': table.dataset_id,
'tableId': table.table_id
},
'view': {
'query': view
}
}
if self.table_exists(table):
self.client.tables().update(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id,
body=body).execute()
else:
self.client.tables().insert(projectId=table.project_id,
datasetId=table.dataset_id,
body=body).execute()
def run_job(self, project_id, body, dataset=None):
"""Runs a BigQuery "job". See the documentation for the format of body.
.. note::
You probably don't need to use this directly. Use the tasks defined below.
:param dataset:
:type dataset: BQDataset
"""
if dataset and not self.dataset_exists(dataset):
self.make_dataset(dataset)
new_job = self.client.jobs().insert(projectId=project_id, body=body).execute()
job_id = new_job['jobReference']['jobId']
logger.info('Started import job %s:%s', project_id, job_id)
while True:
status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute(num_retries=10)
if status['status']['state'] == 'DONE':
if status['status'].get('errorResult'):
raise Exception('BigQuery job failed: {}'.format(status['status']['errorResult']))
return
logger.info('Waiting for job %s:%s to complete...', project_id, job_id)
time.sleep(5)
def copy(self,
source_table,
dest_table,
create_disposition=CreateDisposition.CREATE_IF_NEEDED,
write_disposition=WriteDisposition.WRITE_TRUNCATE):
"""Copies (or appends) a table to another table.
:param source_table:
:type source_table: BQTable
:param dest_table:
:type dest_table: BQTable
:param create_disposition: whether to create the table if needed
:type create_disposition: CreateDisposition
:param write_disposition: whether to append/truncate/fail if the table exists
:type write_disposition: WriteDisposition
"""
job = {
"configuration": {
"copy": {
"sourceTable": {
"projectId": source_table.project_id,
"datasetId": source_table.dataset_id,
"tableId": source_table.table_id,
},
"destinationTable": {
"projectId": dest_table.project_id,
"datasetId": dest_table.dataset_id,
"tableId": dest_table.table_id,
},
"createDisposition": create_disposition,
"writeDisposition": write_disposition,
}
}
}
self.run_job(dest_table.project_id, job, dataset=dest_table.dataset)
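# Illustrative sketch (not in the original module): typical direct use of
# BigQueryClient. Credentials come from the environment; the project,
# dataset and table names below are hypothetical placeholders.
#
#   client = BigQueryClient()
#   table = BQTable(project_id='my-project', dataset_id='logs',
#                   table_id='events', location='US')
#   if not client.dataset_exists(table.dataset):
#       client.make_dataset(table.dataset)
#   print(list(client.list_tables(table.dataset)))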
class BigQueryTarget(luigi.target.Target):
def __init__(self, project_id, dataset_id, table_id, client=None, location=None):
self.table = BQTable(project_id=project_id, dataset_id=dataset_id, table_id=table_id, location=location)
self.client = client or BigQueryClient()
@classmethod
def from_bqtable(cls, table, client=None):
"""A constructor that takes a :py:class:`BQTable`.
:param table:
:type table: BQTable
"""
return cls(table.project_id, table.dataset_id, table.table_id, client=client)
def exists(self):
return self.client.table_exists(self.table)
def __str__(self):
return str(self.table)
class MixinBigQueryBulkComplete:
"""
Allows efficiently checking if a range of BigQueryTargets is complete.
This enables scheduling tasks with luigi range tools.
If you implement a custom Luigi task with a BigQueryTarget output, make sure to also inherit
from this mixin to enable range support.
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
# Instantiate the tasks to inspect them
tasks_with_params = [(cls(p), p) for p in parameter_tuples]
if not tasks_with_params:
return
# Grab the set of BigQuery datasets we are interested in
datasets = {t.output().table.dataset for t, p in tasks_with_params}
logger.info('Checking datasets %s for available tables', datasets)
# Query the available tables for all datasets
client = tasks_with_params[0][0].output().client
available_datasets = filter(client.dataset_exists, datasets)
available_tables = {d: set(client.list_tables(d)) for d in available_datasets}
# Return parameter_tuples belonging to available tables
for t, p in tasks_with_params:
table = t.output().table
if table.table_id in available_tables.get(table.dataset, []):
yield p
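# Illustrative sketch (not in the original module): with this mixin, luigi's
# range tooling can check many targets at once instead of probing each table
# separately. Module and task names below are hypothetical.
#
#   luigi --module my_tasks RangeDaily --of MyBigQueryTask \
#       --start 2018-01-01 --days-back 30
#
# RangeDaily invokes MyBigQueryTask.bulk_complete(...), which lists the
# tables of each affected dataset once and filters the results locally.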
class BigQueryLoadTask(MixinBigQueryBulkComplete, luigi.Task):
"""Load data into BigQuery from GCS."""
@property
def source_format(self):
"""The source format to use (see :py:class:`SourceFormat`)."""
return SourceFormat.NEWLINE_DELIMITED_JSON
@property
def encoding(self):
"""The encoding of the data that is going to be loaded (see :py:class:`Encoding`)."""
return Encoding.UTF_8
@property
def write_disposition(self):
"""What to do if the table already exists. By default this will fail the job.
See :py:class:`WriteDisposition`"""
return WriteDisposition.WRITE_EMPTY
@property
def schema(self):
"""Schema in the format defined at https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.schema.
If the value is falsy, it is omitted and inferred by BigQuery."""
return []
@property
def max_bad_records(self):
""" The maximum number of bad records that BigQuery can ignore when reading data.
If the number of bad records exceeds this value, an invalid error is returned in the job result."""
return 0
@property
def field_delimiter(self):
"""The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character."""
return FieldDelimiter.COMMA
def source_uris(self):
"""The fully-qualified URIs that point to your data in Google Cloud Storage.
Each URI can contain one '*' wildcard character and it must come after the 'bucket' name."""
return [x.path for x in luigi.task.flatten(self.input())]
@property
def skip_leading_rows(self):
"""The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
The default value is 0. This property is useful if you have header rows in the file that should be skipped."""
return 0
@property
def allow_jagged_rows(self):
"""Accept rows that are missing trailing optional columns. The missing values are treated as nulls.
If false, records with missing trailing columns are treated as bad records, and if there are too many bad records,
an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats."""
return False
@property
def ignore_unknown_values(self):
"""Indicates if BigQuery should allow extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns are treated as bad records,
and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
The sourceFormat property determines what BigQuery treats as an extra value:
CSV: trailing columns. JSON: named values that don't match any column names."""
return False
@property
def allow_quoted_new_lines(self):
""" Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."""
return False
def run(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
bq_client = output.client
source_uris = self.source_uris()
assert all(x.startswith('gs://') for x in source_uris)
job = {
'configuration': {
'load': {
'destinationTable': {
'projectId': output.table.project_id,
'datasetId': output.table.dataset_id,
'tableId': output.table.table_id,
},
'encoding': self.encoding,
'sourceFormat': self.source_format,
'writeDisposition': self.write_disposition,
'sourceUris': source_uris,
'maxBadRecords': self.max_bad_records,
'ignoreUnknownValues': self.ignore_unknown_values
}
}
}
if self.source_format == SourceFormat.CSV:
job['configuration']['load']['fieldDelimiter'] = self.field_delimiter
job['configuration']['load']['skipLeadingRows'] = self.skip_leading_rows
job['configuration']['load']['allowJaggedRows'] = self.allow_jagged_rows
job['configuration']['load']['allowQuotedNewlines'] = self.allow_quoted_new_lines
if self.schema:
job['configuration']['load']['schema'] = {'fields': self.schema}
else:
job['configuration']['load']['autodetect'] = True
bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset)
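# Illustrative sketch (not in the original module): a minimal load task.
# The upstream task, bucket path and BigQuery names are hypothetical.
#
#   class LoadEvents(BigQueryLoadTask):
#       source_format = SourceFormat.NEWLINE_DELIMITED_JSON
#       write_disposition = WriteDisposition.WRITE_TRUNCATE
#
#       def requires(self):
#           return ExportEventsToGCS()  # outputs gs://my-bucket/events/*.json
#
#       def output(self):
#           return BigQueryTarget('my-project', 'logs', 'events')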
class BigQueryRunQueryTask(MixinBigQueryBulkComplete, luigi.Task):
@property
def write_disposition(self):
"""What to do if the table already exists. By default this will fail the job.
See :py:class:`WriteDisposition`"""
return WriteDisposition.WRITE_TRUNCATE
@property
def create_disposition(self):
"""Whether to create the table or not. See :py:class:`CreateDisposition`"""
return CreateDisposition.CREATE_IF_NEEDED
@property
def flatten_results(self):
"""Flattens all nested and repeated fields in the query results.
allowLargeResults must be true if this is set to False."""
return True
@property
def query(self):
"""The query, in text form."""
raise NotImplementedError()
@property
def query_mode(self):
"""The query mode. See :py:class:`QueryMode`."""
return QueryMode.INTERACTIVE
@property
def udf_resource_uris(self):
"""Iterator of code resource to load from a Google Cloud Storage URI (gs://bucket/path).
"""
return []
@property
def use_legacy_sql(self):
"""Whether to use legacy SQL
"""
return True
def run(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
query = self.query
assert query, 'No query was provided'
bq_client = output.client
logger.info('Launching Query')
logger.info('Query destination: %s (%s)', output, self.write_disposition)
logger.info('Query SQL: %s', query)
job = {
'configuration': {
'query': {
'query': query,
'priority': self.query_mode,
'destinationTable': {
'projectId': output.table.project_id,
'datasetId': output.table.dataset_id,
'tableId': output.table.table_id,
},
'allowLargeResults': True,
'createDisposition': self.create_disposition,
'writeDisposition': self.write_disposition,
'flattenResults': self.flatten_results,
'userDefinedFunctionResources': [{"resourceUri": v} for v in self.udf_resource_uris],
'useLegacySql': self.use_legacy_sql,
}
}
}
bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset)
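# Illustrative sketch (not in the original module): a minimal query task
# using the default legacy SQL dialect. All names are hypothetical.
#
#   class DailyCounts(BigQueryRunQueryTask):
#       @property
#       def query(self):
#           return ('SELECT day, COUNT(*) AS n '
#                   'FROM [my-project:logs.events] GROUP BY day')
#
#       def output(self):
#           return BigQueryTarget('my-project', 'reports', 'daily_counts')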
class BigQueryCreateViewTask(luigi.Task):
"""
Creates (or updates) a view in BigQuery.
The output of this task needs to be a BigQueryTarget.
Instances of this class should specify the view SQL in the view property.
If a view already exists in BigQuery at output(), it will be updated.
"""
@property
def view(self):
"""The SQL query for the view, in text form."""
raise NotImplementedError()
def complete(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
if not output.exists():
return False
existing_view = output.client.get_view(output.table)
return existing_view == self.view
def run(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
view = self.view
assert view, 'No view was provided'
logger.info('Create view')
logger.info('Destination: %s', output)
logger.info('View SQL: %s', view)
output.client.update_view(output.table, view)
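# Illustrative sketch (not in the original module): a view task only needs
# `view` and `output`; `complete` re-runs the task whenever the SQL stored
# in BigQuery drifts from `view`. Names below are hypothetical.
#
#   class BigEventsView(BigQueryCreateViewTask):
#       @property
#       def view(self):
#           return 'SELECT * FROM [my-project:logs.events] WHERE size > 100'
#
#       def output(self):
#           return BigQueryTarget('my-project', 'logs', 'big_events')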
class ExternalBigQueryTask(MixinBigQueryBulkComplete, luigi.ExternalTask):
"""
An external task for a BigQuery target.
"""
pass
class BigQueryExtractTask(luigi.Task):
"""
Extracts (unloads) a table from BigQuery to GCS.
This tasks requires the input to be exactly one BigQueryTarget while the
output should be one or more GCSTargets from luigi.contrib.gcs depending on
the use of destinationUris property.
"""
@property
def destination_uris(self):
"""
The fully-qualified URIs that point to your data in Google Cloud
Storage. Each URI can contain one '*' wildcard character and it must
come after the 'bucket' name.
Wildcarded destinationUris in GCSQueryTarget might not be resolved
correctly and result in incomplete data. If a GCSQueryTarget is used to
pass wildcarded destinationUris, be sure to override this property to
suppress the warning.
"""
return [x.path for x in luigi.task.flatten(self.output())]
@property
def print_header(self):
"""Whether to print the header or not."""
return PrintHeader.TRUE
@property
def field_delimiter(self):
"""
The separator for fields in a CSV file. The separator can be any
ISO-8859-1 single-byte character.
"""
return FieldDelimiter.COMMA
@property
def destination_format(self):
"""
The destination format to use (see :py:class:`DestinationFormat`).
"""
return DestinationFormat.CSV
@property
def compression(self):
"""Whether to use compression."""
return Compression.NONE
def run(self):
input = luigi.task.flatten(self.input())[0]
assert (
isinstance(input, BigQueryTarget) or
(len(input) == 1 and isinstance(input[0], BigQueryTarget))), \
'Input must be exactly one BigQueryTarget, not %s' % (input)
bq_client = input.client
destination_uris = self.destination_uris
assert all(x.startswith('gs://') for x in destination_uris)
logger.info('Launching Extract Job')
logger.info('Extract source: %s', input)
logger.info('Extract destination: %s', destination_uris)
job = {
'configuration': {
'extract': {
'sourceTable': {
'projectId': input.table.project_id,
'datasetId': input.table.dataset_id,
'tableId': input.table.table_id
},
'destinationUris': destination_uris,
'destinationFormat': self.destination_format,
'compression': self.compression
}
}
}
if self.destination_format == 'CSV':
# "Only exports to CSV may specify a field delimiter."
job['configuration']['extract']['printHeader'] = self.print_header
job['configuration']['extract']['fieldDelimiter'] = \
self.field_delimiter
bq_client.run_job(
input.table.project_id,
job,
dataset=input.table.dataset)
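# Illustrative sketch (not in the original module): an extract task takes
# exactly one BigQueryTarget as input and writes GCS targets; destination
# URIs default to the output paths. Names below are hypothetical.
#
#   class ExportEvents(BigQueryExtractTask):
#       def requires(self):
#           return EventsInBigQuery()  # an ExternalBigQueryTask
#
#       def output(self):
#           return luigi.contrib.gcs.GCSTarget('gs://my-bucket/export/events-*.csv')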
# the original inconsistently capitalized aliases, for backwards compatibility
BigqueryClient = BigQueryClient
BigqueryTarget = BigQueryTarget
MixinBigqueryBulkComplete = MixinBigQueryBulkComplete
BigqueryLoadTask = BigQueryLoadTask
BigqueryRunQueryTask = BigQueryRunQueryTask
BigqueryCreateViewTask = BigQueryCreateViewTask
ExternalBigqueryTask = ExternalBigQueryTask
| 1 | 19,865 | Please add return and return type description to docstring of this method. | spotify-luigi | py |
@@ -51,8 +51,10 @@ class ContributionsController < ApplicationController
private
def set_contributor
- @contributor = ContributorFact.where(names: { id: params[:id] }).where(analysis_id: @project.best_analysis_id)
- .eager_load(:name).first
+ id = params[:id].to_i
+ @contributor = Contribution.find(id).name_fact if id > (1 << 32)
+ @contributor ||= ContributorFact.where(names: { id: id }).where(analysis_id: @project.best_analysis_id)
+ .eager_load(:name).first
fail ParamRecordNotFound unless @contributor
end
| 1 | class ContributionsController < ApplicationController
COMMITS_SPARK_IMAGE = 'app/assets/images/bot_stuff/contribution_commits_spark.png'
COMMITS_COMPOUND_SPARK_IMAGE = 'app/assets/images/bot_stuff/position_commits_compound_spark.png'
helper :kudos, :projects
helper MapHelper
before_action :set_project_or_fail, if: -> { params[:project_id] }
before_action :set_contribution, except: [:commits_spark, :commits_compound_spark, :index, :summary, :near]
before_action :set_contributor, only: [:commits_spark, :commits_compound_spark]
before_action :send_sample_image_if_bot, if: :bot?, only: [:commits_spark, :commits_compound_spark]
before_action :project_context, only: [:index, :show, :summary]
skip_before_action :store_location, only: [:commits_spark, :commits_compound_spark]
def index
fail ParamRecordNotFound unless @project
@contributions = @project.contributions
.sort(params[:sort])
.filter_by(params[:query])
.includes(person: :account, contributor_fact: :primary_language)
.references(:all)
.paginate(per_page: 20, page: page_param)
end
def show
fail ParamRecordNotFound unless @project
return redirect_to project_contributor_path(@project, @contribution) if @contribution.id != params[:id].to_i
@recent_kudos = @contribution.kudoable.recent_kudos || []
end
def summary
@newest_contributions = @project.newest_contributions
@top_contributions = @project.top_contributions
@analysis = @project.best_analysis
end
def near
render text: view_context.map_near_contributors_json(@project, params)
end
def commits_spark
spark_image = Spark::SimpleSpark.new(@contributor.monthly_commits, max_value: 50).render.to_blob
send_data spark_image, type: 'image/png', filename: 'commits.png', disposition: 'inline'
end
def commits_compound_spark
spark_image = Spark::CompoundSpark.new(@contributor.monthly_commits(11), max_value: 50).render.to_blob
send_data spark_image, type: 'image/png', filename: 'commits.png', disposition: 'inline'
end
private
def set_contributor
@contributor = ContributorFact.where(names: { id: params[:id] }).where(analysis_id: @project.best_analysis_id)
.eager_load(:name).first
fail ParamRecordNotFound unless @contributor
end
def send_sample_image_if_bot
image_path = "#{Rails.root}/#{self.class.const_get(action_name.upcase + '_IMAGE')}"
send_file image_path, filename: 'commits.png', type: 'image/png', disposition: 'inline'
end
def set_contribution
@contribution = @project.contributions.find_by(id: params[:id].to_i)
# It's possible that the contributor we are looking for has been aliased to a new name.
# Redirect to the new name if we can find it.
@contribution ||= Contribution.find_indirectly(contribution_id: params[:id].to_i, project: @project)
fail ParamRecordNotFound unless @contribution
end
end
| 1 | 8,003 | What does 1 << 32 do? In irb typing this yields 4294967296. I didn't find the append operator in Numeric or Integer. | blackducksoftware-ohloh-ui | rb |
@@ -79,6 +79,16 @@ module RSpec
RSpec.configuration.format_docstrings_block.call(description)
end
+ # @attr_accessor
+ #
+ # Holds the completion status of the example (nil if not completed)
+ attr_accessor :succeeded
+
+ # Convenience method for getting success status of example
+ def succeeded?
+ @succeeded
+ end
+
# @attr_reader
#
# Returns the first exception raised in the context of running this | 1 | module RSpec
module Core
# Wrapper for an instance of a subclass of {ExampleGroup}. An instance of
# `RSpec::Core::Example` is returned by example definition methods
# such as {ExampleGroup.it it} and is yielded to the {ExampleGroup.it it},
# {Hooks#before before}, {Hooks#after after}, {Hooks#around around},
# {MemoizedHelpers::ClassMethods#let let} and
# {MemoizedHelpers::ClassMethods#subject subject} blocks.
#
# This allows us to provide rich metadata about each individual
# example without adding tons of methods directly to the ExampleGroup
# that users may inadvertently redefine.
#
# Useful for configuring logging and/or taking some action based
# on the state of an example's metadata.
#
# @example
#
# RSpec.configure do |config|
# config.before do |example|
# log example.description
# end
#
# config.after do |example|
# log example.description
# end
#
# config.around do |example|
# log example.description
# example.run
# end
# end
#
# shared_examples "auditable" do
# it "does something" do
# log "#{example.full_description}: #{auditable.inspect}"
# auditable.should do_something
# end
# end
#
# @see ExampleGroup
# @note Example blocks are evaluated in the context of an instance
# of an `ExampleGroup`, not in the context of an instance of `Example`.
class Example
# @private
#
# Used to define methods that delegate to this example's metadata
def self.delegate_to_metadata(key)
define_method(key) { @metadata[key] }
end
# @return [ExecutionResult] represents the result of running this example.
delegate_to_metadata :execution_result
# @return [String] the relative path to the file where this example was defined.
delegate_to_metadata :file_path
# @return [String] the full description (including the docstrings of
# all parent example groups).
delegate_to_metadata :full_description
# @return [String] the exact source location of this example in a form
# like `./path/to/spec.rb:17`
delegate_to_metadata :location
# @return [Boolean] flag that indicates that the example is not expected to pass.
# It will be run and will either have a pending result (if a failure occurs)
# or a failed result (if no failure occurs).
delegate_to_metadata :pending
# @return [Boolean] flag that will cause the example to not run.
# The {ExecutionResult} status will be `:pending`.
delegate_to_metadata :skip
# Returns the string submitted to `example` or its aliases (e.g.
# `specify`, `it`, etc). If no string is submitted (e.g. `it { is_expected.to
# do_something }`) it returns the message generated by the matcher if
# there is one, otherwise returns a message including the location of the
# example.
def description
description = metadata[:description].to_s.empty? ?
"example at #{location}" :
metadata[:description]
RSpec.configuration.format_docstrings_block.call(description)
end
# @attr_reader
#
# Returns the first exception raised in the context of running this
# example (nil if no exception is raised)
attr_reader :exception
# @attr_reader
#
# Returns the metadata object associated with this example.
attr_reader :metadata
# @attr_reader
# @private
#
# Returns the example_group_instance that provides the context for
# running this example.
attr_reader :example_group_instance
# @attr
# @private
attr_accessor :clock
# Creates a new instance of Example.
# @param example_group_class [Class] the subclass of ExampleGroup in which this Example is declared
# @param description [String] the String passed to the `it` method (or alias)
# @param user_metadata [Hash] additional args passed to `it` to be used as metadata
# @param example_block [Proc] the block of code that represents the example
# @api private
def initialize(example_group_class, description, user_metadata, example_block=nil)
@example_group_class = example_group_class
@example_block = example_block
@metadata = Metadata::ExampleHash.create(
@example_group_class.metadata, user_metadata, description, example_block
)
@example_group_instance = @exception = nil
@clock = RSpec::Core::Time
end
# Returns the example group class that provides the context for running
# this example.
def example_group
@example_group_class
end
alias_method :pending?, :pending
alias_method :skipped?, :skip
# @api private
# instance_execs the block passed to the constructor in the context of
# the instance of {ExampleGroup}.
# @param example_group_instance the instance of an ExampleGroup subclass
def run(example_group_instance, reporter)
@example_group_instance = example_group_instance
RSpec.current_example = self
start(reporter)
begin
if skipped?
Pending.mark_pending! self, skip
elsif !RSpec.configuration.dry_run?
with_around_example_hooks do
begin
run_before_example
@example_group_instance.instance_exec(self, &@example_block)
if pending?
Pending.mark_fixed! self
raise Pending::PendingExampleFixedError,
'Expected example to fail since it is pending, but it passed.',
[location]
end
rescue Pending::SkipDeclaredInExample
# no-op, required metadata has already been set by the `skip`
# method.
rescue Exception => e
set_exception(e)
ensure
run_after_example
end
end
end
rescue Exception => e
set_exception(e)
ensure
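          # Nil out instance variables on the group instance so objects
          # created during the example don't stay pinned in memory.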
@example_group_instance.instance_variables.each do |ivar|
@example_group_instance.instance_variable_set(ivar, nil)
end
@example_group_instance = nil
end
finish(reporter)
ensure
RSpec.current_example = nil
end
# Wraps both a `Proc` and an {Example} for use in {Hooks#around
# around} hooks. In around hooks we need to yield this special
# kind of object (rather than the raw {Example}) because when
# there are multiple `around` hooks we have to wrap them recursively.
#
# @example
#
# RSpec.configure do |c|
# c.around do |ex| # Procsy which wraps the example
# if ex.metadata[:key] == :some_value && some_global_condition
# raise "some message"
# end
# ex.run # run delegates to ex.call
# end
# end
#
# @note This class also exposes the instance methods of {Example},
# proxying them through to the wrapped {Example} instance.
class Procsy
# The {Example} instance.
attr_reader :example
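        # Forward both Example's and Proc's public instance methods to the
        # wrapped @example and @proc so a Procsy can stand in for either.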
Example.public_instance_methods(false).each do |name|
define_method(name) { |*a, &b| @example.__send__(name, *a, &b) }
end
Proc.public_instance_methods(false).each do |name|
define_method(name) { |*a, &b| @proc.__send__(name, *a, &b) }
end
alias run call
def initialize(example, &block)
@example = example
@proc = block
end
# @private
def wrap(&block)
self.class.new(example, &block)
end
end
# @private
def any_apply?(filters)
MetadataFilter.any_apply?(filters, metadata)
end
# @private
def all_apply?(filters)
MetadataFilter.all_apply?(filters, metadata) || @example_group_class.all_apply?(filters)
end
# @private
def around_example_hooks
@around_example_hooks ||= example_group.hooks.around_example_hooks_for(self)
end
# @private
#
# Used internally to set an exception in an after hook, which
# captures the exception but doesn't raise it.
def set_exception(exception, context=nil)
if pending? && !(Pending::PendingExampleFixedError === exception)
execution_result.pending_exception = exception
else
if @exception
# An error has already been set; we don't want to override it,
            # but we also don't want to silence the error, so let's print it.
msg = <<-EOS
An error occurred #{context}
#{exception.class}: #{exception.message}
occurred at #{exception.backtrace.first}
EOS
RSpec.configuration.reporter.message(msg)
end
@exception ||= exception
end
end
# @private
#
# Used internally to set an exception and fail without actually executing
# the example when an exception is raised in before(:context).
def fail_with_exception(reporter, exception)
start(reporter)
set_exception(exception)
finish(reporter)
end
# @private
#
# Used internally to skip without actually executing the example when
# skip is used in before(:context)
def skip_with_exception(reporter, exception)
start(reporter)
Pending.mark_skipped! self, exception.argument
finish(reporter)
end
# @private
def instance_exec_with_rescue(context, &block)
@example_group_instance.instance_exec(self, &block)
rescue Exception => e
set_exception(e, context)
end
# @private
def instance_exec(*args, &block)
@example_group_instance.instance_exec(*args, &block)
end
private
def with_around_example_hooks(&block)
if around_example_hooks.empty?
yield
else
@example_group_class.hooks.run(:around, :example, self, Procsy.new(self, &block))
end
rescue Exception => e
set_exception(e, "in an `around(:example)` hook")
end
def start(reporter)
reporter.example_started(self)
execution_result.started_at = clock.now
end
def finish(reporter)
pending_message = execution_result.pending_message
if @exception
record_finished :failed
execution_result.exception = @exception
reporter.example_failed self
false
elsif pending_message
record_finished :pending
execution_result.pending_message = pending_message
reporter.example_pending self
true
else
record_finished :passed
reporter.example_passed self
true
end
end
def record_finished(status)
execution_result.record_finished(status, clock.now)
end
def run_before_example
@example_group_instance.setup_mocks_for_rspec
@example_group_class.hooks.run(:before, :example, self)
end
def run_after_example
@example_group_class.hooks.run(:after, :example, self)
verify_mocks
assign_generated_description if RSpec.configuration.expecting_with_rspec?
rescue Exception => e
set_exception(e, "in an `after(:example)` hook")
ensure
@example_group_instance.teardown_mocks_for_rspec
end
def verify_mocks
@example_group_instance.verify_mocks_for_rspec if mocks_need_verification?
rescue Exception => e
if pending?
execution_result.pending_fixed = false
@exception = nil
else
set_exception(e)
end
end
def mocks_need_verification?
exception.nil? || execution_result.pending_fixed?
end
def assign_generated_description
if metadata[:description].empty? && (description = RSpec::Matchers.generated_description)
metadata[:description] = description
metadata[:full_description] << description
end
rescue Exception => e
set_exception(e, "while assigning the example description")
ensure
RSpec::Matchers.clear_generated_description
end
def skip_message
if String === skip
skip
else
Pending::NO_REASON_GIVEN
end
end
# Represents the result of executing an example.
# Behaves like a hash for backwards compatibility.
class ExecutionResult
include HashImitatable
# @return [Symbol] `:passed`, `:failed` or `:pending`.
attr_accessor :status
# @return [Exception, nil] The failure, if there was one.
attr_accessor :exception
# @return [Time] When the example started.
attr_accessor :started_at
# @return [Time] When the example finished.
attr_accessor :finished_at
# @return [Float] How long the example took in seconds.
attr_accessor :run_time
# @return [String, nil] The reason the example was pending,
# or nil if the example was not pending.
attr_accessor :pending_message
# @return [Exception, nil] The exception triggered while
# executing the pending example. If no exception was triggered
# it would no longer get a status of `:pending` unless it was
# tagged with `:skip`.
attr_accessor :pending_exception
# @return [Boolean] For examples tagged with `:pending`,
# this indicates whether or not it now passes.
attr_accessor :pending_fixed
alias pending_fixed? pending_fixed
# @api private
# Records the finished status of the example.
def record_finished(status, finished_at)
self.status = status
self.finished_at = finished_at
self.run_time = (finished_at - started_at).to_f
end
private
# For backwards compatibility we present `status` as a string
# when presenting the legacy hash interface.
def hash_for_delegation
super.tap do |hash|
hash[:status] &&= status.to_s
end
end
def set_value(name, value)
value &&= value.to_sym if name == :status
super(name, value)
end
def get_value(name)
if name == :status
status.to_s if status
else
super
end
end
def issue_deprecation(method_name, *args)
RSpec.deprecate("Treating `metadata[:execution_result]` as a hash",
:replacement => "the attributes methods to access the data")
end
end
end
# @private
# Provides an execution context for before/after :suite hooks.
class SuiteHookContext < Example
def initialize
super(AnonymousExampleGroup, "", {})
end
# To ensure we don't silence errors...
def set_exception(exception, context=nil)
raise exception
end
end
end
end
| 1 | 13,031 | We generally use metadata for this sort of thing, indeed there is a `metadata[:execution_result]`. | rspec-rspec-core | rb |
@@ -483,6 +483,8 @@ class SpreadPlot(ElementPlot):
style_opts = line_properties + fill_properties
_plot_methods = dict(single='patch')
+ _stream_data = False # Plot does not support streaming data
+
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y')
xvals = element.dimension_values(0) | 1 | from collections import defaultdict
import numpy as np
import param
from bokeh.models import CategoricalColorMapper, CustomJS, Whisker, Range1d
from bokeh.models.tools import BoxSelectTool
from bokeh.transform import jitter
from ...core import Dataset, OrderedDict
from ...core.util import max_range, basestring, dimension_sanitizer, isfinite
from ...element import Bars
from ...operation import interpolate_curve
from ..util import compute_sizes, get_min_distance, dim_axis_label
from .element import (ElementPlot, ColorbarPlot, LegendPlot, line_properties,
fill_properties)
from .util import expand_batched_style, categorize_array, rgb2hex, mpl_to_bokeh
class PointPlot(LegendPlot, ColorbarPlot):
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
        Index of the dimension from which the color will be drawn""")
size_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
        Index of the dimension from which the sizes will be drawn.""")
scaling_method = param.ObjectSelector(default="area",
objects=["width", "area"],
doc="""
Determines whether the `scaling_factor` should be applied to
the width or area of each point (default: "area").""")
jitter = param.Number(default=None, bounds=(0, None), doc="""
The amount of jitter to apply to offset the points along the x-axis.""")
scaling_factor = param.Number(default=1, bounds=(0, None), doc="""
Scaling factor which is applied to either the width or area
of each point, depending on the value of `scaling_method`.""")
size_fn = param.Callable(default=np.abs, doc="""
Function applied to size values before applying scaling,
to remove values lower than zero.""")
style_opts = (['cmap', 'palette', 'marker', 'size'] +
line_properties + fill_properties)
_plot_methods = dict(single='scatter', batched='scatter')
_batched_style_opts = line_properties + fill_properties + ['size']
def _get_size_data(self, element, ranges, style):
data, mapping = {}, {}
sdim = element.get_dimension(self.size_index)
if not sdim or self.static_source:
return data, mapping
map_key = 'size_' + sdim.name
ms = style.get('size', np.sqrt(6))**2
sizes = element.dimension_values(self.size_index)
sizes = compute_sizes(sizes, self.size_fn,
self.scaling_factor,
self.scaling_method, ms)
if sizes is None:
eltype = type(element).__name__
self.warning('%s dimension is not numeric, cannot '
                         'be used to scale %s size.' % (sdim.pprint_label, eltype))
else:
data[map_key] = np.sqrt(sizes)
mapping['size'] = map_key
return data, mapping
def get_data(self, element, ranges, style):
dims = element.dimensions(label=True)
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
mapping = dict(x=dims[xidx], y=dims[yidx])
data = {}
if not self.static_source or self.batched:
xdim, ydim = dims[xidx], dims[yidx]
data[xdim] = element.dimension_values(xidx)
data[ydim] = element.dimension_values(yidx)
self._categorize_data(data, (xdim, ydim), element.dimensions())
cdata, cmapping = self._get_color_data(element, ranges, style)
data.update(cdata)
mapping.update(cmapping)
sdata, smapping = self._get_size_data(element, ranges, style)
data.update(sdata)
mapping.update(smapping)
if self.jitter:
axrange = 'y_range' if self.invert_axes else 'x_range'
mapping['x'] = jitter(dims[xidx], self.jitter,
range=self.handles[axrange])
self._get_hover_data(data, element)
return data, mapping, style
def get_batched_data(self, element, ranges):
data = defaultdict(list)
zorders = self._updated_zorders(element)
for (key, el), zorder in zip(element.data.items(), zorders):
self.set_param(**self.lookup_options(el, 'plot').options)
style = self.lookup_options(element.last, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
for k, eld in eldata.items():
data[k].append(eld)
# Skip if data is empty
if not eldata:
continue
# Apply static styles
nvals = len(list(eldata.values())[0])
sdata, smapping = expand_batched_style(style, self._batched_style_opts,
elmapping, nvals)
elmapping.update(smapping)
for k, v in sdata.items():
data[k].append(v)
if 'hover' in self.handles:
for dim, k in zip(element.dimensions(), key):
sanitized = dimension_sanitizer(dim.name)
data[sanitized].append([k]*nvals)
data = {k: np.concatenate(v) for k, v in data.items()}
return data, elmapping, style
class VectorFieldPlot(ColorbarPlot):
arrow_heads = param.Boolean(default=True, doc="""
Whether or not to draw arrow heads.""")
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
        Index of the dimension from which the color will be drawn""")
size_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
        Index of the dimension from which the sizes will be drawn.""")
normalize_lengths = param.Boolean(default=True, doc="""
Whether to normalize vector magnitudes automatically. If False,
it will be assumed that the lengths have already been correctly
normalized.""")
pivot = param.ObjectSelector(default='mid', objects=['mid', 'tip', 'tail'],
doc="""
        The point around which the arrows should pivot; valid options
        include 'mid', 'tip' and 'tail'.""")
rescale_lengths = param.Boolean(default=True, doc="""
Whether the lengths will be rescaled to take into account the
smallest non-zero distance between two vectors.""")
style_opts = line_properties + ['scale', 'cmap']
_plot_methods = dict(single='segment')
def _get_lengths(self, element, ranges):
mag_dim = element.get_dimension(self.size_index)
(x0, x1), (y0, y1) = (element.range(i) for i in range(2))
if mag_dim:
magnitudes = element.dimension_values(mag_dim)
_, max_magnitude = ranges[mag_dim.name]
if self.normalize_lengths and max_magnitude != 0:
magnitudes = magnitudes / max_magnitude
if self.rescale_lengths:
base_dist = get_min_distance(element)
magnitudes *= base_dist
else:
magnitudes = np.ones(len(element))
if self.rescale_lengths:
base_dist = get_min_distance(element)
magnitudes *= base_dist
return magnitudes
def _glyph_properties(self, *args):
properties = super(VectorFieldPlot, self)._glyph_properties(*args)
properties.pop('scale', None)
return properties
def get_data(self, element, ranges, style):
input_scale = style.pop('scale', 1.0)
# Get x, y, angle, magnitude and color data
rads = element.dimension_values(2)
if self.invert_axes:
xidx, yidx = (1, 0)
rads = rads+1.5*np.pi
else:
xidx, yidx = (0, 1)
lens = self._get_lengths(element, ranges)/input_scale
cdim = element.get_dimension(self.color_index)
cdata, cmapping = self._get_color_data(element, ranges, style,
name='line_color')
# Compute segments and arrowheads
xs = element.dimension_values(xidx)
ys = element.dimension_values(yidx)
# Compute offset depending on pivot option
xoffsets = np.cos(rads)*lens/2.
yoffsets = np.sin(rads)*lens/2.
if self.pivot == 'mid':
nxoff, pxoff = xoffsets, xoffsets
nyoff, pyoff = yoffsets, yoffsets
elif self.pivot == 'tip':
nxoff, pxoff = 0, xoffsets*2
nyoff, pyoff = 0, yoffsets*2
elif self.pivot == 'tail':
nxoff, pxoff = xoffsets*2, 0
nyoff, pyoff = yoffsets*2, 0
x0s, x1s = (xs + nxoff, xs - pxoff)
y0s, y1s = (ys + nyoff, ys - pyoff)
if self.arrow_heads:
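            # Each arrow head is drawn as two short segments angled 45
            # degrees either side of the vector, a quarter of its length.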
arrow_len = (lens/4.)
xa1s = x0s - np.cos(rads+np.pi/4)*arrow_len
ya1s = y0s - np.sin(rads+np.pi/4)*arrow_len
xa2s = x0s - np.cos(rads-np.pi/4)*arrow_len
ya2s = y0s - np.sin(rads-np.pi/4)*arrow_len
x0s = np.concatenate([x0s, x0s, x0s])
x1s = np.concatenate([x1s, xa1s, xa2s])
y0s = np.concatenate([y0s, y0s, y0s])
y1s = np.concatenate([y1s, ya1s, ya2s])
if cdim:
color = cdata.get(cdim.name)
color = np.concatenate([color, color, color])
elif cdim:
color = cdata.get(cdim.name)
data = {'x0': x0s, 'x1': x1s, 'y0': y0s, 'y1': y1s}
mapping = dict(x0='x0', x1='x1', y0='y0', y1='y1')
if cdim:
data[cdim.name] = color
mapping.update(cmapping)
return (data, mapping, style)
class CurvePlot(ElementPlot):
interpolation = param.ObjectSelector(objects=['linear', 'steps-mid',
'steps-pre', 'steps-post'],
default='linear', doc="""
Defines how the samples of the Curve are interpolated,
default is 'linear', other options include 'steps-mid',
'steps-pre' and 'steps-post'.""")
style_opts = line_properties
_plot_methods = dict(single='line', batched='multi_line')
_batched_style_opts = line_properties
def get_data(self, element, ranges, style):
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
x = element.get_dimension(xidx).name
y = element.get_dimension(yidx).name
if self.static_source and not self.batched:
return {}, dict(x=x, y=y), style
if 'steps' in self.interpolation:
element = interpolate_curve(element, interpolation=self.interpolation)
data = {x: element.dimension_values(xidx),
y: element.dimension_values(yidx)}
self._get_hover_data(data, element)
self._categorize_data(data, (x, y), element.dimensions())
return (data, dict(x=x, y=y), style)
def _hover_opts(self, element):
if self.batched:
dims = list(self.hmap.last.kdims)
line_policy = 'prev'
else:
dims = list(self.overlay_dims.keys())+element.dimensions()
line_policy = 'nearest'
return dims, dict(line_policy=line_policy)
def get_batched_data(self, overlay, ranges):
data = defaultdict(list)
zorders = self._updated_zorders(overlay)
for (key, el), zorder in zip(overlay.data.items(), zorders):
self.set_param(**self.lookup_options(el, 'plot').options)
style = self.lookup_options(el, 'style')
style = style.max_cycles(len(self.ordering))[zorder]
eldata, elmapping, style = self.get_data(el, ranges, style)
            # Skip if data is empty
if not eldata:
continue
for k, eld in eldata.items():
data[k].append(eld)
# Apply static styles
sdata, smapping = expand_batched_style(style, self._batched_style_opts,
elmapping, nvals=1)
elmapping.update(smapping)
for k, v in sdata.items():
data[k].append(v[0])
for d, k in zip(overlay.kdims, key):
sanitized = dimension_sanitizer(d.name)
data[sanitized].append(k)
data = {opt: vals for opt, vals in data.items()
if not any(v is None for v in vals)}
mapping = {{'x': 'xs', 'y': 'ys'}.get(k, k): v
for k, v in elmapping.items()}
return data, mapping, style
class HistogramPlot(ElementPlot):
style_opts = line_properties + fill_properties
_plot_methods = dict(single='quad')
def get_data(self, element, ranges, style):
if self.invert_axes:
mapping = dict(top='right', bottom='left', left=0, right='top')
else:
mapping = dict(top='top', bottom=0, left='left', right='right')
if self.static_source:
data = dict(top=[], left=[], right=[])
else:
x = element.kdims[0]
values = element.dimension_values(1)
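            # Bin edges include both boundaries, so there is one more edge
            # than there are values.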
edges = element.interface.coords(element, x, edges=True)
data = dict(top=values, left=edges[:-1], right=edges[1:])
self._get_hover_data(data, element)
return (data, mapping, style)
def get_extents(self, element, ranges):
x0, y0, x1, y1 = super(HistogramPlot, self).get_extents(element, ranges)
ylow, yhigh = element.get_dimension(1).range
y0 = ylow if isfinite(ylow) else np.nanmin([0, y0])
y1 = yhigh if isfinite(yhigh) else np.nanmax([0, y1])
return (x0, y0, x1, y1)
class SideHistogramPlot(ColorbarPlot, HistogramPlot):
style_opts = HistogramPlot.style_opts + ['cmap']
height = param.Integer(default=125, doc="The height of the plot")
width = param.Integer(default=125, doc="The width of the plot")
show_title = param.Boolean(default=False, doc="""
Whether to display the plot title.""")
default_tools = param.List(default=['save', 'pan', 'wheel_zoom',
'box_zoom', 'reset'],
doc="A list of plugin tools to use on the plot.")
_callback = """
color_mapper.low = cb_data['geometry']['{axis}0'];
color_mapper.high = cb_data['geometry']['{axis}1'];
source.change.emit()
main_source.change.emit()
"""
def __init__(self, *args, **kwargs):
super(SideHistogramPlot, self).__init__(*args, **kwargs)
if self.invert_axes:
self.default_tools.append('ybox_select')
else:
self.default_tools.append('xbox_select')
def get_data(self, element, ranges, style):
data, mapping, style = HistogramPlot.get_data(self, element, ranges, style)
color_dims = [d for d in self.adjoined.traverse(lambda x: x.handles.get('color_dim'))
if d is not None]
dim = color_dims[0] if color_dims else None
cmapper = self._get_colormapper(dim, element, {}, {})
if cmapper and dim in element.dimensions():
data[dim.name] = [] if self.static_source else element.dimension_values(dim)
mapping['fill_color'] = {'field': dim.name,
'transform': cmapper}
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
ret = super(SideHistogramPlot, self)._init_glyph(plot, mapping, properties)
if not 'field' in mapping.get('fill_color', {}):
return ret
dim = mapping['fill_color']['field']
sources = self.adjoined.traverse(lambda x: (x.handles.get('color_dim'),
x.handles.get('source')))
sources = [src for cdim, src in sources if cdim == dim]
tools = [t for t in self.handles['plot'].tools
if isinstance(t, BoxSelectTool)]
if not tools or not sources:
return
box_select, main_source = tools[0], sources[0]
handles = {'color_mapper': self.handles['color_mapper'],
'source': self.handles['source'],
'cds': self.handles['source'],
'main_source': main_source}
axis = 'y' if self.invert_axes else 'x'
callback = self._callback.format(axis=axis)
if box_select.callback:
box_select.callback.code += callback
box_select.callback.args.update(handles)
else:
box_select.callback = CustomJS(args=handles, code=callback)
return ret
class ErrorPlot(ElementPlot):
style_opts = line_properties + ['lower_head', 'upper_head']
_mapping = dict(base="base", upper="upper", lower="lower")
_plot_methods = dict(single=Whisker)
def get_data(self, element, ranges, style):
mapping = dict(self._mapping)
if self.static_source:
return {}, mapping, style
base = element.dimension_values(0)
ys = element.dimension_values(1)
if len(element.vdims) > 2:
neg, pos = (element.dimension_values(vd) for vd in element.vdims[1:3])
lower, upper = ys-neg, ys+pos
else:
err = element.dimension_values(2)
lower, upper = ys-err, ys+err
data = dict(base=base, lower=lower, upper=upper)
if self.invert_axes:
mapping['dimension'] = 'width'
else:
mapping['dimension'] = 'height'
self._categorize_data(data, ('base',), element.dimensions())
return (data, mapping, style)
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
properties.pop('legend', None)
for prop in ['color', 'alpha']:
if prop not in properties:
continue
pval = properties.pop(prop)
line_prop = 'line_%s' % prop
fill_prop = 'fill_%s' % prop
if line_prop not in properties:
properties[line_prop] = pval
if fill_prop not in properties and fill_prop in self.style_opts:
properties[fill_prop] = pval
properties = mpl_to_bokeh(properties)
plot_method = self._plot_methods['single']
glyph = plot_method(**dict(properties, **mapping))
plot.add_layout(glyph)
return None, glyph
class SpreadPlot(ElementPlot):
style_opts = line_properties + fill_properties
_plot_methods = dict(single='patch')
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y')
xvals = element.dimension_values(0)
mean = element.dimension_values(1)
neg_error = element.dimension_values(2)
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
lower = mean - neg_error
upper = mean + pos_error
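        # Trace the lower bound forwards and the upper bound backwards to
        # form a closed polygon for the patch glyph.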
band_x = np.append(xvals, xvals[::-1])
band_y = np.append(lower, upper[::-1])
if self.invert_axes:
data = dict(x=band_y, y=band_x)
else:
data = dict(x=band_x, y=band_y)
return data, mapping, style
class AreaPlot(SpreadPlot):
_stream_data = False # Plot does not support streaming data
def get_extents(self, element, ranges):
vdims = element.vdims
vdim = vdims[0].name
if len(vdims) > 1:
ranges[vdim] = max_range([ranges[vd.name] for vd in vdims])
else:
vdim = vdims[0].name
ranges[vdim] = (np.nanmin([0, ranges[vdim][0]]), ranges[vdim][1])
return super(AreaPlot, self).get_extents(element, ranges)
def get_data(self, element, ranges, style):
mapping = dict(x='x', y='y')
xs = element.dimension_values(0)
x2 = np.hstack((xs[::-1], xs))
if len(element.vdims) > 1:
bottom = element.dimension_values(2)
else:
bottom = np.zeros(len(element))
ys = np.hstack((bottom[::-1], element.dimension_values(1)))
if self.invert_axes:
data = dict(x=ys, y=x2)
else:
data = dict(x=x2, y=ys)
return data, mapping, style
class SpikesPlot(ColorbarPlot):
color_index = param.ClassSelector(default=None, allow_None=True,
class_=(basestring, int), doc="""
        Index of the dimension from which the color will be drawn""")
spike_length = param.Number(default=0.5, doc="""
The length of each spike if Spikes object is one dimensional.""")
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
show_legend = param.Boolean(default=True, doc="""
Whether to show legend for the plot.""")
style_opts = (['color', 'cmap', 'palette'] + line_properties)
_plot_methods = dict(single='segment')
def get_extents(self, element, ranges):
l, b, r, t = super(SpikesPlot, self).get_extents(element, ranges)
if len(element.dimensions()) == 1:
if self.batched:
bs, ts = [], []
# Iterate over current NdOverlay and compute extents
# from position and length plot options
frame = self.current_frame or self.hmap.last
for el in frame.values():
opts = self.lookup_options(el, 'plot').options
pos = opts.get('position', self.position)
length = opts.get('spike_length', self.spike_length)
bs.append(pos)
ts.append(pos+length)
b = np.nanmin(bs)
t = np.nanmax(ts)
else:
b, t = self.position, self.position+self.spike_length
else:
b = np.nanmin([0, b])
t = np.nanmax([0, t])
return l, b, r, t
def get_data(self, element, ranges, style):
dims = element.dimensions(label=True)
data = {}
pos = self.position
if len(element) == 0 or self.static_source:
data = {'x': [], 'y0': [], 'y1': []}
else:
data['x'] = element.dimension_values(0)
data['y0'] = np.full(len(element), pos)
if len(dims) > 1:
data['y1'] = element.dimension_values(1)+pos
else:
data['y1'] = data['y0']+self.spike_length
if self.invert_axes:
mapping = {'x0': 'y0', 'x1': 'y1', 'y0': 'x', 'y1': 'x'}
else:
mapping = {'x0': 'x', 'x1': 'x', 'y0': 'y0', 'y1': 'y1'}
cdim = element.get_dimension(self.color_index)
if cdim:
cmapper = self._get_colormapper(cdim, element, ranges, style)
data[cdim.name] = [] if self.static_source else element.dimension_values(cdim)
mapping['color'] = {'field': cdim.name,
'transform': cmapper}
if 'hover' in self.handles and not self.static_source:
for d in dims:
data[dimension_sanitizer(d)] = element.dimension_values(d)
return data, mapping, style
class SideSpikesPlot(SpikesPlot):
"""
SpikesPlot with useful defaults for plotting adjoined rug plot.
"""
xaxis = param.ObjectSelector(default='top-bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='right-bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
        'right', 'bare', 'left-bare' and 'right-bare'.""")
border = param.Integer(default=5, doc="Default borders on plot")
height = param.Integer(default=50, doc="Height of plot")
width = param.Integer(default=50, doc="Width of plot")
class BarPlot(ColorbarPlot, LegendPlot):
"""
BarPlot allows generating single- or multi-category
bar Charts, by selecting which key dimensions are
mapped onto separate groups, categories and stacks.
"""
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
        Index of the dimension from which the color will be drawn""")
group_index = param.ClassSelector(default=1, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into groups.""")
stack_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension in the supplied Bars
        Element, which will be stacked.""")
style_opts = line_properties + fill_properties + ['width', 'bar_width', 'cmap']
_plot_methods = dict(single=('vbar', 'hbar'))
# Declare that y-range should auto-range if not bounded
_y_range_type = Range1d
def get_extents(self, element, ranges):
"""
Make adjustments to plot extents by computing
stacked bar heights, adjusting the bar baseline
and forcing the x-axis to be categorical.
"""
if self.batched:
overlay = self.current_frame
element = Bars(overlay.table(), kdims=element.kdims+overlay.kdims,
vdims=element.vdims)
for kd in overlay.kdims:
ranges[kd.name] = overlay.range(kd)
stacked = element.get_dimension(self.stack_index)
extents = super(BarPlot, self).get_extents(element, ranges)
xdim = element.kdims[0]
ydim = element.vdims[0]
# Compute stack heights
if stacked:
ds = Dataset(element)
pos_range = ds.select(**{ydim.name: (0, None)}).aggregate(xdim, function=np.sum).range(ydim)
neg_range = ds.select(**{ydim.name: (None, 0)}).aggregate(xdim, function=np.sum).range(ydim)
y0, y1 = max_range([pos_range, neg_range])
else:
y0, y1 = ranges[ydim.name]
# Set y-baseline
if y0 < 0:
y1 = max([y1, 0])
elif self.logy:
y0 = (ydim.range[0] or (10**(np.log10(y1)-2)) if y1 else 0.01)
else:
y0 = 0
# Ensure x-axis is picked up as categorical
x0 = xdim.pprint_value(extents[0])
x1 = xdim.pprint_value(extents[2])
return (x0, y0, x1, y1)
def _get_factors(self, element):
"""
Get factors for categorical axes.
"""
gdim = element.get_dimension(self.group_index)
if gdim not in element.kdims:
gdim = None
sdim = element.get_dimension(self.stack_index)
if sdim not in element.kdims:
sdim = None
xdim, ydim = element.dimensions()[:2]
xvals = element.dimension_values(0, False)
xvals = [x if xvals.dtype.kind in 'SU' else xdim.pprint_value(x)
for x in xvals]
if gdim and not sdim:
gvals = element.dimension_values(gdim, False)
gvals = [g if gvals.dtype.kind in 'SU' else gdim.pprint_value(g) for g in gvals]
coords = ([(x, g) for x in xvals for g in gvals], [])
else:
coords = (xvals, [])
if self.invert_axes: coords = coords[::-1]
return coords
def _get_axis_labels(self, *args, **kwargs):
"""
Override axis mapping by setting the first key and value
dimension as the x-axis and y-axis labels.
"""
element = self.current_frame
if self.batched:
element = element.last
xlabel = dim_axis_label(element.kdims[0])
gdim = element.get_dimension(self.group_index)
if gdim and gdim in element.kdims:
xlabel = ', '.join([xlabel, dim_axis_label(gdim)])
return (xlabel, dim_axis_label(element.vdims[0]), None)
def get_stack(self, xvals, yvals, baselines, sign='positive'):
"""
Iterates over a x- and y-values in a stack layer
and appropriately offsets the layer on top of the
previous layer.
"""
bottoms, tops = [], []
for x, y in zip(xvals, yvals):
baseline = baselines[x][sign]
if sign == 'positive':
bottom = baseline
top = bottom+y
baseline = top
else:
top = baseline
bottom = top+y
baseline = bottom
baselines[x][sign] = baseline
bottoms.append(bottom)
tops.append(top)
return bottoms, tops
def _glyph_properties(self, *args):
props = super(BarPlot, self)._glyph_properties(*args)
return {k: v for k, v in props.items() if k not in ['width', 'bar_width']}
def _add_color_data(self, ds, ranges, style, cdim, data, mapping, factors, colors):
# Get colormapper
cdata, cmapping = self._get_color_data(ds, ranges, dict(style),
factors=factors, colors=colors)
if 'color' not in cmapping:
return
# Enable legend if colormapper is categorical
cmapper = cmapping['color']['transform']
if ('color' in cmapping and self.show_legend and
isinstance(cmapper, CategoricalColorMapper)):
mapping['legend'] = cdim.name
# Merge data and mappings
mapping.update(cmapping)
for k, cd in cdata.items():
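            # Append the color column for this group, or replace the last
            # entry if one was already added during the current iteration.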
if isinstance(cmapper, CategoricalColorMapper) and cd.dtype.kind in 'uif':
cd = categorize_array(cd, cdim)
if k not in data or len(data[k]) != [len(data[key]) for key in data if key != k][0]:
data[k].append(cd)
else:
data[k][-1] = cd
def get_data(self, element, ranges, style):
# Get x, y, group, stack and color dimensions
grouping = None
group_dim = element.get_dimension(self.group_index)
if group_dim not in element.kdims:
group_dim = None
else:
grouping = 'grouped'
stack_dim = element.get_dimension(self.stack_index)
if stack_dim not in element.kdims:
stack_dim = None
else:
grouping = 'stacked'
group_dim = None
xdim = element.get_dimension(0)
ydim = element.vdims[0]
no_cidx = self.color_index is None
color_index = (group_dim or stack_dim) if no_cidx else self.color_index
color_dim = element.get_dimension(color_index)
if color_dim:
self.color_index = color_dim.name
# Define style information
width = style.get('bar_width', style.get('width', 1))
cmap = style.get('cmap')
hover = 'hover' in self.handles
# Group by stack or group dim if necessary
if group_dim is None:
grouped = {0: element}
else:
grouped = element.groupby(group_dim, group_type=Dataset,
container_type=OrderedDict,
datatype=['dataframe', 'dictionary'])
y0, y1 = ranges.get(ydim.name, (None, None))
if self.logy:
bottom = (ydim.range[0] or (10**(np.log10(y1)-2)) if y1 else 0.01)
else:
bottom = 0
# Map attributes to data
if grouping == 'stacked':
mapping = {'x': xdim.name, 'top': 'top',
'bottom': 'bottom', 'width': width}
elif grouping == 'grouped':
mapping = {'x': 'xoffsets', 'top': ydim.name, 'bottom': bottom,
'width': width}
else:
mapping = {'x': xdim.name, 'top': ydim.name, 'bottom': bottom, 'width': width}
# Get colors
cdim = color_dim or group_dim
cvals = element.dimension_values(cdim, expanded=False) if cdim else None
if cvals is not None:
if cvals.dtype.kind in 'uif' and no_cidx:
cvals = categorize_array(cvals, color_dim)
factors = None if cvals.dtype.kind in 'uif' else list(cvals)
if cdim is xdim and factors:
factors = list(categorize_array(factors, xdim))
if cmap is None and factors:
styles = self.style.max_cycles(len(factors))
colors = [styles[i]['color'] for i in range(len(factors))]
colors = [rgb2hex(c) if isinstance(c, tuple) else c for c in colors]
else:
colors = None
else:
factors, colors = None, None
# Iterate over stacks and groups and accumulate data
data = defaultdict(list)
baselines = defaultdict(lambda: {'positive': bottom, 'negative': 0})
for i, (k, ds) in enumerate(grouped.items()):
k = k[0] if isinstance(k, tuple) else k
if group_dim:
gval = k if isinstance(k, basestring) else group_dim.pprint_value(k)
# Apply stacking or grouping
if grouping == 'stacked':
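                # Accumulate negative and positive values separately so
                # stacked bars grow away from the baseline in both directions.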
for sign, slc in [('negative', (None, 0)), ('positive', (0, None))]:
slc_ds = ds.select(**{ds.vdims[0].name: slc})
xs = slc_ds.dimension_values(xdim)
ys = slc_ds.dimension_values(ydim)
bs, ts = self.get_stack(xs, ys, baselines, sign)
data['bottom'].append(bs)
data['top'].append(ts)
data[xdim.name].append(xs)
data[stack_dim.name].append(slc_ds.dimension_values(stack_dim))
if hover: data[ydim.name].append(ys)
self._add_color_data(slc_ds, ranges, style, cdim, data,
mapping, factors, colors)
elif grouping == 'grouped':
xs = ds.dimension_values(xdim)
ys = ds.dimension_values(ydim)
xoffsets = [(x if xs.dtype.kind in 'SU' else xdim.pprint_value(x), gval)
for x in xs]
data['xoffsets'].append(xoffsets)
data[ydim.name].append(ys)
if hover: data[xdim.name].append(xs)
if group_dim not in ds.dimensions():
ds = ds.add_dimension(group_dim.name, ds.ndims, gval)
data[group_dim.name].append(ds.dimension_values(group_dim))
else:
data[xdim.name].append(ds.dimension_values(xdim))
data[ydim.name].append(ds.dimension_values(ydim))
if hover:
for vd in ds.vdims[1:]:
data[vd.name].append(ds.dimension_values(vd))
if not grouping == 'stacked':
self._add_color_data(ds, ranges, style, cdim, data,
mapping, factors, colors)
# Concatenate the stacks or groups
sanitized_data = {}
for col, vals in data.items():
if len(vals) == 1:
sanitized_data[dimension_sanitizer(col)] = vals[0]
elif vals:
sanitized_data[dimension_sanitizer(col)] = np.concatenate(vals)
for name, val in mapping.items():
sanitized = None
if isinstance(val, basestring):
sanitized = dimension_sanitizer(mapping[name])
mapping[name] = sanitized
elif isinstance(val, dict) and 'field' in val:
sanitized = dimension_sanitizer(val['field'])
val['field'] = sanitized
if sanitized is not None and sanitized not in sanitized_data:
sanitized_data[sanitized] = []
# Ensure x-values are categorical
xname = dimension_sanitizer(xdim.name)
if xname in sanitized_data:
sanitized_data[xname] = categorize_array(sanitized_data[xname], xdim)
# If axes inverted change mapping to match hbar signature
if self.invert_axes:
mapping.update({'y': mapping.pop('x'), 'left': mapping.pop('bottom'),
'right': mapping.pop('top'), 'height': mapping.pop('width')})
return sanitized_data, mapping, style
| 1 | 21,492 | This was very confusing until I realized this might refer to the *bokeh* use of the word 'streaming'. | holoviz-holoviews | py |
@@ -47,6 +47,10 @@ type TaskHandler struct {
tasksToEvents map[string]*eventList
// tasksToEventsLock for locking the map
tasksToEventsLock sync.RWMutex
+ // batchMap is used to collect container events
+ // between task transitions
+ batchMap map[string][]api.ContainerStateChange
+ batchMapLock sync.RWMutex
}
// NewTaskHandler returns a pointer to TaskHandler | 1 | // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package eventhandler
import (
"container/list"
"errors"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/api"
"github.com/aws/amazon-ecs-agent/agent/statechange"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/cihub/seelog"
)
// Maximum number of tasks that may be handled at once by the TaskHandler
const concurrentEventCalls = 3
type eventList struct {
// events is a list of *sendableEvents
events *list.List
	// sending indicates whether the list is already being handled
sending bool
	// eventListLock locks both the list and sending bool
eventListLock sync.Mutex
}
// TaskHandler encapsulates the map of a task arn to task and container events
// associated with said task
type TaskHandler struct {
// submitSemaphore for the number of tasks that may be handled at once
submitSemaphore utils.Semaphore
	// tasksToEvents is a task ARN to *eventList map so events may be serialized per task
//TODO: fix leak, currently items are never removed from this map
tasksToEvents map[string]*eventList
// tasksToEventsLock for locking the map
tasksToEventsLock sync.RWMutex
}
// NewTaskHandler returns a pointer to TaskHandler
func NewTaskHandler() *TaskHandler {
return &TaskHandler{
tasksToEvents: make(map[string]*eventList),
submitSemaphore: utils.NewSemaphore(concurrentEventCalls),
}
}
// AddStateChangeEvent queues up a state change for sending using the given client.
func (handler *TaskHandler) AddStateChangeEvent(change statechange.Event, client api.ECSClient) error {
switch change.GetEventType() {
case statechange.TaskEvent:
event, ok := change.(api.TaskStateChange)
if !ok {
return errors.New("eventhandler: unable to get task event from state change event")
}
handler.addEvent(newSendableTaskEvent(event), client)
return nil
case statechange.ContainerEvent:
event, ok := change.(api.ContainerStateChange)
if !ok {
return errors.New("eventhandler: unable to get container event from state change event")
}
handler.addEvent(newSendableContainerEvent(event), client)
return nil
default:
return errors.New("eventhandler: unable to determine event type from state change event")
}
}
// addEvent prepares a given event to be sent by adding it to the handler's appropriate
// eventList
func (handler *TaskHandler) addEvent(change *sendableEvent, client api.ECSClient) {
seelog.Info("TaskHandler, Adding event: ", change)
taskEvents := handler.getTaskEventList(change)
taskEvents.eventListLock.Lock()
defer taskEvents.eventListLock.Unlock()
// Update taskEvent
taskEvents.events.PushBack(change)
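	// Start a submit goroutine only if one is not already draining
	// this task's event list.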
if !taskEvents.sending {
taskEvents.sending = true
go handler.SubmitTaskEvents(taskEvents, client)
}
}
// getTaskEventList gets the eventList from the tasksToEvents map, and reduces the
// scope of the tasksToEventsLock to just this function
func (handler *TaskHandler) getTaskEventList(change *sendableEvent) (taskEvents *eventList) {
handler.tasksToEventsLock.Lock()
defer handler.tasksToEventsLock.Unlock()
taskEvents, ok := handler.tasksToEvents[change.taskArn()]
if !ok {
seelog.Debug("TaskHandler, collecting events for new task ", change)
taskEvents = &eventList{events: list.New(), sending: false}
handler.tasksToEvents[change.taskArn()] = taskEvents
}
return taskEvents
}
// SubmitTaskEvents continuously retries sending an event until it succeeds,
// sleeping between each attempt
func (handler *TaskHandler) SubmitTaskEvents(taskEvents *eventList, client api.ECSClient) {
backoff := utils.NewSimpleBackoff(1*time.Second, 30*time.Second, 0.20, 1.3)
// Mirror events.sending, but without the need to lock since this is local
// to our goroutine
done := false
for !done {
// If we looped back up here, we successfully submitted an event, but
// we haven't emptied the list so we should keep submitting
backoff.Reset()
utils.RetryWithBackoff(backoff, func() error {
// Lock and unlock within this function, allowing the list to be added
// to while we're not actively sending an event
seelog.Debug("TaskHandler, Waiting on semaphore to send...")
handler.submitSemaphore.Wait()
defer handler.submitSemaphore.Post()
seelog.Debug("TaskHandler, Aquiring lock for sending event...")
taskEvents.eventListLock.Lock()
defer taskEvents.eventListLock.Unlock()
seelog.Debug("TaskHandler, Aquired lock!")
var err error
if taskEvents.events.Len() == 0 {
seelog.Debug("TaskHandler, No events left; not retrying more")
taskEvents.sending = false
done = true
return nil
}
eventToSubmit := taskEvents.events.Front()
event := eventToSubmit.Value.(*sendableEvent)
if event.containerShouldBeSent() {
seelog.Info("TaskHandler, Sending container change: ", event)
err = client.SubmitContainerStateChange(event.containerChange)
if err == nil {
// submitted; ensure we don't retry it
event.setSent()
if event.containerChange.Container != nil {
event.containerChange.Container.SetSentStatus(event.containerChange.Status)
}
statesaver.Save()
seelog.Debug("TaskHandler, Submitted container state change")
backoff.Reset()
taskEvents.events.Remove(eventToSubmit)
} else {
seelog.Error("TaskHandler, Unretriable error submitting container state change ", err)
}
} else if event.taskShouldBeSent() {
seelog.Info("TaskHandler, Sending task change: ", event)
err = client.SubmitTaskStateChange(event.taskChange)
if err == nil {
// submitted or can't be retried; ensure we don't retry it
event.setSent()
if event.taskChange.Task != nil {
event.taskChange.Task.SetSentStatus(event.taskChange.Status)
}
statesaver.Save()
seelog.Debug("TaskHandler, Submitted task state change")
backoff.Reset()
taskEvents.events.Remove(eventToSubmit)
} else {
seelog.Error("TaskHandler, Unretriable error submitting container state change: ", err)
}
} else {
// Shouldn't be sent as either a task or container change event; must have been already sent
seelog.Info("TaskHandler, Not submitting redundant event; just removing")
taskEvents.events.Remove(eventToSubmit)
}
if taskEvents.events.Len() == 0 {
seelog.Debug("TaskHandler, Removed the last element, no longer sending")
taskEvents.sending = false
done = true
return nil
}
return err
})
}
}
| 1 | 16,144 | Can you rename this as `tasksToContainerStates` ? If you do that, you can rename the lock as well | aws-amazon-ecs-agent | go |
@@ -177,7 +177,7 @@ public class UserAccountManager {
*/
public Account getCurrentAccount() {
final Account[] accounts = accountManager.getAccountsByType(accountType);
- if (accounts == null || accounts.length == 0) {
+ if (accounts.length == 0) {
return null;
}
| 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.accounts;
import android.accounts.Account;
import android.accounts.AccountManager;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Bundle;
import android.text.TextUtils;
import com.salesforce.androidsdk.app.Features;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.AuthenticatorService;
import com.salesforce.androidsdk.rest.ClientManager;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* This class acts as a manager that provides methods to access
* user accounts that are currently logged in, and can be used
* to add new user accounts.
*
* @author bhariharan
*/
public class UserAccountManager {
private static final String CURRENT_USER_PREF = "current_user_info";
private static final String USER_ID_KEY = "user_id";
private static final String ORG_ID_KEY = "org_id";
public static final String USER_SWITCH_INTENT_ACTION = "com.salesforce.USERSWITCHED";
/**
* Represents how the current user has been switched to, as found in an intent sent to a {@link android.content.BroadcastReceiver}
     * filtering {@link #USER_SWITCH_INTENT_ACTION}. User switching includes logging in, logging out and switching between authenticated
* users. For backwards compatibility, the case where the last user has logged out is not included, as this currently does not
* send a broadcast.
*/
public static final String EXTRA_USER_SWITCH_TYPE = "com.salesforce.USER_SWITCH_TYPE";
/**
* A switch has occurred between two authenticated users.
*
* <p>Use this constant with {@link #EXTRA_USER_SWITCH_TYPE}.</p>
*/
public static final int USER_SWITCH_TYPE_DEFAULT = -1;
/**
* The first user has logged in and is being switched to. There were no users authenticated before this switch.
*
* <p>Use this constant with {@link #EXTRA_USER_SWITCH_TYPE}.</p>
*/
public static final int USER_SWITCH_TYPE_FIRST_LOGIN = 0;
/**
* An additional user has logged in and is being switched to. There was at least one user authenticated before this switch.
*
* <p>Use this constant with {@link #EXTRA_USER_SWITCH_TYPE}.</p>
*/
public static final int USER_SWITCH_TYPE_LOGIN = 1;
/**
     * A user has logged out and another authenticated user is being switched to.
*
* <p>Use this constant with {@link #EXTRA_USER_SWITCH_TYPE}.</p>
*/
public static final int USER_SWITCH_TYPE_LOGOUT = 2;
private static UserAccountManager INSTANCE;
private Context context;
private AccountManager accountManager;
private String accountType;
/**
* Returns a singleton instance of this class.
*
* @return Instance of this class.
*/
public static UserAccountManager getInstance() {
if (INSTANCE == null) {
INSTANCE = new UserAccountManager();
}
return INSTANCE;
}
/**
* Protected constructor.
*/
protected UserAccountManager() {
context = SalesforceSDKManager.getInstance().getAppContext();
accountManager = AccountManager.get(context);
accountType = SalesforceSDKManager.getInstance().getAccountType();
}
/**
* Stores the current active user's user ID and org ID in a shared preference file.
*
* @param userId User ID.
* @param orgId Org ID.
*/
public void storeCurrentUserInfo(String userId, String orgId) {
final SharedPreferences sp = context.getSharedPreferences(CURRENT_USER_PREF,
Context.MODE_PRIVATE);
final Editor e = sp.edit();
e.putString(USER_ID_KEY, userId);
e.putString(ORG_ID_KEY, orgId);
e.commit();
}
/**
* Returns the stored user ID.
*
* @return User ID.
*/
public String getStoredUserId() {
final SharedPreferences sp = context.getSharedPreferences(CURRENT_USER_PREF,
Context.MODE_PRIVATE);
return sp.getString(USER_ID_KEY, null);
}
/**
* Returns the stored org ID.
*
* @return Org ID.
*/
public String getStoredOrgId() {
final SharedPreferences sp = context.getSharedPreferences(CURRENT_USER_PREF,
Context.MODE_PRIVATE);
return sp.getString(ORG_ID_KEY, null);
}
/**
* Returns the current user logged in.
*
* @return Current user that's logged in.
*/
public UserAccount getCurrentUser() {
return buildUserAccount(getCurrentAccount());
}
/**
     * Returns the account of the current user logged in.
     *
     * @return Account of the current user that's logged in.
*/
public Account getCurrentAccount() {
final Account[] accounts = accountManager.getAccountsByType(accountType);
if (accounts == null || accounts.length == 0) {
return null;
}
// Register feature MU if more than one user
if (accounts.length > 1) {
SalesforceSDKManager.getInstance().registerUsedAppFeature(Features.FEATURE_MULTI_USERS);
} else {
SalesforceSDKManager.getInstance().unregisterUsedAppFeature(Features.FEATURE_MULTI_USERS);
}
// Reads the stored user ID and org ID.
final SharedPreferences sp = context.getSharedPreferences(CURRENT_USER_PREF,
Context.MODE_PRIVATE);
final String storedUserId = sp.getString(USER_ID_KEY, "");
final String storedOrgId = sp.getString(ORG_ID_KEY, "");
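        // Find the account whose decrypted user ID and org ID match the
        // stored values for the current user.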
for (final Account account : accounts) {
if (account != null) {
// Reads the user ID and org ID from account manager.
final String orgId = SalesforceSDKManager.decrypt(accountManager.getUserData(account,
AuthenticatorService.KEY_ORG_ID));
final String userId = SalesforceSDKManager.decrypt(accountManager.getUserData(account,
AuthenticatorService.KEY_USER_ID));
if (storedUserId.trim().equals(userId)
&& storedOrgId.trim().equals(orgId)) {
return account;
}
}
}
return null;
}
/**
* Returns a list of authenticated users.
*
* @return List of authenticated users.
*/
public List<UserAccount> getAuthenticatedUsers() {
final Account[] accounts = accountManager.getAccountsByType(accountType);
if (accounts == null || accounts.length == 0) {
return null;
}
final List<UserAccount> userAccounts = new ArrayList<UserAccount>();
for (final Account account : accounts) {
final UserAccount userAccount = buildUserAccount(account);
if (userAccount != null) {
userAccounts.add(userAccount);
}
}
if (userAccounts.size() == 0) {
return null;
}
return userAccounts;
}
/**
* Returns whether the specified user account exists or not.
*
* @param account User account.
* @return True - if it exists, False - otherwise.
*/
public boolean doesUserAccountExist(UserAccount account) {
if (account == null) {
return false;
}
final List<UserAccount> userAccounts = getAuthenticatedUsers();
if (userAccounts == null || userAccounts.size() == 0) {
return false;
}
for (final UserAccount userAccount : userAccounts) {
if (account.equals(userAccount)) {
return true;
}
}
return false;
}
/**
* Switches to the specified user account. If the specified user account
* is invalid/doesn't exist, this method kicks off the login flow
* for a new user. When the user account switch is complete, it is
* imperative for the app to update its cached references to RestClient,
* to avoid holding on to a RestClient from the previous user.
*
* @param user User account to switch to.
*/
public void switchToUser(UserAccount user) {
// All that's known is that the user is being switched
switchToUser(user, USER_SWITCH_TYPE_DEFAULT, null);
}
/**
* Switches to the specified user account.
*
* @param user the user account to switch to
* @param userSwitchType a {@code USER_SWITCH_TYPE} constant
     * @param extras an optional Bundle of extras to pass additional
* information during user switch
*
* @see #switchToUser(UserAccount)
*/
public void switchToUser(UserAccount user, int userSwitchType, Bundle extras) {
if (user == null || !doesUserAccountExist(user)) {
switchToNewUser();
return;
}
final UserAccount curUser = getCurrentUser();
/*
* Checks if we are attempting to switch to the current user.
* In this case, there's nothing to be done.
*/
if (user.equals(curUser)) {
return;
}
final ClientManager cm = new ClientManager(context, accountType,
SalesforceSDKManager.getInstance().getLoginOptions(), true);
final Account account = cm.getAccountByName(user.getAccountName());
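        // Persist the new current user and obtain a RestClient for the
        // account before broadcasting the switch.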
storeCurrentUserInfo(user.getUserId(), user.getOrgId());
cm.peekRestClient(account);
sendUserSwitchIntent(userSwitchType, extras);
}
/**
* Kicks off the login flow to switch to a new user. Once the login
* flow is complete, the context will automatically become the
* new user's context and a call to peekRestClient() or getRestClient()
* in ClientManager will return a RestClient instance for the new user.
*/
public void switchToNewUser() {
final Bundle options = SalesforceSDKManager.getInstance().getLoginOptions().asBundle();
switchToNewUserWithOptions(options);
}
/**
* Kicks off the login flow to switch to a new user with jwt. Once the login
* flow is complete, the context will automatically become the
* new user's context and a call to peekRestClient() or getRestClient()
* in ClientManager will return a RestClient instance for the new user.
*
* @param jwt JWT.
* @param url Instance/My domain URL.
*/
public void switchToNewUser(String jwt, String url) {
final Bundle options = SalesforceSDKManager.getInstance().getLoginOptions(jwt, url).asBundle();
switchToNewUserWithOptions(options);
}
private void switchToNewUserWithOptions(Bundle options) {
final Intent i = new Intent(context, SalesforceSDKManager.getInstance().getLoginActivityClass());
i.setFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
i.putExtras(options);
context.startActivity(i);
}
/**
* Logs the current user out.
*
* @param frontActivity Front activity.
*/
public void signoutCurrentUser(Activity frontActivity) {
SalesforceSDKManager.getInstance().logout(frontActivity);
}
/**
* Logs the current user out.
*
* @param frontActivity Front activity.
* @param showLoginPage True - if the login page should be shown, False - otherwise.
*/
public void signoutCurrentUser(Activity frontActivity, boolean showLoginPage) {
SalesforceSDKManager.getInstance().logout(frontActivity, showLoginPage);
}
/**
* Logs the specified user out. If the user specified is not the current
* user, push notification un-registration will not take place.
*
* @param userAccount User account.
* @param frontActivity Front activity.
*/
public void signoutUser(UserAccount userAccount, Activity frontActivity) {
final Account account = buildAccount(userAccount);
SalesforceSDKManager.getInstance().logout(account, frontActivity);
}
/**
* Logs the specified user out. If the user specified is not the current
* user, push notification un-registration will not take place.
*
* @param userAccount User account.
* @param frontActivity Front activity.
* @param showLoginPage True - if the login page should be shown, False - otherwise.
*/
public void signoutUser(UserAccount userAccount, Activity frontActivity, boolean showLoginPage) {
final Account account = buildAccount(userAccount);
SalesforceSDKManager.getInstance().logout(account, frontActivity, showLoginPage);
}
/**
* Builds a UserAccount object from the saved account.
*
* @param account Account object.
* @return UserAccount object.
*/
public UserAccount buildUserAccount(Account account) {
if (account == null) {
return null;
}
final String authToken = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AccountManager.KEY_AUTHTOKEN));
final String refreshToken = SalesforceSDKManager.decrypt(accountManager.getPassword(account));
final String loginServer = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_LOGIN_URL));
final String idUrl = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_ID_URL));
final String instanceServer = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_INSTANCE_URL));
final String orgId = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_ORG_ID));
final String userId = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_USER_ID));
final String username = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_USERNAME));
final String accountName = accountManager.getUserData(account, AccountManager.KEY_ACCOUNT_NAME);
final String lastName = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_LAST_NAME));
final String email = SalesforceSDKManager.decrypt(accountManager.getUserData(account, AuthenticatorService.KEY_EMAIL));
final String encFirstName = accountManager.getUserData(account, AuthenticatorService.KEY_FIRST_NAME);
String firstName = null;
if (encFirstName != null) {
firstName = SalesforceSDKManager.decrypt(encFirstName);
}
final String encDisplayName = accountManager.getUserData(account, AuthenticatorService.KEY_DISPLAY_NAME);
String displayName = null;
if (encDisplayName != null) {
            displayName = SalesforceSDKManager.decrypt(encDisplayName);
}
final String encPhotoUrl = accountManager.getUserData(account, AuthenticatorService.KEY_PHOTO_URL);
String photoUrl = null;
if (encPhotoUrl != null) {
photoUrl = SalesforceSDKManager.decrypt(encPhotoUrl);
}
final String encThumbnailUrl = accountManager.getUserData(account, AuthenticatorService.KEY_THUMBNAIL_URL);
String thumbnailUrl = null;
if (encThumbnailUrl != null) {
thumbnailUrl = SalesforceSDKManager.decrypt(encThumbnailUrl);
}
Map<String, String> additionalOauthValues = null;
final List<String> additionalOauthKeys = SalesforceSDKManager.getInstance().getAdditionalOauthKeys();
if (additionalOauthKeys != null && !additionalOauthKeys.isEmpty()) {
additionalOauthValues = new HashMap<>();
for (final String key : additionalOauthKeys) {
if (!TextUtils.isEmpty(key)) {
final String encValue = accountManager.getUserData(account, key);
String value = null;
if (encValue != null) {
value = SalesforceSDKManager.decrypt(encValue);
}
additionalOauthValues.put(key, value);
}
}
}
final String encCommunityId = accountManager.getUserData(account, AuthenticatorService.KEY_COMMUNITY_ID);
String communityId = null;
if (encCommunityId != null) {
communityId = SalesforceSDKManager.decrypt(encCommunityId);
}
final String encCommunityUrl = accountManager.getUserData(account, AuthenticatorService.KEY_COMMUNITY_URL);
String communityUrl = null;
if (encCommunityUrl != null) {
communityUrl = SalesforceSDKManager.decrypt(encCommunityUrl);
}
if (authToken == null || instanceServer == null || userId == null || orgId == null) {
return null;
}
return UserAccountBuilder.getInstance().authToken(authToken).refreshToken(refreshToken).
loginServer(loginServer).idUrl(idUrl).instanceServer(instanceServer).orgId(orgId).
userId(userId).username(username).accountName(accountName).communityId(communityId).
communityUrl(communityUrl).firstName(firstName).lastName(lastName).displayName(displayName).
email(email).photoUrl(photoUrl).thumbnailUrl(thumbnailUrl).
additionalOauthValues(additionalOauthValues).build();
}
/**
* Builds an Account object from the user account passed in.
*
* @param userAccount UserAccount object.
* @return Account object.
*/
public Account buildAccount(UserAccount userAccount) {
final Account[] accounts = accountManager.getAccountsByType(accountType);
if (userAccount == null) {
return null;
}
if (accounts == null || accounts.length == 0) {
return null;
}
// Reads the user account's user ID and org ID.
final String storedUserId = ((userAccount.getUserId() == null) ? "" : userAccount.getUserId());
final String storedOrgId = ((userAccount.getOrgId() == null) ? "" : userAccount.getOrgId());
for (final Account account : accounts) {
if (account != null) {
// Reads the user ID and org ID from account manager.
final String orgId = SalesforceSDKManager.decrypt(accountManager.getUserData(account,
AuthenticatorService.KEY_ORG_ID));
final String userId = SalesforceSDKManager.decrypt(accountManager.getUserData(account,
AuthenticatorService.KEY_USER_ID));
if (storedUserId.trim().equals(userId.trim())
&& storedOrgId.trim().equals(orgId.trim())) {
return account;
}
}
}
return null;
}
/**
* Broadcasts an intent that a user switch has occurred.
*/
public void sendUserSwitchIntent() {
// By default, the type of switch is not known
sendUserSwitchIntent(USER_SWITCH_TYPE_DEFAULT, null);
}
/**
* Broadcasts an intent that a user switch has occurred.
*
     * @param userSwitchType a {@code USER_SWITCH_TYPE} constant
     * @param extras an optional Bundle of extras to add to the broadcast intent
*/
public final void sendUserSwitchIntent(int userSwitchType, Bundle extras) {
final Intent intent = new Intent(USER_SWITCH_INTENT_ACTION);
intent.setPackage(context.getPackageName());
intent.putExtra(EXTRA_USER_SWITCH_TYPE, userSwitchType);
if (extras != null) {
intent.putExtras(extras);
}
SalesforceSDKManager.getInstance().getAppContext().sendBroadcast(intent);
}
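    /*
     * Illustrative receiver sketch (added commentary, not part of the original source):
     * an app can observe the switch broadcast and rebuild user-scoped state. It relies
     * on USER_SWITCH_INTENT_ACTION, EXTRA_USER_SWITCH_TYPE and USER_SWITCH_TYPE_DEFAULT
     * being visible to the app, which is assumed here:
     *
     *   BroadcastReceiver receiver = new BroadcastReceiver() {
     *       @Override
     *       public void onReceive(Context ctx, Intent intent) {
     *           int type = intent.getIntExtra(EXTRA_USER_SWITCH_TYPE, USER_SWITCH_TYPE_DEFAULT);
     *           // Drop cached RestClient instances and reload user-specific caches here.
     *       }
     *   };
     *   context.registerReceiver(receiver, new IntentFilter(USER_SWITCH_INTENT_ACTION));
     */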
/**
* Retrieves a stored user account from org ID and user ID.
*
* @param orgId Org ID.
* @param userId User ID.
* @return User account.
*/
public UserAccount getUserFromOrgAndUserId(String orgId, String userId) {
if (TextUtils.isEmpty(orgId) || TextUtils.isEmpty(userId)) {
return null;
}
final List<UserAccount> userAccounts = getAuthenticatedUsers();
if (userAccounts == null || userAccounts.size() == 0) {
return null;
}
for (final UserAccount userAccount : userAccounts) {
if (orgId.equals(userAccount.getOrgId()) && userId.equals(userAccount.getUserId())) {
return userAccount;
}
}
return null;
}
}
| 1 | 16,955 | Fixing `lint` warnings that have existed for a while. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -452,6 +452,18 @@ public interface Iterator<T> extends java.util.Iterator<T>, Traversable<T> {
return io.vavr.collection.Collections.fill(n, s);
}
+ /**
+ * Returns an Iterator containing {@code n} times the given {@code element}.
+ *
+ * @param <T> Component type of the Iterator
+ * @param n The number of elements
+ * @param element The element
+ * @return An iterator of {@code n} sequence elements, where each element is the given {@code element}.
+ */
+ static <T> Iterator<T> fill(int n, T element) {
+ return io.vavr.collection.Collections.fillObject(n, element);
+ }
+
/**
* Creates an Iterator of characters starting from {@code from}, extending to {@code toExclusive - 1}.
* <p> | 1 | /* __ __ __ __ __ ___
* \ \ / / \ \ / / __/
* \ \/ / /\ \ \/ / /
* \____/__/ \__\____/__/
*
* Copyright 2014-2018 Vavr, http://vavr.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.vavr.collection;
import io.vavr.*;
import io.vavr.collection.IteratorModule.ConcatIterator;
import io.vavr.collection.IteratorModule.DistinctIterator;
import io.vavr.collection.IteratorModule.GroupedIterator;
import io.vavr.control.Option;
import java.math.BigDecimal;
import java.util.*;
import java.util.function.*;
import static java.lang.Double.NEGATIVE_INFINITY;
import static java.lang.Double.POSITIVE_INFINITY;
import static java.math.RoundingMode.HALF_UP;
import static io.vavr.collection.IteratorModule.BigDecimalHelper.areEqual;
import static io.vavr.collection.IteratorModule.BigDecimalHelper.asDecimal;
import static io.vavr.collection.IteratorModule.CachedIterator;
import static io.vavr.collection.IteratorModule.EmptyIterator;
/**
* {@code io.vavr.collection.Iterator} is a compositional replacement for {@code java.util.Iterator}
* whose purpose is to iterate <em>once</em> over a sequence of elements.
* <p>
* <strong>Note:</strong> Iterators encapsulate mutable state.
* They are not meant to be used concurrently by different threads. Do not reuse Iterators, e.g. after passing to
* {@linkplain io.vavr.collection.List#ofAll(Iterable)}.
* <p>
* There are two abstract methods: {@code hasNext} for checking if there is a next element available,
* and {@code next} which removes the next element from the iterator and returns it. They can be called
* an arbitrary amount of times. If {@code hasNext} returns false, a call of {@code next} will throw
* a {@code NoSuchElementException}.
* <p>
 * <strong>Caution: Methods other than {@code hasNext} and {@code next} can be called only once (exclusively).
* More specifically, after calling a method it cannot be guaranteed that the next call will succeed.</strong>
* <p>
 * An Iterator can be used only once because it is a traversal pointer into a collection, not a collection
 * itself.
*
* @param <T> Component type
* @author Daniel Dietrich
*/
// DEV-NOTE: we prefer returning empty() over this when !hasNext(), in order to free memory.
public interface Iterator<T> extends java.util.Iterator<T>, Traversable<T> {
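    /*
     * Illustrative single-use sketch (added commentary, not part of the original source):
     *
     *   Iterator<Integer> it = Iterator.of(1, 2, 3);
     *   it.toList();  // List(1, 2, 3) - the iterator is now exhausted
     *   it.toList();  // List() - do not reuse; create a fresh Iterator instead
     */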
/**
* Creates an Iterator which traverses along the concatenation of the given iterables.
*
* @param iterables The iterables
* @param <T> Component type.
* @return A new {@code io.vavr.collection.Iterator}
*/
@SuppressWarnings("varargs")
@SafeVarargs
static <T> Iterator<T> concat(Iterable<? extends T>... iterables) {
Objects.requireNonNull(iterables, "iterables is null");
if (iterables.length == 0) {
return empty();
} else {
ConcatIterator<T> res = new ConcatIterator<>();
for (Iterable<? extends T> iterable : iterables) {
res.append(iterable.iterator());
}
return res;
}
}
/**
* Creates an Iterator which traverses along the concatenation of the given iterables.
*
* @param iterables The iterable of iterables
* @param <T> Component type.
* @return A new {@code io.vavr.collection.Iterator}
*/
static <T> Iterator<T> concat(Iterable<? extends Iterable<? extends T>> iterables) {
Objects.requireNonNull(iterables, "iterables is null");
if (!iterables.iterator().hasNext()) {
return empty();
} else {
ConcatIterator<T> res = new ConcatIterator<>();
for (Iterable<? extends T> iterable : iterables) {
res.append(iterable.iterator());
}
return res;
}
}
/**
* Returns the empty Iterator.
*
* @param <T> Component type
* @return The empty Iterator
*/
@SuppressWarnings("unchecked")
static <T> Iterator<T> empty() {
return (Iterator<T>) EmptyIterator.INSTANCE;
}
/**
* Narrows a widened {@code Iterator<? extends T>} to {@code Iterator<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param iterator An {@code Iterator}.
* @param <T> Component type of the {@code Iterator}.
* @return the given {@code iterator} instance as narrowed type {@code Iterator<T>}.
*/
@SuppressWarnings("unchecked")
static <T> Iterator<T> narrow(Iterator<? extends T> iterator) {
return (Iterator<T>) iterator;
}
/**
* Creates an Iterator which traverses one element.
*
* @param element An element
* @param <T> Component type.
* @return A new Iterator
*/
static <T> Iterator<T> of(T element) {
return new AbstractIterator<T>() {
boolean hasNext = true;
@Override
public boolean hasNext() {
return hasNext;
}
@Override
public T getNext() {
hasNext = false;
return element;
}
};
}
/**
* Creates an Iterator which traverses the given elements.
*
* @param elements Zero or more elements
* @param <T> Component type
* @return A new Iterator
*/
@SafeVarargs
static <T> Iterator<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements.length == 0) {
return empty();
} else {
return new AbstractIterator<T>() {
int index = 0;
@Override
public boolean hasNext() {
return index < elements.length;
}
@Override
public T getNext() {
return elements[index++];
}
};
}
}
/**
* Creates an Iterator based on the given Iterable. This is a convenience method for
     * {@code Iterator.ofAll(iterable.iterator())}.
*
* @param iterable A {@link Iterable}
* @param <T> Component type.
* @return A new {@code io.vavr.collection.Iterator}
*/
@SuppressWarnings("unchecked")
static <T> Iterator<T> ofAll(Iterable<? extends T> iterable) {
Objects.requireNonNull(iterable, "iterable is null");
if (iterable instanceof Iterator) {
return (Iterator<T>) iterable;
} else {
return ofAll(iterable.iterator());
}
}
/**
* Creates an Iterator based on the given Iterator by
* delegating calls of {@code hasNext()} and {@code next()} to it.
*
* @param iterator A {@link java.util.Iterator}
* @param <T> Component type.
* @return A new {@code io.vavr.collection.Iterator}
*/
@SuppressWarnings("unchecked")
static <T> Iterator<T> ofAll(java.util.Iterator<? extends T> iterator) {
Objects.requireNonNull(iterator, "iterator is null");
if (iterator instanceof Iterator) {
return (Iterator<T>) iterator;
} else {
return new AbstractIterator<T>() {
@Override
public boolean hasNext() {
return iterator.hasNext();
}
@Override
public T getNext() {
return iterator.next();
}
};
}
}
/**
* Creates an Iterator from boolean values.
*
* @param elements boolean values
* @return A new Iterator of Boolean values
* @throws NullPointerException if elements is null
*/
static Iterator<Boolean> ofAll(boolean... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Boolean>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Boolean getNext() {
return elements[i++];
}
};
}
/**
* Creates an Iterator from byte values.
*
* @param elements byte values
* @return A new Iterator of Byte values
* @throws NullPointerException if elements is null
*/
static Iterator<Byte> ofAll(byte... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Byte>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Byte getNext() {
return elements[i++];
}
};
}
/**
* Creates an Iterator from char values.
*
* @param elements char values
* @return A new Iterator of Character values
* @throws NullPointerException if elements is null
*/
static Iterator<Character> ofAll(char... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Character>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Character getNext() {
return elements[i++];
}
};
}
/**
     * Creates an Iterator from double values.
*
* @param elements double values
* @return A new Iterator of Double values
* @throws NullPointerException if elements is null
*/
static Iterator<Double> ofAll(double... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Double>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Double getNext() {
return elements[i++];
}
};
}
/**
* Creates an Iterator from float values.
*
* @param elements float values
* @return A new Iterator of Float values
* @throws NullPointerException if elements is null
*/
static Iterator<Float> ofAll(float... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Float>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Float getNext() {
return elements[i++];
}
};
}
/**
* Creates an Iterator from int values.
*
* @param elements int values
* @return A new Iterator of Integer values
* @throws NullPointerException if elements is null
*/
static Iterator<Integer> ofAll(int... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Integer>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Integer getNext() {
return elements[i++];
}
};
}
/**
* Creates an Iterator from long values.
*
* @param elements long values
* @return A new Iterator of Long values
* @throws NullPointerException if elements is null
*/
static Iterator<Long> ofAll(long... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Long>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Long getNext() {
return elements[i++];
}
};
}
/**
* Creates an Iterator from short values.
*
* @param elements short values
* @return A new Iterator of Short values
* @throws NullPointerException if elements is null
*/
static Iterator<Short> ofAll(short... elements) {
Objects.requireNonNull(elements, "elements is null");
return new AbstractIterator<Short>() {
int i = 0;
@Override
public boolean hasNext() {
return i < elements.length;
}
@Override
public Short getNext() {
return elements[i++];
}
};
}
/**
* Returns an Iterator on a sequence of {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <T> Component type of the Iterator
* @param n The number of elements
* @param f The Function computing element values
* @return An Iterator on a sequence of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
static <T> Iterator<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return io.vavr.collection.Collections.tabulate(n, f);
}
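    // Example (added commentary, not part of the original source):
    //   Iterator.tabulate(4, i -> i * i)  // = (0, 1, 4, 9)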
/**
* Returns an Iterator on a sequence of {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the Iterator
* @param n The number of elements
* @param s The Supplier computing element values
* @return An iterator on a sequence of {@code n} elements, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
static <T> Iterator<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return io.vavr.collection.Collections.fill(n, s);
}
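    // Example (added commentary, not part of the original source); unlike continually(Supplier),
    // fill is bounded to n invocations of the supplier:
    //   Iterator.fill(3, () -> "x")  // = ("x", "x", "x")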
/**
* Creates an Iterator of characters starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.range('a', 'c') // = ('a', 'b')
* Iterator.range('c', 'a') // = ()
* </code>
* </pre>
*
* @param from the first character
* @param toExclusive the successor of the last character
* @return a range of characters as specified or the empty range if {@code from >= toExclusive}
*/
static Iterator<Character> range(char from, char toExclusive) {
return rangeBy(from, toExclusive, 1);
}
/**
* Creates an Iterator of characters starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeBy('a', 'c', 1) // = ('a', 'b')
* Iterator.rangeBy('a', 'd', 2) // = ('a', 'c')
* Iterator.rangeBy('d', 'a', -2) // = ('d', 'b')
* Iterator.rangeBy('d', 'a', 2) // = ()
* </code>
* </pre>
*
* @param from the first character
* @param toExclusive the successor of the last character if step > 0, the predecessor of the last character if step < 0
* @param step the step
* @return a range of characters as specified or the empty range if {@code signum(step) == signum(from - toExclusive)}.
* @throws IllegalArgumentException if {@code step} is zero
*/
static Iterator<Character> rangeBy(char from, char toExclusive, int step) {
return rangeBy((int) from, (int) toExclusive, step).map(i -> (char) i.shortValue());
}
@GwtIncompatible("BigDecimalHelper is GwtIncompatible")
static Iterator<Double> rangeBy(double from, double toExclusive, double step) {
final BigDecimal fromDecimal = asDecimal(from), toDecimal = asDecimal(toExclusive), stepDecimal = asDecimal(step);
return rangeBy(fromDecimal, toDecimal, stepDecimal).map(BigDecimal::doubleValue);
}
static Iterator<BigDecimal> rangeBy(BigDecimal from, BigDecimal toExclusive, BigDecimal step) {
if (step.signum() == 0) {
throw new IllegalArgumentException("step cannot be 0");
} else if (areEqual(from, toExclusive) || step.signum() == from.subtract(toExclusive).signum()) {
return empty();
} else {
if (step.signum() > 0) {
return new AbstractIterator<BigDecimal>() {
BigDecimal i = from;
@Override
public boolean hasNext() {
return i.compareTo(toExclusive) < 0;
}
@Override
public BigDecimal getNext() {
final BigDecimal next = this.i;
this.i = next.add(step);
return next;
}
};
} else {
return new AbstractIterator<BigDecimal>() {
BigDecimal i = from;
@Override
public boolean hasNext() {
return i.compareTo(toExclusive) > 0;
}
@Override
public BigDecimal getNext() {
final BigDecimal next = this.i;
this.i = next.add(step);
return next;
}
};
}
}
}
/**
* Creates an Iterator of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.range(0, 0) // = ()
* Iterator.range(2, 0) // = ()
* Iterator.range(-2, 2) // = (-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or the empty range if {@code from >= toExclusive}
*/
static Iterator<Integer> range(int from, int toExclusive) {
return rangeBy(from, toExclusive, 1);
}
/**
* Creates an Iterator of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeBy(1, 3, 1) // = (1, 2)
* Iterator.rangeBy(1, 4, 2) // = (1, 3)
* Iterator.rangeBy(4, 1, -2) // = (4, 2)
* Iterator.rangeBy(4, 1, 2) // = ()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1 if step > 0, the last number - 1 if step < 0
* @param step the step
     * @return a range of int values as specified or the empty range if {@code (from == toExclusive) || (step * (from - toExclusive) > 0)}.
* @throws IllegalArgumentException if {@code step} is zero
*/
static Iterator<Integer> rangeBy(int from, int toExclusive, int step) {
final int toInclusive = toExclusive - (step > 0 ? 1 : -1);
return rangeClosedBy(from, toInclusive, step);
}
/**
* Creates an Iterator of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.range(0L, 0L) // = ()
* Iterator.range(2L, 0L) // = ()
* Iterator.range(-2L, 2L) // = (-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or the empty range if {@code from >= toExclusive}
*/
static Iterator<Long> range(long from, long toExclusive) {
return rangeBy(from, toExclusive, 1);
}
/**
* Creates an Iterator of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeBy(1L, 3L, 1L) // = (1L, 2L)
* Iterator.rangeBy(1L, 4L, 2L) // = (1L, 3L)
* Iterator.rangeBy(4L, 1L, -2L) // = (4L, 2L)
* Iterator.rangeBy(4L, 1L, 2L) // = ()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1 if step > 0, the last number - 1 if step < 0
* @param step the step
* @return a range of long values as specified or the empty range if {@code (from == toExclusive) || (step * (from - toExclusive) > 0)}.
* @throws IllegalArgumentException if {@code step} is zero
*/
static Iterator<Long> rangeBy(long from, long toExclusive, long step) {
final long toInclusive = toExclusive - (step > 0 ? 1 : -1);
return rangeClosedBy(from, toInclusive, step);
}
/**
* Creates an Iterator of characters starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeClosed('a', 'c') // = ('a', 'b', 'c')
* Iterator.rangeClosed('c', 'a') // = ()
* </code>
* </pre>
*
* @param from the first character
* @param toInclusive the last character
* @return a range of characters as specified or the empty range if {@code from > toInclusive}
*/
static Iterator<Character> rangeClosed(char from, char toInclusive) {
return rangeClosedBy(from, toInclusive, 1);
}
/**
* Creates an Iterator of characters starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeClosedBy('a', 'c', 1) // = ('a', 'b', 'c')
* Iterator.rangeClosedBy('a', 'd', 2) // = ('a', 'c')
* Iterator.rangeClosedBy('d', 'a', -2) // = ('d', 'b')
* Iterator.rangeClosedBy('d', 'a', 2) // = ()
* </code>
* </pre>
*
* @param from the first character
* @param toInclusive the last character
* @param step the step
* @return a range of characters as specified or the empty range if {@code signum(step) == signum(from - toInclusive)}.
* @throws IllegalArgumentException if {@code step} is zero
*/
static Iterator<Character> rangeClosedBy(char from, char toInclusive, int step) {
return rangeClosedBy((int) from, (int) toInclusive, step).map(i -> (char) i.shortValue());
}
@GwtIncompatible
static Iterator<Double> rangeClosedBy(double from, double toInclusive, double step) {
if (from == toInclusive) {
return of(from);
}
final double toExclusive = (step > 0) ? Math.nextUp(toInclusive) : Math.nextDown(toInclusive);
return rangeBy(from, toExclusive, step);
}
/**
* Creates an Iterator of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeClosed(0, 0) // = (0)
* Iterator.rangeClosed(2, 0) // = ()
* Iterator.rangeClosed(-2, 2) // = (-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or the empty range if {@code from > toInclusive}
*/
static Iterator<Integer> rangeClosed(int from, int toInclusive) {
return rangeClosedBy(from, toInclusive, 1);
}
/**
* Creates an Iterator of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeClosedBy(1, 3, 1) // = (1, 2, 3)
* Iterator.rangeClosedBy(1, 4, 2) // = (1, 3)
* Iterator.rangeClosedBy(4, 1, -2) // = (4, 2)
* Iterator.rangeClosedBy(4, 1, 2) // = ()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or the empty range if {@code signum(step) == signum(from - toInclusive)}.
* @throws IllegalArgumentException if {@code step} is zero
*/
static Iterator<Integer> rangeClosedBy(int from, int toInclusive, int step) {
if (step == 0) {
throw new IllegalArgumentException("step cannot be 0");
} else if (from == toInclusive) {
return of(from);
} else if (Integer.signum(step) == Integer.signum(from - toInclusive)) {
return empty();
} else {
final int end = toInclusive - step;
if (step > 0) {
return new AbstractIterator<Integer>() {
int i = from - step;
@Override
public boolean hasNext() {
return i <= end;
}
@Override
public Integer getNext() {
return i += step;
}
};
} else {
return new AbstractIterator<Integer>() {
int i = from - step;
@Override
public boolean hasNext() {
return i >= end;
}
@Override
public Integer getNext() {
return i += step;
}
};
}
}
}
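    // Design note (added commentary): the iterator above starts at from - step and compares
    // against end = toInclusive - step, so the cursor never has to move past toInclusive.
    // This appears intended to avoid int overflow when toInclusive lies within step of
    // Integer.MAX_VALUE (resp. Integer.MIN_VALUE for negative steps).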
/**
* Creates an Iterator of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeClosed(0L, 0L) // = (0L)
* Iterator.rangeClosed(2L, 0L) // = ()
* Iterator.rangeClosed(-2L, 2L) // = (-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or the empty range if {@code from > toInclusive}
*/
static Iterator<Long> rangeClosed(long from, long toInclusive) {
return rangeClosedBy(from, toInclusive, 1L);
}
/**
* Creates an Iterator of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* Iterator.rangeClosedBy(1L, 3L, 1L) // = (1L, 2L, 3L)
* Iterator.rangeClosedBy(1L, 4L, 2L) // = (1L, 3L)
* Iterator.rangeClosedBy(4L, 1L, -2L) // = (4L, 2L)
* Iterator.rangeClosedBy(4L, 1L, 2L) // = ()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
     * @return a range of long values as specified or the empty range if {@code signum(step) == signum(from - toInclusive)}.
* @throws IllegalArgumentException if {@code step} is zero
*/
static Iterator<Long> rangeClosedBy(long from, long toInclusive, long step) {
if (step == 0) {
throw new IllegalArgumentException("step cannot be 0");
} else if (from == toInclusive) {
return of(from);
} else if (Long.signum(step) == Long.signum(from - toInclusive)) {
return empty();
} else {
final long end = toInclusive - step;
if (step > 0) {
return new AbstractIterator<Long>() {
long i = from - step;
@Override
public boolean hasNext() {
return i <= end;
}
@Override
public Long getNext() {
return i += step;
}
};
} else {
return new AbstractIterator<Long>() {
long i = from - step;
@Override
public boolean hasNext() {
return i >= end;
}
@Override
public Long getNext() {
return i += step;
}
};
}
}
}
/**
* Returns an infinite iterator of int values starting from {@code value}.
* <p>
* The {@code Iterator} extends to {@code Integer.MIN_VALUE} when passing {@code Integer.MAX_VALUE}.
*
* @param value a start int value
     * @return a new {@code Iterator} of int values starting from {@code value}
*/
static Iterator<Integer> from(int value) {
return new AbstractIterator<Integer>() {
private int next = value;
@Override
public boolean hasNext() {
return true;
}
@Override
public Integer getNext() {
return next++;
}
};
}
/**
* Returns an infinite iterator of int values starting from {@code value} and spaced by {@code step}.
* <p>
* The {@code Iterator} extends to {@code Integer.MIN_VALUE} when passing {@code Integer.MAX_VALUE}.
*
* @param value a start int value
* @param step the step by which to advance on each iteration
     * @return a new {@code Iterator} of int values starting from {@code value}
*/
static Iterator<Integer> from(int value, int step) {
return new AbstractIterator<Integer>() {
private int next = value;
@Override
public boolean hasNext() {
return true;
}
@Override
public Integer getNext() {
final int result = next;
next += step;
return result;
}
};
}
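    // Example (added commentary, not part of the original source):
    //   Iterator.from(0, 10).take(3)  // = (0, 10, 20)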
/**
* Returns an infinite iterator of long values starting from {@code value}.
* <p>
* The {@code Iterator} extends to {@code Long.MIN_VALUE} when passing {@code Long.MAX_VALUE}.
*
* @param value a start long value
     * @return a new {@code Iterator} of long values starting from {@code value}
*/
static Iterator<Long> from(long value) {
return new AbstractIterator<Long>() {
private long next = value;
@Override
public boolean hasNext() {
return true;
}
@Override
public Long getNext() {
return next++;
}
};
}
/**
* Returns an infinite iterator of long values starting from {@code value} and spaced by {@code step}.
* <p>
* The {@code Iterator} extends to {@code Long.MIN_VALUE} when passing {@code Long.MAX_VALUE}.
*
* @param value a start long value
* @param step the step by which to advance on each iteration
     * @return a new {@code Iterator} of long values starting from {@code value}
*/
static Iterator<Long> from(long value, long step) {
return new AbstractIterator<Long>() {
private long next = value;
@Override
public boolean hasNext() {
return true;
}
@Override
public Long getNext() {
final long result = next;
next += step;
return result;
}
};
}
/**
* Generates an infinite iterator using a value Supplier.
*
* @param supplier A Supplier of iterator values
* @param <T> value type
* @return A new {@code Iterator}
*/
static <T> Iterator<T> continually(Supplier<? extends T> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return new AbstractIterator<T>() {
@Override
public boolean hasNext() {
return true;
}
@Override
public T getNext() {
return supplier.get();
}
};
}
/**
* Creates an iterator that repeatedly invokes the supplier
     * while it returns a {@code Some} and ends on the first {@code None}.
*
* @param supplier A Supplier of iterator values
* @param <T> value type
* @return A new {@code Iterator}
     * @throws NullPointerException if the supplier produces a null value
*/
static <T> Iterator<T> iterate(Supplier<? extends Option<? extends T>> supplier) {
Objects.requireNonNull(supplier, "supplier is null");
return new AbstractIterator<T>() {
Option<? extends T> nextOption;
@Override
public boolean hasNext() {
if (nextOption == null) {
nextOption = supplier.get();
}
return nextOption.isDefined();
}
@Override
public T getNext() {
final T next = nextOption.get();
nextOption = null;
return next;
}
};
}
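    /*
     * Example (added commentary, not part of the original source): draining a
     * java.util.Queue until it is empty, since Option.of(null) is None:
     *
     *   java.util.Queue<String> q = new java.util.LinkedList<>(java.util.Arrays.asList("a", "b"));
     *   Iterator.iterate(() -> Option.of(q.poll()))  // = ("a", "b"), ends on the first empty poll
     */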
/**
* Generates an infinite iterator using a function to calculate the next value
* based on the previous.
*
* @param seed The first value in the iterator
* @param f A function to calculate the next value based on the previous
* @param <T> value type
* @return A new {@code Iterator}
*/
static <T> Iterator<T> iterate(T seed, Function<? super T, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return new AbstractIterator<T>() {
Function<? super T, ? extends T> nextFunc = s -> {
nextFunc = f;
return seed;
};
T current = null;
@Override
public boolean hasNext() {
return true;
}
@Override
public T getNext() {
current = nextFunc.apply(current);
return current;
}
};
}
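    // Example (added commentary, not part of the original source); the seed is emitted first:
    //   Iterator.iterate(1, x -> x * 2).take(5)  // = (1, 2, 4, 8, 16)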
/**
* Creates an infinite iterator returning the given element.
*
* @param t An element
* @param <T> Element type
* @return A new Iterator containing infinite {@code t}'s.
*/
static <T> Iterator<T> continually(T t) {
return new AbstractIterator<T>() {
@Override
public boolean hasNext() {
return true;
}
@Override
public T getNext() {
return t;
}
};
}
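    // Example (added commentary, not part of the original source):
    //   Iterator.continually(42).take(2)  // = (42, 42)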
// -- Additional methods of Iterator
@Override
default <R> Iterator<R> collect(PartialFunction<? super T, ? extends R> partialFunction) {
Objects.requireNonNull(partialFunction, "partialFunction is null");
return filter(partialFunction::isDefinedAt).map(partialFunction::apply);
}
// DEV-NOTE: cannot use arg Iterable, it would be ambiguous
default Iterator<T> concat(java.util.Iterator<? extends T> that) {
Objects.requireNonNull(that, "that is null");
if (!that.hasNext()) {
return this;
} else if (!hasNext()) {
return ofAll(that);
} else {
return concat(this, ofAll(that));
}
}
/**
* Inserts an element between all elements of this Iterator.
*
* @param element An element.
* @return an interspersed version of this
*/
default Iterator<T> intersperse(T element) {
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
boolean insertElement = false;
@Override
public boolean hasNext() {
return that.hasNext();
}
@Override
public T getNext() {
if (insertElement) {
insertElement = false;
return element;
} else {
insertElement = true;
return that.next();
}
}
};
}
}
/**
* Transforms this {@code Iterator}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
default <U> U transform(Function<? super Iterator<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
default <U> Iterator<Tuple2<T, U>> zip(Iterable<? extends U> that) {
return zipWith(that, Tuple::of);
}
@Override
default <U, R> Iterator<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper) {
Objects.requireNonNull(that, "that is null");
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return empty();
} else {
final Iterator<T> it1 = this;
final java.util.Iterator<? extends U> it2 = that.iterator();
return new AbstractIterator<R>() {
@Override
public boolean hasNext() {
return it1.hasNext() && it2.hasNext();
}
@Override
public R getNext() {
return mapper.apply(it1.next(), it2.next());
}
};
}
}
@Override
default <U> Iterator<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
final java.util.Iterator<? extends U> thatIt = that.iterator();
if (isEmpty() && !thatIt.hasNext()) {
return empty();
} else {
final Iterator<T> thisIt = this;
return new AbstractIterator<Tuple2<T, U>>() {
@Override
public boolean hasNext() {
return thisIt.hasNext() || thatIt.hasNext();
}
@Override
public Tuple2<T, U> getNext() {
final T v1 = thisIt.hasNext() ? thisIt.next() : thisElem;
final U v2 = thatIt.hasNext() ? thatIt.next() : thatElem;
return Tuple.of(v1, v2);
}
};
}
}
@Override
default Iterator<Tuple2<T, Integer>> zipWithIndex() {
return zipWithIndex(Tuple::of);
}
@Override
default <U> Iterator<U> zipWithIndex(BiFunction<? super T, ? super Integer, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return empty();
} else {
final Iterator<T> it1 = this;
return new AbstractIterator<U>() {
private int index = 0;
@Override
public boolean hasNext() {
return it1.hasNext();
}
@Override
public U getNext() {
return mapper.apply(it1.next(), index++);
}
};
}
}
@Override
default <T1, T2> Tuple2<Iterator<T1>, Iterator<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
if (!hasNext()) {
return Tuple.of(empty(), empty());
} else {
final Stream<Tuple2<? extends T1, ? extends T2>> source = Stream.ofAll(this.map(unzipper));
return Tuple.of(source.map(t -> (T1) t._1).iterator(), source.map(t -> (T2) t._2).iterator());
}
}
@Override
default <T1, T2, T3> Tuple3<Iterator<T1>, Iterator<T2>, Iterator<T3>> unzip3(
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
if (!hasNext()) {
return Tuple.of(empty(), empty(), empty());
} else {
final Stream<Tuple3<? extends T1, ? extends T2, ? extends T3>> source = Stream.ofAll(this.map(unzipper));
return Tuple.of(source.map(t -> (T1) t._1).iterator(), source.map(t -> (T2) t._2).iterator(), source.map(t -> (T3) t._3).iterator());
}
}
/**
* Creates an iterator from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating elements, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting iterator and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Iterator.unfold(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
     * // List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
* </code>
* </pre>
*
* @param <T> type of seeds and unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a list with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
static <T> Iterator<T> unfold(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends T>>> f) {
return unfoldLeft(seed, f);
}
/**
* Creates an iterator from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating elements, otherwise {@code Some} {@code Tuple}
* of the value to add to the resulting iterator and
* the element for the next call.
* <p>
* Example:
* <pre>
* <code>
* Iterator.unfoldLeft(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x-1, x)));
     * // List(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
* </code>
* </pre>
*
* @param <T> type of seeds
* @param <U> type of unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a list with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
static <T, U> Iterator<U> unfoldLeft(T seed, Function<? super T, Option<Tuple2<? extends T, ? extends U>>> f) {
Objects.requireNonNull(f, "f is null");
return Stream.<U> ofAll(
unfoldRight(seed, f.andThen(tupleOpt -> tupleOpt.map(t -> Tuple.of(t._2, t._1)))))
.reverse().iterator();
}
/**
* Creates an iterator from a seed value and a function.
* The function takes the seed at first.
* The function should return {@code None} when it's
* done generating elements, otherwise {@code Some} {@code Tuple}
* of the element for the next call and the value to add to the
* resulting iterator.
* <p>
* Example:
* <pre>
* <code>
* Iterator.unfoldRight(10, x -> x == 0
* ? Option.none()
* : Option.of(new Tuple2<>(x, x-1)));
     * // List(10, 9, 8, 7, 6, 5, 4, 3, 2, 1)
* </code>
* </pre>
*
* @param <T> type of seeds
* @param <U> type of unfolded values
* @param seed the start value for the iteration
* @param f the function to get the next step of the iteration
* @return a list with the values built up by the iteration
* @throws NullPointerException if {@code f} is null
*/
static <T, U> Iterator<U> unfoldRight(T seed, Function<? super T, Option<Tuple2<? extends U, ? extends T>>> f) {
Objects.requireNonNull(f, "the unfold iterating function is null");
return new AbstractIterator<U>() {
private Option<Tuple2<? extends U, ? extends T>> nextVal = f.apply(seed);
@Override
public boolean hasNext() {
return nextVal.isDefined();
}
@Override
public U getNext() {
final U result = nextVal.get()._1;
nextVal = f.apply(nextVal.get()._2);
return result;
}
};
}
// -- Overridden methods of Traversable
@Override
default Iterator<T> distinct() {
if (!hasNext()) {
return empty();
} else {
return new DistinctIterator<>(this, io.vavr.collection.HashSet.empty(), Function.identity());
}
}
@Override
default Iterator<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
if (!hasNext()) {
return empty();
} else {
return new DistinctIterator<>(this, TreeSet.empty(comparator), Function.identity());
}
}
@Override
default <U> Iterator<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
if (!hasNext()) {
return empty();
} else {
return new DistinctIterator<>(this, io.vavr.collection.HashSet.empty(), keyExtractor);
}
}
/**
* Removes up to n elements from this iterator.
*
* @param n A number
     * @return This iterator, if {@code n <= 0}, the empty iterator, if this is empty, otherwise a new iterator without the first n elements.
*/
@Override
default Iterator<T> drop(int n) {
if (n <= 0) {
return this;
} else if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
long count = n;
@Override
public boolean hasNext() {
while (count > 0 && that.hasNext()) {
that.next(); // discarded
count--;
}
return that.hasNext();
}
@Override
public T getNext() {
return that.next();
}
};
}
}
@Override
default Iterator<T> dropRight(int n) {
if (n <= 0) {
return this;
} else if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
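                // Added commentary: buffers up to n elements; an element is emitted only
                // once n newer elements stand behind it, so the trailing n are dropped.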
private io.vavr.collection.Queue<T> queue = io.vavr.collection.Queue.empty();
@Override
public boolean hasNext() {
while (queue.length() < n && that.hasNext()) {
queue = queue.append(that.next());
}
return queue.length() == n && that.hasNext();
}
@Override
public T getNext() {
final Tuple2<T, io.vavr.collection.Queue<T>> t = queue.append(that.next()).dequeue();
queue = t._2;
return t._1;
}
};
}
}
@Override
default Iterator<T> dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
default Iterator<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (!hasNext()) {
return empty();
} else {
final CachedIterator<T> that = new CachedIterator<>(this);
while (that.hasNext() && predicate.test(that.touch())) {
that.next();
}
return that;
}
}
/**
* Returns an Iterator that contains elements that satisfy the given {@code predicate}.
*
* @param predicate A predicate
* @return A new Iterator
*/
@Override
default Iterator<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
Option<T> next = Option.none();
@Override
public boolean hasNext() {
while (next.isEmpty() && that.hasNext()) {
final T candidate = that.next();
if (predicate.test(candidate)) {
next = Option.some(candidate);
}
}
return next.isDefined();
}
@Override
public T getNext() {
final T result = next.get();
next = Option.none();
return result;
}
};
}
}
@Override
default Iterator<T> reject(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return filter(predicate.negate());
}
@Override
default Option<T> findLast(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
T last = null;
while (hasNext()) {
final T elem = next();
if (predicate.test(elem)) {
last = elem;
}
}
return Option.of(last);
}
/**
* FlatMaps the elements of this Iterator to Iterables, which are iterated in the order of occurrence.
*
* @param mapper A mapper
* @param <U> Component type
     * @return A new Iterator
*/
@Override
default <U> Iterator<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<U>() {
final Iterator<? extends T> inputs = that;
java.util.Iterator<? extends U> current = java.util.Collections.emptyIterator();
@Override
public boolean hasNext() {
boolean currentHasNext;
while (!(currentHasNext = current.hasNext()) && inputs.hasNext()) {
current = mapper.apply(inputs.next()).iterator();
}
return currentHasNext;
}
@Override
public U getNext() {
return current.next();
}
};
}
}
@Override
default <U> U foldRight(U zero, BiFunction<? super T, ? super U, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return Stream.ofAll(this).foldRight(zero, f);
}
@Override
default T get() {
return head();
}
@Override
default <C> Map<C, Iterator<T>> groupBy(Function<? super T, ? extends C> classifier) {
return io.vavr.collection.Collections.groupBy(this, classifier, Iterator::ofAll);
}
@Override
default Iterator<Seq<T>> grouped(int size) {
return new GroupedIterator<>(this, size, size);
}
@Override
default boolean hasDefiniteSize() {
return false;
}
@Override
default T head() {
if (!hasNext()) {
throw new NoSuchElementException("head() on empty iterator");
}
return next();
}
@Override
default Iterator<T> init() {
if (!hasNext()) {
throw new UnsupportedOperationException();
} else {
return dropRight(1);
}
}
@Override
default Option<Iterator<T>> initOption() {
return hasNext() ? Option.some(init()) : Option.none();
}
/**
* An {@code Iterator} is computed synchronously.
*
* @return false
*/
@Override
default boolean isAsync() {
return false;
}
@Override
default boolean isEmpty() {
return !hasNext();
}
/**
* An {@code Iterator} is computed lazily.
*
* @return true
*/
@Override
default boolean isLazy() {
return true;
}
@Override
default boolean isTraversableAgain() {
return false;
}
@Override
default boolean isSequential() {
return true;
}
@Override
default Iterator<T> iterator() {
return this;
}
@Override
default T last() {
return Collections.last(this);
}
@Override
default int length() {
return foldLeft(0, (n, ignored) -> n + 1);
}
/**
* Maps the elements of this Iterator lazily using the given {@code mapper}.
*
* @param mapper A mapper.
* @param <U> Component type
* @return A new Iterator
*/
@Override
default <U> Iterator<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<U>() {
@Override
public boolean hasNext() {
return that.hasNext();
}
@Override
public U getNext() {
return mapper.apply(that.next());
}
};
}
}
@Override
default Iterator<T> orElse(Iterable<? extends T> other) {
return isEmpty() ? ofAll(other) : this;
}
@Override
default Iterator<T> orElse(Supplier<? extends Iterable<? extends T>> supplier) {
return isEmpty() ? ofAll(supplier.get()) : this;
}
@Override
default Tuple2<Iterator<T>, Iterator<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (!hasNext()) {
return Tuple.of(empty(), empty());
} else {
final Stream<T> that = Stream.ofAll(this);
final Iterator<T> first = that.iterator().filter(predicate);
final Iterator<T> second = that.iterator().filter(predicate.negate());
return Tuple.of(first, second);
}
}
@Override
default Iterator<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
@Override
public boolean hasNext() {
return that.hasNext();
}
@Override
public T getNext() {
final T next = that.next();
action.accept(next);
return next;
}
};
}
}
@Override
default T reduceLeft(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
if (isEmpty()) {
throw new NoSuchElementException("reduceLeft on Nil");
} else {
T xs = next();
while (hasNext()) {
xs = op.apply(xs, next());
}
return xs;
}
}
@Override
default T reduceRight(BiFunction<? super T, ? super T, ? extends T> op) {
Objects.requireNonNull(op, "op is null");
if (isEmpty()) {
throw new NoSuchElementException("reduceRight on Nil");
} else {
final Stream<T> reversed = Stream.ofAll(this).reverse();
return reversed.reduceLeft((xs, x) -> op.apply(x, xs));
}
}
@Override
default Iterator<T> replace(T currentElement, T newElement) {
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
boolean isFirst = true;
@Override
public boolean hasNext() {
return that.hasNext();
}
@Override
public T getNext() {
final T elem = that.next();
if (isFirst && Objects.equals(currentElement, elem)) {
isFirst = false;
return newElement;
} else {
return elem;
}
}
};
}
}
@Override
default Iterator<T> replaceAll(T currentElement, T newElement) {
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
@Override
public boolean hasNext() {
return that.hasNext();
}
@Override
public T getNext() {
final T elem = that.next();
if (Objects.equals(currentElement, elem)) {
return newElement;
} else {
return elem;
}
}
};
}
}
@Override
default Iterator<T> retainAll(Iterable<? extends T> elements) {
return io.vavr.collection.Collections.retainAll(this, elements);
}
@Override
default Traversable<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
default <U> Iterator<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
if (isEmpty()) {
return of(zero);
} else {
final Iterator<T> that = this;
return new AbstractIterator<U>() {
boolean isFirst = true;
U acc = zero;
@Override
public boolean hasNext() {
return isFirst || that.hasNext();
}
@Override
public U getNext() {
if (isFirst) {
isFirst = false;
return acc;
} else {
acc = operation.apply(acc, that.next());
return acc;
}
}
};
}
}
// not lazy!
@Override
default <U> Iterator<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
if (isEmpty()) {
return of(zero);
} else {
return io.vavr.collection.Collections.scanRight(this, zero, operation, Function.identity());
}
}
@Override
default Iterator<Seq<T>> slideBy(Function<? super T, ?> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
if (!hasNext()) {
return empty();
} else {
final CachedIterator<T> source = new CachedIterator<>(this);
return new AbstractIterator<Seq<T>>() {
private Stream<T> next = null;
@Override
public boolean hasNext() {
if (next == null && source.hasNext()) {
final Object key = classifier.apply(source.touch());
final java.util.List<T> acc = new ArrayList<>();
while (source.hasNext() && key.equals(classifier.apply(source.touch()))) {
acc.add(source.getNext());
}
next = Stream.ofAll(acc);
}
return next != null;
}
@Override
public Stream<T> getNext() {
final Stream<T> result = next;
next = null;
return result;
}
};
}
}
@Override
default Iterator<Seq<T>> sliding(int size) {
return sliding(size, 1);
}
@Override
default Iterator<Seq<T>> sliding(int size, int step) {
return new GroupedIterator<>(this, size, step);
}
@Override
default Tuple2<Iterator<T>, Iterator<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (!hasNext()) {
return Tuple.of(empty(), empty());
} else {
final Stream<T> that = Stream.ofAll(this);
return Tuple.of(that.iterator().takeWhile(predicate), that.iterator().dropWhile(predicate));
}
}
@Override
default String stringPrefix() {
return "Iterator";
}
@Override
default Iterator<T> tail() {
if (!hasNext()) {
throw new UnsupportedOperationException();
} else {
next(); // remove first element
return this;
}
}
@Override
default Option<Iterator<T>> tailOption() {
if (hasNext()) {
next();
return Option.some(this);
} else {
return Option.none();
}
}
/**
     * Takes up to the first n elements from this iterator.
     *
     * @param n A number
     * @return The empty iterator, if {@code n <= 0} or this is empty, otherwise a new iterator over at most the first n elements.
*/
@Override
default Iterator<T> take(int n) {
if (n <= 0 || !hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
long count = n;
@Override
public boolean hasNext() {
return count > 0 && that.hasNext();
}
@Override
public T getNext() {
count--;
return that.next();
}
};
}
}
@Override
default Iterator<T> takeRight(int n) {
if (n <= 0) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
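            // Added commentary: hasNext() eagerly drains the source into a queue bounded
            // at n elements, so only the last n elements remain to be emitted.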
private io.vavr.collection.Queue<T> queue = io.vavr.collection.Queue.empty();
@Override
public boolean hasNext() {
while (that.hasNext()) {
queue = queue.enqueue(that.next());
if (queue.length() > n) {
queue = queue.dequeue()._2;
}
}
return queue.length() > 0;
}
@Override
public T getNext() {
final Tuple2<T, io.vavr.collection.Queue<T>> t = queue.dequeue();
queue = t._2;
return t._1;
}
};
}
}
@Override
default Iterator<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeWhile(predicate.negate());
}
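    // The takeWhile below stops at the first failing element, e.g.
    // Iterator.of(2, 4, 5, 6).takeWhile(i -> i % 2 == 0) yields 2, 4; note the
    // failing element (5) is consumed from the source but never emitted.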
@Override
default Iterator<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (!hasNext()) {
return empty();
} else {
final Iterator<T> that = this;
return new AbstractIterator<T>() {
private T next;
private boolean cached = false;
private boolean finished = false;
@Override
public boolean hasNext() {
if (cached) {
return true;
} else if (finished) {
return false;
} else if (that.hasNext()) {
next = that.next();
if (predicate.test(next)) {
cached = true;
return true;
}
}
finished = true;
return false;
}
@Override
public T getNext() {
cached = false;
return next;
}
};
}
}
}
interface IteratorModule {
// inspired by Scala's ConcatIterator
final class ConcatIterator<T> extends AbstractIterator<T> {
private static class Cell<T> {
Iterator<T> it;
Cell<T> next;
static <T> Cell<T> of(Iterator<T> it) {
Cell<T> cell = new Cell<>();
cell.it = it;
return cell;
}
Cell<T> append(Iterator<T> it) {
Cell<T> cell = of(it);
next = cell;
return cell;
}
}
private Iterator<T> curr;
private Cell<T> tail;
private Cell<T> last;
private boolean hasNextCalculated;
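        // The appended iterators form a singly linked list of Cells from tail to
        // last; hasNext() below also splices nested ConcatIterators into this
        // list so that repeated concat calls do not build up recursion depth.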
void append(java.util.Iterator<? extends T> that) {
final Iterator<T> it = Iterator.ofAll(that);
if (tail == null) {
tail = last = Cell.of(it);
} else {
last = last.append(it);
}
}
@Override
public Iterator<T> concat(java.util.Iterator<? extends T> that) {
append(that);
return this;
}
@Override
public boolean hasNext() {
if (hasNextCalculated) {
return curr != null;
}
hasNextCalculated = true;
            while (true) {
if (curr != null) {
if (curr.hasNext()) {
return true;
} else {
curr = null;
}
}
if (tail == null) {
return false;
}
curr = tail.it;
tail = tail.next;
while (curr instanceof ConcatIterator) {
ConcatIterator<T> it = (ConcatIterator<T>) curr;
curr = it.curr;
it.last.next = tail;
tail = it.tail;
}
}
}
@Override
public T getNext() {
hasNextCalculated = false;
return curr.next();
}
}
final class DistinctIterator<T, U> extends AbstractIterator<T> {
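        // Filters out elements whose extracted key has been seen before, e.g. with
        // keyExtractor = String::toLowerCase, the input "a", "A", "b" yields "a", "b".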
private final Iterator<? extends T> that;
private io.vavr.collection.Set<U> known;
private final Function<? super T, ? extends U> keyExtractor;
private T next = null;
DistinctIterator(Iterator<? extends T> that, Set<U> set, Function<? super T, ? extends U> keyExtractor) {
this.that = that;
this.known = set;
this.keyExtractor = keyExtractor;
}
@Override
public boolean hasNext() {
while (next == null && that.hasNext()) {
final T elem = that.next();
final U key = keyExtractor.apply(elem);
if (!known.contains(key)) {
known = known.add(key);
next = elem;
}
}
return next != null;
}
@Override
public T getNext() {
final T result = next;
next = null;
return result;
}
}
final class EmptyIterator implements Iterator<Object> {
static final EmptyIterator INSTANCE = new EmptyIterator();
@Override
public boolean hasNext() { return false; }
@Override
public Object next() { throw new NoSuchElementException(stringPrefix() + ".next()"); }
@Override
public String stringPrefix() {
return "EmptyIterator";
}
@Override
public String toString() {
return stringPrefix() + "()";
}
}
final class GroupedIterator<T> implements Iterator<Seq<T>> {
private final Iterator<T> that;
private final int size;
private final int step;
private final int gap;
private final int preserve;
private Object[] buffer;
GroupedIterator(Iterator<T> that, int size, int step) {
if (size < 1 || step < 1) {
throw new IllegalArgumentException("size (" + size + ") and step (" + step + ") must both be positive");
}
this.that = that;
this.size = size;
this.step = step;
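            // gap: elements skipped between windows when step > size;
            // preserve: elements carried over from the previous window when step < size.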
this.gap = Math.max(step - size, 0);
this.preserve = Math.max(size - step, 0);
this.buffer = take(that, new Object[size], 0, size);
}
@Override
public boolean hasNext() {
return buffer.length > 0;
}
@Override
public Seq<T> next() {
if (buffer.length == 0) {
throw new NoSuchElementException();
}
final Object[] result = buffer;
if (that.hasNext()) {
buffer = new Object[size];
if (preserve > 0) {
System.arraycopy(result, step, buffer, 0, preserve);
}
if (gap > 0) {
drop(that, gap);
buffer = take(that, buffer, preserve, size);
} else {
buffer = take(that, buffer, preserve, step);
}
} else {
buffer = new Object[0];
}
return Array.wrap(result);
}
private static void drop(Iterator<?> source, int count) {
for (int i = 0; i < count && source.hasNext(); i++) {
source.next();
}
}
private static Object[] take(Iterator<?> source, Object[] target, int offset, int count) {
int i = offset;
while (i < count + offset && source.hasNext()) {
target[i] = source.next();
i++;
}
if (i < target.length) {
final Object[] result = new Object[i];
System.arraycopy(target, 0, result, 0, i);
return result;
} else {
return target;
}
}
}
final class CachedIterator<T> extends AbstractIterator<T> {
private final Iterator<T> that;
private T next;
private boolean cached = false;
CachedIterator(Iterator<T> that) {
this.that = that;
}
@Override
public boolean hasNext() {
return cached || that.hasNext();
}
@Override
public T getNext() {
if (cached) {
T result = next;
next = null;
cached = false;
return result;
} else {
return that.next();
}
}
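        // Peeks at the next element without logically consuming it: the value is
        // cached and will be returned again by the next getNext() call.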
T touch() {
next = next();
cached = true;
return next;
}
}
final class BigDecimalHelper {
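        // Converts doubles to BigDecimal, mapping the infinities to finite
        // surrogates that lie INFINITY_DISTANCE beyond the largest/smallest
        // finite double; the distance is found by a binary search for the
        // threshold at which doubleValue() overflows to infinity.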
@GwtIncompatible("Math::nextDown is not implemented")
private static final Lazy<BigDecimal> INFINITY_DISTANCE = Lazy.of(() -> {
final BigDecimal two = BigDecimal.valueOf(2);
final BigDecimal supremum = BigDecimal.valueOf(Math.nextDown(Double.POSITIVE_INFINITY));
BigDecimal lowerBound = supremum;
BigDecimal upperBound = two.pow(Double.MAX_EXPONENT + 1);
while (true) {
final BigDecimal magicValue = lowerBound.add(upperBound).divide(two, HALF_UP);
if (Double.isInfinite(magicValue.doubleValue())) {
if (areEqual(magicValue, upperBound)) {
return magicValue.subtract(supremum);
}
upperBound = magicValue;
} else {
lowerBound = magicValue;
}
}
});
/* scale-independent equality */
static boolean areEqual(BigDecimal from, BigDecimal toExclusive) {
return from.compareTo(toExclusive) == 0;
}
/* parse infinite values also */
@GwtIncompatible("Math::nextUp is not implemented")
static BigDecimal asDecimal(double number) {
if (number == NEGATIVE_INFINITY) {
final BigDecimal result = BigDecimal.valueOf(Math.nextUp(NEGATIVE_INFINITY));
return result.subtract(INFINITY_DISTANCE.get());
} else if (number == POSITIVE_INFINITY) {
final BigDecimal result = BigDecimal.valueOf(Math.nextDown(POSITIVE_INFINITY));
return result.add(INFINITY_DISTANCE.get());
} else {
return BigDecimal.valueOf(number);
}
}
}
}
| 1 | 12,927 | An iterator **of {\@code n} sequential elements,** where each element ~are~ **is the** given {\@code element}. | vavr-io-vavr | java |
@@ -3,11 +3,13 @@
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
+using Datadog.Trace.Configuration;
+
namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AdoNet.MySql
{
internal static class MySqlConstants
{
- public const string SqlCommandIntegrationName = "MySqlCommand";
+ public const string SqlCommandIntegrationName = nameof(IntegrationIds.AdoNet);
internal struct MySqlDataClientData : IAdoNetClientData
{ | 1 | // <copyright file="MySqlConstants.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AdoNet.MySql
{
internal static class MySqlConstants
{
public const string SqlCommandIntegrationName = "MySqlCommand";
internal struct MySqlDataClientData : IAdoNetClientData
{
public string IntegrationName => SqlCommandIntegrationName;
public string AssemblyName => "MySql.Data";
public string SqlCommandType => "MySql.Data.MySqlClient.MySqlCommand";
public string MinimumVersion => "6.7.0";
public string MaximumVersion => "6.*.*";
public string DataReaderType => "MySql.Data.MySqlClient.MySqlDataReader";
public string DataReaderTaskType => "System.Threading.Tasks.Task`1<MySql.Data.MySqlClient.MySqlDataReader>";
}
internal struct MySqlData8ClientData : IAdoNetClientData
{
public string IntegrationName => SqlCommandIntegrationName;
public string AssemblyName => "MySql.Data";
public string SqlCommandType => "MySql.Data.MySqlClient.MySqlCommand";
public string MinimumVersion => "8.0.0";
public string MaximumVersion => "8.*.*";
public string DataReaderType => "MySql.Data.MySqlClient.MySqlDataReader";
public string DataReaderTaskType => "System.Threading.Tasks.Task`1<MySql.Data.MySqlClient.MySqlDataReader>";
}
internal struct MySqlConnectorClientData : IAdoNetClientData
{
public string IntegrationName => SqlCommandIntegrationName;
public string AssemblyName => "MySqlConnector";
public string SqlCommandType => "MySqlConnector.MySqlCommand";
public string MinimumVersion => "1.0.0";
public string MaximumVersion => "1.*.*";
public string DataReaderType => "MySqlConnector.MySqlDataReader";
public string DataReaderTaskType => "System.Threading.Tasks.Task`1<MySqlConnector.MySqlDataReader>";
}
}
}
| 1 | 22,240 | If all of the `SqlCommandIntegrationName` values are the same, can we just remove it from `IAdoNetClientData` entirely and put the constant there? | DataDog-dd-trace-dotnet | .cs |
@@ -17,6 +17,8 @@ limitations under the License.
package venafi
import (
+ "github.com/go-logr/logr"
+ logf "github.com/jetstack/cert-manager/pkg/logs"
corelisters "k8s.io/client-go/listers/core/v1"
apiutil "github.com/jetstack/cert-manager/pkg/api/util" | 1 | /*
Copyright 2018 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package venafi
import (
corelisters "k8s.io/client-go/listers/core/v1"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
cmapi "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha2"
"github.com/jetstack/cert-manager/pkg/controller"
"github.com/jetstack/cert-manager/pkg/issuer"
"github.com/jetstack/cert-manager/pkg/issuer/venafi/client"
)
// Venafi is an implementation that uses the govcert library to manage certificates from TPP or Venafi Cloud
type Venafi struct {
issuer cmapi.GenericIssuer
*controller.Context
secretsLister corelisters.SecretLister
// Namespace in which to read resources related to this Issuer from.
// For Issuers, this will be the namespace of the Issuer.
// For ClusterIssuers, this will be the cluster resource namespace.
resourceNamespace string
clientBuilder client.VenafiClientBuilder
}
func NewVenafi(ctx *controller.Context, issuer cmapi.GenericIssuer) (issuer.Interface, error) {
return &Venafi{
issuer: issuer,
secretsLister: ctx.KubeSharedInformerFactory.Core().V1().Secrets().Lister(),
resourceNamespace: ctx.IssuerOptions.ResourceNamespace(issuer),
clientBuilder: client.New,
Context: ctx,
}, nil
}
func init() {
issuer.RegisterIssuer(apiutil.IssuerVenafi, NewVenafi)
}
| 1 | 22,893 | Maybe regroup this import with the other cert-manager packages. | jetstack-cert-manager | go |
@@ -167,6 +167,8 @@ public class UserPreferences {
int theme = getTheme();
if (theme == R.style.Theme_AntennaPod_Dark) {
return R.style.Theme_AntennaPod_Dark_NoTitle;
+ }else if (theme == R.style.Theme_AntennaPod_TrueBlack){
+ return R.style.Theme_AntennaPod_TrueBlack_NoTitle;
} else {
return R.style.Theme_AntennaPod_Light_NoTitle;
} | 1 | package de.danoeh.antennapod.core.preferences;
import android.app.AlarmManager;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.SystemClock;
import android.preference.PreferenceManager;
import android.support.annotation.IntRange;
import android.support.annotation.NonNull;
import android.support.v4.app.NotificationCompat;
import android.text.TextUtils;
import android.util.Log;
import org.json.JSONArray;
import org.json.JSONException;
import java.io.File;
import java.io.IOException;
import java.net.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.List;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.receiver.FeedUpdateReceiver;
import de.danoeh.antennapod.core.service.download.ProxyConfig;
import de.danoeh.antennapod.core.storage.APCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APNullCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APQueueCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.EpisodeCleanupAlgorithm;
import de.danoeh.antennapod.core.util.Converter;
/**
 * Provides access to preferences set by the user in the settings screen. This
 * class must first be initialized via init(), otherwise every public method
 * will throw an Exception when called.
*/
public class UserPreferences {
private static final String IMPORT_DIR = "import/";
private static final String TAG = "UserPreferences";
// User Interface
public static final String PREF_THEME = "prefTheme";
public static final String PREF_HIDDEN_DRAWER_ITEMS = "prefHiddenDrawerItems";
private static final String PREF_DRAWER_FEED_ORDER = "prefDrawerFeedOrder";
private static final String PREF_DRAWER_FEED_COUNTER = "prefDrawerFeedIndicator";
private static final String PREF_EXPANDED_NOTIFICATION = "prefExpandNotify";
private static final String PREF_PERSISTENT_NOTIFICATION = "prefPersistNotify";
public static final String PREF_COMPACT_NOTIFICATION_BUTTONS = "prefCompactNotificationButtons";
public static final String PREF_LOCKSCREEN_BACKGROUND = "prefLockscreenBackground";
private static final String PREF_SHOW_DOWNLOAD_REPORT = "prefShowDownloadReport";
// Queue
private static final String PREF_QUEUE_ADD_TO_FRONT = "prefQueueAddToFront";
// Playback
public static final String PREF_PAUSE_ON_HEADSET_DISCONNECT = "prefPauseOnHeadsetDisconnect";
public static final String PREF_UNPAUSE_ON_HEADSET_RECONNECT = "prefUnpauseOnHeadsetReconnect";
private static final String PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT = "prefUnpauseOnBluetoothReconnect";
private static final String PREF_HARDWARE_FOWARD_BUTTON_SKIPS = "prefHardwareForwardButtonSkips";
private static final String PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS = "prefHardwarePreviousButtonRestarts";
public static final String PREF_FOLLOW_QUEUE = "prefFollowQueue";
private static final String PREF_SKIP_KEEPS_EPISODE = "prefSkipKeepsEpisode";
private static final String PREF_FAVORITE_KEEPS_EPISODE = "prefFavoriteKeepsEpisode";
private static final String PREF_AUTO_DELETE = "prefAutoDelete";
public static final String PREF_SMART_MARK_AS_PLAYED_SECS = "prefSmartMarkAsPlayedSecs";
private static final String PREF_PLAYBACK_SPEED_ARRAY = "prefPlaybackSpeedArray";
private static final String PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS = "prefPauseForFocusLoss";
private static final String PREF_RESUME_AFTER_CALL = "prefResumeAfterCall";
// Network
private static final String PREF_ENQUEUE_DOWNLOADED = "prefEnqueueDownloaded";
public static final String PREF_UPDATE_INTERVAL = "prefAutoUpdateIntervall";
private static final String PREF_MOBILE_UPDATE = "prefMobileUpdate";
public static final String PREF_EPISODE_CLEANUP = "prefEpisodeCleanup";
public static final String PREF_PARALLEL_DOWNLOADS = "prefParallelDownloads";
public static final String PREF_EPISODE_CACHE_SIZE = "prefEpisodeCacheSize";
public static final String PREF_ENABLE_AUTODL = "prefEnableAutoDl";
public static final String PREF_ENABLE_AUTODL_ON_BATTERY = "prefEnableAutoDownloadOnBattery";
public static final String PREF_ENABLE_AUTODL_WIFI_FILTER = "prefEnableAutoDownloadWifiFilter";
private static final String PREF_ENABLE_AUTODL_ON_MOBILE = "prefEnableAutoDownloadOnMobile";
private static final String PREF_AUTODL_SELECTED_NETWORKS = "prefAutodownloadSelectedNetworks";
private static final String PREF_PROXY_TYPE = "prefProxyType";
private static final String PREF_PROXY_HOST = "prefProxyHost";
private static final String PREF_PROXY_PORT = "prefProxyPort";
private static final String PREF_PROXY_USER = "prefProxyUser";
private static final String PREF_PROXY_PASSWORD = "prefProxyPassword";
// Services
private static final String PREF_AUTO_FLATTR = "pref_auto_flattr";
private static final String PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD = "prefAutoFlattrPlayedDurationThreshold";
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications";
// Other
private static final String PREF_DATA_FOLDER = "prefDataFolder";
public static final String PREF_IMAGE_CACHE_SIZE = "prefImageCacheSize";
// Mediaplayer
private static final String PREF_PLAYBACK_SPEED = "prefPlaybackSpeed";
private static final String PREF_FAST_FORWARD_SECS = "prefFastForwardSecs";
private static final String PREF_REWIND_SECS = "prefRewindSecs";
private static final String PREF_QUEUE_LOCKED = "prefQueueLocked";
private static final String IMAGE_CACHE_DEFAULT_VALUE = "100";
private static final int IMAGE_CACHE_SIZE_MINIMUM = 20;
private static final String PREF_LEFT_VOLUME = "prefLeftVolume";
private static final String PREF_RIGHT_VOLUME = "prefRightVolume";
// Experimental
public static final String PREF_SONIC = "prefSonic";
private static final String PREF_STEREO_TO_MONO = "PrefStereoToMono";
public static final String PREF_NORMALIZER = "prefNormalizer";
    public static final String PREF_CAST_ENABLED = "prefCast"; // Used for enabling Chromecast support
public static final int EPISODE_CLEANUP_QUEUE = -1;
public static final int EPISODE_CLEANUP_NULL = -2;
public static final int EPISODE_CLEANUP_DEFAULT = 0;
// Constants
private static final int NOTIFICATION_BUTTON_REWIND = 0;
private static final int NOTIFICATION_BUTTON_FAST_FORWARD = 1;
private static final int NOTIFICATION_BUTTON_SKIP = 2;
private static final int EPISODE_CACHE_SIZE_UNLIMITED = -1;
public static final int FEED_ORDER_COUNTER = 0;
public static final int FEED_ORDER_ALPHABETICAL = 1;
public static final int FEED_ORDER_LAST_UPDATE = 2;
public static final int FEED_ORDER_MOST_PLAYED = 3;
public static final int FEED_COUNTER_SHOW_NEW_UNPLAYED_SUM = 0;
public static final int FEED_COUNTER_SHOW_NEW = 1;
public static final int FEED_COUNTER_SHOW_UNPLAYED = 2;
public static final int FEED_COUNTER_SHOW_NONE = 3;
public static final int FEED_COUNTER_SHOW_DOWNLOADED = 4;
private static Context context;
private static SharedPreferences prefs;
/**
* Sets up the UserPreferences class.
*
* @throws IllegalArgumentException if context is null
*/
public static void init(@NonNull Context context) {
Log.d(TAG, "Creating new instance of UserPreferences");
UserPreferences.context = context.getApplicationContext();
UserPreferences.prefs = PreferenceManager.getDefaultSharedPreferences(context);
createImportDirectory();
createNoMediaFile();
}
/**
* Returns theme as R.style value
*
* @return R.style.Theme_AntennaPod_Light or R.style.Theme_AntennaPod_Dark
*/
public static int getTheme() {
return readThemeValue(prefs.getString(PREF_THEME, "0"));
}
public static int getNoTitleTheme() {
int theme = getTheme();
if (theme == R.style.Theme_AntennaPod_Dark) {
return R.style.Theme_AntennaPod_Dark_NoTitle;
} else {
return R.style.Theme_AntennaPod_Light_NoTitle;
}
}
public static List<String> getHiddenDrawerItems() {
String hiddenItems = prefs.getString(PREF_HIDDEN_DRAWER_ITEMS, "");
return new ArrayList<>(Arrays.asList(TextUtils.split(hiddenItems, ",")));
}
public static List<Integer> getCompactNotificationButtons() {
String[] buttons = TextUtils.split(
prefs.getString(PREF_COMPACT_NOTIFICATION_BUTTONS,
String.valueOf(NOTIFICATION_BUTTON_SKIP)),
",");
List<Integer> notificationButtons = new ArrayList<>();
for (String button : buttons) {
notificationButtons.add(Integer.parseInt(button));
}
return notificationButtons;
}
/**
* Helper function to return whether the specified button should be shown on compact
* notifications.
*
* @param buttonId Either NOTIFICATION_BUTTON_REWIND, NOTIFICATION_BUTTON_FAST_FORWARD or
* NOTIFICATION_BUTTON_SKIP.
* @return {@code true} if button should be shown, {@code false} otherwise
*/
private static boolean showButtonOnCompactNotification(int buttonId) {
return getCompactNotificationButtons().contains(buttonId);
}
public static boolean showRewindOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_REWIND);
}
public static boolean showFastForwardOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_FAST_FORWARD);
}
public static boolean showSkipOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_SKIP);
}
public static int getFeedOrder() {
String value = prefs.getString(PREF_DRAWER_FEED_ORDER, "0");
return Integer.parseInt(value);
}
public static int getFeedCounterSetting() {
String value = prefs.getString(PREF_DRAWER_FEED_COUNTER, "0");
return Integer.parseInt(value);
}
/**
* Returns notification priority.
*
* @return NotificationCompat.PRIORITY_MAX or NotificationCompat.PRIORITY_DEFAULT
*/
public static int getNotifyPriority() {
if (prefs.getBoolean(PREF_EXPANDED_NOTIFICATION, false)) {
return NotificationCompat.PRIORITY_MAX;
} else {
return NotificationCompat.PRIORITY_DEFAULT;
}
}
/**
* Returns true if notifications are persistent
*
* @return {@code true} if notifications are persistent, {@code false} otherwise
*/
public static boolean isPersistNotify() {
return prefs.getBoolean(PREF_PERSISTENT_NOTIFICATION, true);
}
/**
* Returns true if the lockscreen background should be set to the current episode's image
*
* @return {@code true} if the lockscreen background should be set, {@code false} otherwise
*/
public static boolean setLockscreenBackground() {
return prefs.getBoolean(PREF_LOCKSCREEN_BACKGROUND, true);
}
/**
* Returns true if download reports are shown
*
* @return {@code true} if download reports are shown, {@code false} otherwise
*/
public static boolean showDownloadReport() {
return prefs.getBoolean(PREF_SHOW_DOWNLOAD_REPORT, true);
}
public static boolean enqueueDownloadedEpisodes() {
return prefs.getBoolean(PREF_ENQUEUE_DOWNLOADED, true);
}
public static boolean enqueueAtFront() {
return prefs.getBoolean(PREF_QUEUE_ADD_TO_FRONT, false);
}
public static boolean isPauseOnHeadsetDisconnect() {
return prefs.getBoolean(PREF_PAUSE_ON_HEADSET_DISCONNECT, true);
}
public static boolean isUnpauseOnHeadsetReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_HEADSET_RECONNECT, true);
}
public static boolean isUnpauseOnBluetoothReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT, false);
}
public static boolean shouldHardwareButtonSkip() {
return prefs.getBoolean(PREF_HARDWARE_FOWARD_BUTTON_SKIPS, false);
}
public static boolean shouldHardwarePreviousButtonRestart() {
return prefs.getBoolean(PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS, false);
}
public static boolean isFollowQueue() {
return prefs.getBoolean(PREF_FOLLOW_QUEUE, true);
}
public static boolean shouldSkipKeepEpisode() { return prefs.getBoolean(PREF_SKIP_KEEPS_EPISODE, true); }
public static boolean shouldFavoriteKeepEpisode() {
return prefs.getBoolean(PREF_FAVORITE_KEEPS_EPISODE, true);
}
public static boolean isAutoDelete() {
return prefs.getBoolean(PREF_AUTO_DELETE, false);
}
public static int getSmartMarkAsPlayedSecs() {
return Integer.parseInt(prefs.getString(PREF_SMART_MARK_AS_PLAYED_SECS, "30"));
}
public static boolean isAutoFlattr() {
return prefs.getBoolean(PREF_AUTO_FLATTR, false);
}
public static String getPlaybackSpeed() {
return prefs.getString(PREF_PLAYBACK_SPEED, "1.00");
}
public static String[] getPlaybackSpeedArray() {
return readPlaybackSpeedArray(prefs.getString(PREF_PLAYBACK_SPEED_ARRAY, null));
}
public static float getLeftVolume() {
int volume = prefs.getInt(PREF_LEFT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static float getRightVolume() {
int volume = prefs.getInt(PREF_RIGHT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static int getLeftVolumePercentage() {
return prefs.getInt(PREF_LEFT_VOLUME, 100);
}
public static int getRightVolumePercentage() {
return prefs.getInt(PREF_RIGHT_VOLUME, 100);
}
public static boolean shouldPauseForFocusLoss() {
return prefs.getBoolean(PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS, false);
}
/*
* Returns update interval in milliseconds; value 0 means that auto update is disabled
* or feeds are updated at a certain time of day
*/
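    // The preference stores either an interval in hours (e.g. "12") or a time of
    // day (e.g. "06:30"); the presence of ':' selects the time-of-day mode.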
public static long getUpdateInterval() {
String updateInterval = prefs.getString(PREF_UPDATE_INTERVAL, "0");
if(!updateInterval.contains(":")) {
return readUpdateInterval(updateInterval);
} else {
return 0;
}
}
public static int[] getUpdateTimeOfDay() {
String datetime = prefs.getString(PREF_UPDATE_INTERVAL, "");
if(datetime.length() >= 3 && datetime.contains(":")) {
String[] parts = datetime.split(":");
int hourOfDay = Integer.parseInt(parts[0]);
int minute = Integer.parseInt(parts[1]);
return new int[] { hourOfDay, minute };
} else {
return new int[0];
}
}
public static boolean isAllowMobileUpdate() {
return prefs.getBoolean(PREF_MOBILE_UPDATE, false);
}
public static int getParallelDownloads() {
return Integer.parseInt(prefs.getString(PREF_PARALLEL_DOWNLOADS, "4"));
}
public static int getEpisodeCacheSizeUnlimited() {
return context.getResources().getInteger(R.integer.episode_cache_size_unlimited);
}
/**
* Returns the capacity of the episode cache. This method will return the
* negative integer EPISODE_CACHE_SIZE_UNLIMITED if the cache size is set to
* 'unlimited'.
*/
public static int getEpisodeCacheSize() {
return readEpisodeCacheSizeInternal(prefs.getString(PREF_EPISODE_CACHE_SIZE, "20"));
}
public static boolean isEnableAutodownload() {
return prefs.getBoolean(PREF_ENABLE_AUTODL, false);
}
public static boolean isEnableAutodownloadOnBattery() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_BATTERY, true);
}
public static boolean isEnableAutodownloadWifiFilter() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_WIFI_FILTER, false);
}
public static boolean isEnableAutodownloadOnMobile() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_MOBILE, false);
}
public static int getImageCacheSize() {
String cacheSizeString = prefs.getString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE);
int cacheSizeInt = Integer.parseInt(cacheSizeString);
// if the cache size is too small the user won't get any images at all
// that's bad, force it back to the default.
if (cacheSizeInt < IMAGE_CACHE_SIZE_MINIMUM) {
prefs.edit().putString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE).apply();
cacheSizeInt = Integer.parseInt(IMAGE_CACHE_DEFAULT_VALUE);
}
        // the preference value is in MB; convert it to bytes
        int cacheSizeInBytes = cacheSizeInt * 1024 * 1024;
        return cacheSizeInBytes;
}
public static int getFastForwardSecs() {
return prefs.getInt(PREF_FAST_FORWARD_SECS, 30);
}
public static int getRewindSecs() {
return prefs.getInt(PREF_REWIND_SECS, 30);
}
/**
* Returns the time after which an episode should be auto-flattr'd in percent of the episode's
* duration.
*/
public static float getAutoFlattrPlayedDurationThreshold() {
return prefs.getFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, 0.8f);
}
public static String[] getAutodownloadSelectedNetworks() {
String selectedNetWorks = prefs.getString(PREF_AUTODL_SELECTED_NETWORKS, "");
return TextUtils.split(selectedNetWorks, ",");
}
public static void setProxyConfig(ProxyConfig config) {
SharedPreferences.Editor editor = prefs.edit();
editor.putString(PREF_PROXY_TYPE, config.type.name());
if(TextUtils.isEmpty(config.host)) {
editor.remove(PREF_PROXY_HOST);
} else {
editor.putString(PREF_PROXY_HOST, config.host);
}
if(config.port <= 0 || config.port > 65535) {
editor.remove(PREF_PROXY_PORT);
} else {
editor.putInt(PREF_PROXY_PORT, config.port);
}
if(TextUtils.isEmpty(config.username)) {
editor.remove(PREF_PROXY_USER);
} else {
editor.putString(PREF_PROXY_USER, config.username);
}
if(TextUtils.isEmpty(config.password)) {
editor.remove(PREF_PROXY_PASSWORD);
} else {
editor.putString(PREF_PROXY_PASSWORD, config.password);
}
editor.apply();
}
public static ProxyConfig getProxyConfig() {
Proxy.Type type = Proxy.Type.valueOf(prefs.getString(PREF_PROXY_TYPE, Proxy.Type.DIRECT.name()));
String host = prefs.getString(PREF_PROXY_HOST, null);
int port = prefs.getInt(PREF_PROXY_PORT, 0);
String username = prefs.getString(PREF_PROXY_USER, null);
String password = prefs.getString(PREF_PROXY_PASSWORD, null);
return new ProxyConfig(type, host, port, username, password);
}
public static boolean shouldResumeAfterCall() {
return prefs.getBoolean(PREF_RESUME_AFTER_CALL, true);
}
public static boolean isQueueLocked() {
return prefs.getBoolean(PREF_QUEUE_LOCKED, false);
}
public static void setFastForwardSecs(int secs) {
prefs.edit()
.putInt(PREF_FAST_FORWARD_SECS, secs)
.apply();
}
public static void setRewindSecs(int secs) {
prefs.edit()
.putInt(PREF_REWIND_SECS, secs)
.apply();
}
public static void setPlaybackSpeed(String speed) {
prefs.edit()
.putString(PREF_PLAYBACK_SPEED, speed)
.apply();
}
public static void setPlaybackSpeedArray(String[] speeds) {
JSONArray jsonArray = new JSONArray();
for (String speed : speeds) {
jsonArray.put(speed);
}
prefs.edit()
.putString(PREF_PLAYBACK_SPEED_ARRAY, jsonArray.toString())
.apply();
}
public static void setVolume(@IntRange(from = 0, to = 100) int leftVolume,
@IntRange(from = 0, to = 100) int rightVolume) {
prefs.edit()
.putInt(PREF_LEFT_VOLUME, leftVolume)
.putInt(PREF_RIGHT_VOLUME, rightVolume)
.apply();
}
public static void setAutodownloadSelectedNetworks(String[] value) {
prefs.edit()
.putString(PREF_AUTODL_SELECTED_NETWORKS, TextUtils.join(",", value))
.apply();
}
/**
* Sets the update interval value.
*/
public static void setUpdateInterval(long hours) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, String.valueOf(hours))
.apply();
// when updating with an interval, we assume the user wants
// to update *now* and then every 'hours' interval thereafter.
restartUpdateAlarm(true);
}
/**
* Sets the update interval value.
*/
public static void setUpdateTimeOfDay(int hourOfDay, int minute) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, hourOfDay + ":" + minute)
.apply();
restartUpdateAlarm(false);
}
/**
* Change the auto-flattr settings
*
* @param enabled Whether automatic flattring should be enabled at all
* @param autoFlattrThreshold The percentage of playback time after which an episode should be
* flattrd. Must be a value between 0 and 1 (inclusive)
* */
public static void setAutoFlattrSettings( boolean enabled, float autoFlattrThreshold) {
if(autoFlattrThreshold < 0.0 || autoFlattrThreshold > 1.0) {
throw new IllegalArgumentException("Flattr threshold must be in range [0.0, 1.0]");
}
prefs.edit()
.putBoolean(PREF_AUTO_FLATTR, enabled)
.putFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, autoFlattrThreshold)
.apply();
}
public static boolean gpodnetNotificationsEnabled() {
return prefs.getBoolean(PREF_GPODNET_NOTIFICATIONS, true);
}
public static void setGpodnetNotificationsEnabled() {
prefs.edit()
.putBoolean(PREF_GPODNET_NOTIFICATIONS, true)
.apply();
}
public static void setHiddenDrawerItems(List<String> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_HIDDEN_DRAWER_ITEMS, str)
.apply();
}
public static void setCompactNotificationButtons(List<Integer> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_COMPACT_NOTIFICATION_BUTTONS, str)
.apply();
}
public static void setQueueLocked(boolean locked) {
prefs.edit()
.putBoolean(PREF_QUEUE_LOCKED, locked)
.apply();
}
private static int readThemeValue(String valueFromPrefs) {
switch (Integer.parseInt(valueFromPrefs)) {
case 0:
return R.style.Theme_AntennaPod_Light;
case 1:
return R.style.Theme_AntennaPod_Dark;
default:
return R.style.Theme_AntennaPod_Light;
}
}
private static long readUpdateInterval(String valueFromPrefs) {
int hours = Integer.parseInt(valueFromPrefs);
return TimeUnit.HOURS.toMillis(hours);
}
private static int readEpisodeCacheSizeInternal(String valueFromPrefs) {
if (valueFromPrefs.equals(context.getString(R.string.pref_episode_cache_unlimited))) {
return EPISODE_CACHE_SIZE_UNLIMITED;
} else {
return Integer.parseInt(valueFromPrefs);
}
}
private static String[] readPlaybackSpeedArray(String valueFromPrefs) {
String[] selectedSpeeds = null;
// If this preference hasn't been set yet, return the default options
if (valueFromPrefs == null) {
selectedSpeeds = new String[] { "1.00", "1.25", "1.50", "1.75", "2.00" };
} else {
try {
JSONArray jsonArray = new JSONArray(valueFromPrefs);
selectedSpeeds = new String[jsonArray.length()];
for (int i = 0; i < jsonArray.length(); i++) {
selectedSpeeds[i] = jsonArray.getString(i);
}
} catch (JSONException e) {
Log.e(TAG, "Got JSON error when trying to get speeds from JSONArray");
e.printStackTrace();
}
}
return selectedSpeeds;
}
public static boolean useSonic() {
return prefs.getBoolean(PREF_SONIC, false);
}
public static void enableSonic(boolean enable) {
prefs.edit()
.putBoolean(PREF_SONIC, enable)
.apply();
}
public static boolean stereoToMono() {
return prefs.getBoolean(PREF_STEREO_TO_MONO, false);
}
public static void stereoToMono(boolean enable) {
prefs.edit()
.putBoolean(PREF_STEREO_TO_MONO, enable)
.apply();
}
public static EpisodeCleanupAlgorithm getEpisodeCleanupAlgorithm() {
int cleanupValue = Integer.parseInt(prefs.getString(PREF_EPISODE_CLEANUP, "-1"));
if (cleanupValue == EPISODE_CLEANUP_QUEUE) {
return new APQueueCleanupAlgorithm();
} else if (cleanupValue == EPISODE_CLEANUP_NULL) {
return new APNullCleanupAlgorithm();
} else {
return new APCleanupAlgorithm(cleanupValue);
}
}
/**
* Return the folder where the app stores all of its data. This method will
* return the standard data folder if none has been set by the user.
*
* @param type The name of the folder inside the data folder. May be null
* when accessing the root of the data folder.
* @return The data folder that has been requested or null if the folder
* could not be created.
*/
public static File getDataFolder(String type) {
String strDir = prefs.getString(PREF_DATA_FOLDER, null);
if (strDir == null) {
Log.d(TAG, "Using default data folder");
return context.getExternalFilesDir(type);
} else {
File dataDir = new File(strDir);
if (!dataDir.exists()) {
if (!dataDir.mkdir()) {
Log.w(TAG, "Could not create data folder");
return null;
}
}
if (type == null) {
return dataDir;
} else {
// handle path separators
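                // recursively create each intermediate directory; 'type' ends up
                // holding the final path component, which is created below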
String[] dirs = type.split("/");
for (int i = 0; i < dirs.length; i++) {
if (dirs.length > 0) {
if (i < dirs.length - 1) {
dataDir = getDataFolder(dirs[i]);
if (dataDir == null) {
return null;
}
}
type = dirs[i];
}
}
File typeDir = new File(dataDir, type);
if (!typeDir.exists()) {
if (dataDir.canWrite()) {
if (!typeDir.mkdir()) {
Log.e(TAG, "Could not create data folder named " + type);
return null;
}
}
}
return typeDir;
}
}
}
public static void setDataFolder(String dir) {
Log.d(TAG, "setDataFolder(dir: " + dir + ")");
prefs.edit()
.putString(PREF_DATA_FOLDER, dir)
.apply();
createImportDirectory();
}
/**
* Create a .nomedia file to prevent scanning by the media scanner.
*/
private static void createNoMediaFile() {
File f = new File(context.getExternalFilesDir(null), ".nomedia");
if (!f.exists()) {
try {
f.createNewFile();
} catch (IOException e) {
Log.e(TAG, "Could not create .nomedia file");
e.printStackTrace();
}
Log.d(TAG, ".nomedia file created");
}
}
/**
* Creates the import directory if it doesn't exist and if storage is
* available
*/
private static void createImportDirectory() {
File importDir = getDataFolder(IMPORT_DIR);
if (importDir != null) {
if (importDir.exists()) {
Log.d(TAG, "Import directory already exists");
} else {
Log.d(TAG, "Creating import directory");
importDir.mkdir();
}
} else {
Log.d(TAG, "Could not access external storage.");
}
}
public static void restartUpdateAlarm(boolean now) {
int[] timeOfDay = getUpdateTimeOfDay();
Log.d(TAG, "timeOfDay: " + Arrays.toString(timeOfDay));
if (timeOfDay.length == 2) {
restartUpdateTimeOfDayAlarm(timeOfDay[0], timeOfDay[1]);
} else {
long milliseconds = getUpdateInterval();
long startTrigger = milliseconds;
if (now) {
startTrigger = TimeUnit.SECONDS.toMillis(10);
}
restartUpdateIntervalAlarm(startTrigger, milliseconds);
}
}
/**
* Sets the interval in which the feeds are refreshed automatically
*/
private static void restartUpdateIntervalAlarm(long triggerAtMillis, long intervalMillis) {
Log.d(TAG, "Restarting update alarm.");
AlarmManager alarmManager = (AlarmManager) context.getSystemService(Context.ALARM_SERVICE);
Intent intent = new Intent(context, FeedUpdateReceiver.class);
PendingIntent updateIntent = PendingIntent.getBroadcast(context, 0, intent, 0);
alarmManager.cancel(updateIntent);
if (intervalMillis > 0) {
alarmManager.set(AlarmManager.ELAPSED_REALTIME_WAKEUP,
SystemClock.elapsedRealtime() + triggerAtMillis,
updateIntent);
Log.d(TAG, "Changed alarm to new interval " + TimeUnit.MILLISECONDS.toHours(intervalMillis) + " h");
} else {
Log.d(TAG, "Automatic update was deactivated");
}
}
/**
* Sets time of day the feeds are refreshed automatically
*/
private static void restartUpdateTimeOfDayAlarm(int hoursOfDay, int minute) {
Log.d(TAG, "Restarting update alarm.");
AlarmManager alarmManager = (AlarmManager) context.getSystemService(Context.ALARM_SERVICE);
PendingIntent updateIntent = PendingIntent.getBroadcast(context, 0,
new Intent(context, FeedUpdateReceiver.class), 0);
alarmManager.cancel(updateIntent);
Calendar now = Calendar.getInstance();
Calendar alarm = (Calendar)now.clone();
alarm.set(Calendar.HOUR_OF_DAY, hoursOfDay);
alarm.set(Calendar.MINUTE, minute);
if (alarm.before(now) || alarm.equals(now)) {
alarm.add(Calendar.DATE, 1);
}
Log.d(TAG, "Alarm set for: " + alarm.toString() + " : " + alarm.getTimeInMillis());
alarmManager.set(AlarmManager.RTC_WAKEUP,
alarm.getTimeInMillis(),
updateIntent);
Log.d(TAG, "Changed alarm to new time of day " + hoursOfDay + ":" + minute);
}
/**
* Reads episode cache size as it is saved in the episode_cache_size_values array.
*/
public static int readEpisodeCacheSize(String valueFromPrefs) {
return readEpisodeCacheSizeInternal(valueFromPrefs);
}
/**
* Evaluates whether Cast support (Chromecast, Audio Cast, etc) is enabled on the preferences.
*/
public static boolean isCastEnabled() {
return prefs.getBoolean(PREF_CAST_ENABLED, false);
}
}
| 1 | 13,687 | There are spaces missing ;) Just have a look at the code style of the `if` statement above | AntennaPod-AntennaPod | java |
@@ -55,6 +55,9 @@ func (w *DefaultWorker) Generate(
mq := NewMessageQueue(pending)
candidateMsgs := orderMessageCandidates(mq.Drain())
candidateMsgs = w.filterPenalizableMessages(ctx, candidateMsgs)
+ if len(candidateMsgs) > block.BlockMessageLimit {
+ candidateMsgs = candidateMsgs[:block.BlockMessageLimit]
+ }
var blsAccepted []*types.SignedMessage
var secpAccepted []*types.SignedMessage | 1 | package mining
// Block generation is part of the logic of the DefaultWorker.
// 'Generate' is the function that actually creates a new block from a base
// TipSet using the DefaultWorker's many utilities.
import (
"context"
"time"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/specs-actors/actors/abi"
"github.com/pkg/errors"
bls "github.com/filecoin-project/filecoin-ffi"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
"github.com/filecoin-project/go-filecoin/internal/pkg/crypto"
"github.com/filecoin-project/go-filecoin/internal/pkg/drand"
e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid"
"github.com/filecoin-project/go-filecoin/internal/pkg/types"
)
// Generate returns a new block created from the messages in the pool.
// The resulting output is not empty: it has either a block or an error.
func (w *DefaultWorker) Generate(
ctx context.Context,
baseTipSet block.TipSet,
ticket block.Ticket,
electionProof crypto.VRFPi,
nullBlockCount abi.ChainEpoch,
posts []block.PoStProof,
drandEntries []*drand.Entry,
) Output {
generateTimer := time.Now()
defer func() {
log.Infof("[TIMER] DefaultWorker.Generate baseTipset: %s - elapsed time: %s", baseTipSet.String(), time.Since(generateTimer).Round(time.Millisecond))
}()
weight, err := w.getWeight(ctx, baseTipSet)
if err != nil {
return NewOutputErr(errors.Wrap(err, "get weight"))
}
baseHeight, err := baseTipSet.Height()
if err != nil {
return NewOutputErr(errors.Wrap(err, "get base tip set height"))
}
blockHeight := baseHeight + nullBlockCount + 1
// Construct list of message candidates for inclusion.
// These messages will be processed, and those that fail excluded from the block.
pending := w.messageSource.Pending()
mq := NewMessageQueue(pending)
candidateMsgs := orderMessageCandidates(mq.Drain())
candidateMsgs = w.filterPenalizableMessages(ctx, candidateMsgs)
var blsAccepted []*types.SignedMessage
var secpAccepted []*types.SignedMessage
// Align the results with the candidate signed messages to accumulate the messages lists
// to include in the block, and handle failed messages.
for _, msg := range candidateMsgs {
if msg.Message.From.Protocol() == address.BLS {
blsAccepted = append(blsAccepted, msg)
} else {
secpAccepted = append(secpAccepted, msg)
}
}
	// Create an aggregate signature for the BLS messages
unwrappedBLSMessages, blsAggregateSig, err := aggregateBLS(blsAccepted)
if err != nil {
return NewOutputErr(errors.Wrap(err, "could not aggregate bls messages"))
}
// Persist messages to ipld storage
txMetaCid, err := w.messageStore.StoreMessages(ctx, secpAccepted, unwrappedBLSMessages)
if err != nil {
return NewOutputErr(errors.Wrap(err, "error persisting messages"))
}
// get tipset state root and receipt root
baseStateRoot, err := w.tsMetadata.GetTipSetStateRoot(baseTipSet.Key())
if err != nil {
return NewOutputErr(errors.Wrapf(err, "error retrieving state root for tipset %s", baseTipSet.Key().String()))
}
baseReceiptRoot, err := w.tsMetadata.GetTipSetReceiptsRoot(baseTipSet.Key())
if err != nil {
return NewOutputErr(errors.Wrapf(err, "error retrieving receipt root for tipset %s", baseTipSet.Key().String()))
}
// Set the block timestamp to be exactly the start of the target epoch, regardless of the current time.
// The real time might actually be much later than this if catching up from a pause in chain progress.
epochStartTime := w.clock.StartTimeOfEpoch(blockHeight)
if drandEntries == nil {
drandEntries = []*drand.Entry{}
}
if posts == nil {
posts = []block.PoStProof{}
}
next := &block.Block{
Miner: w.minerAddr,
Height: blockHeight,
BeaconEntries: drandEntries,
ElectionProof: &crypto.ElectionProof{VRFProof: electionProof},
Messages: e.NewCid(txMetaCid),
MessageReceipts: e.NewCid(baseReceiptRoot),
Parents: baseTipSet.Key(),
ParentWeight: weight,
PoStProofs: posts,
StateRoot: e.NewCid(baseStateRoot),
Ticket: ticket,
Timestamp: uint64(epochStartTime.Unix()),
BLSAggregateSig: &blsAggregateSig,
}
view, err := w.api.PowerStateView(baseTipSet.Key())
if err != nil {
return NewOutputErr(errors.Wrapf(err, "failed to read state view"))
}
_, workerAddr, err := view.MinerControlAddresses(ctx, w.minerAddr)
if err != nil {
return NewOutputErr(errors.Wrap(err, "failed to read workerAddr during block generation"))
}
workerSigningAddr, err := view.AccountSignerAddress(ctx, workerAddr)
if err != nil {
return NewOutputErr(errors.Wrap(err, "failed to convert worker address to signing address"))
}
blockSig, err := w.workerSigner.SignBytes(ctx, next.SignatureData(), workerSigningAddr)
if err != nil {
return NewOutputErr(errors.Wrap(err, "failed to sign block"))
}
next.BlockSig = &blockSig
return NewOutput(next, blsAccepted, secpAccepted)
}
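// aggregateBLS unwraps the given BLS-signed messages and combines their
// signatures into a single aggregate signature for inclusion in the block header.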
func aggregateBLS(blsMessages []*types.SignedMessage) ([]*types.UnsignedMessage, crypto.Signature, error) {
var sigs []bls.Signature
var unwrappedMsgs []*types.UnsignedMessage
for _, msg := range blsMessages {
// unwrap messages
unwrappedMsgs = append(unwrappedMsgs, &msg.Message)
if msg.Signature.Type != crypto.SigTypeBLS {
return []*types.UnsignedMessage{}, crypto.Signature{}, errors.New("non-BLS message signature")
}
// store message signature as bls signature
blsSig := bls.Signature{}
copy(blsSig[:], msg.Signature.Data)
sigs = append(sigs, blsSig)
}
blsAggregateSig := bls.Aggregate(sigs)
if blsAggregateSig == nil {
return []*types.UnsignedMessage{}, crypto.Signature{}, errors.New("could not aggregate signatures")
}
return unwrappedMsgs, crypto.Signature{
Type: crypto.SigTypeBLS,
Data: blsAggregateSig[:],
}, nil
}
// When a block is validated, BLS messages are processed first, so for simplicity all BLS
// messages are considered first here too.
func orderMessageCandidates(messages []*types.SignedMessage) []*types.SignedMessage {
blsMessages := []*types.SignedMessage{}
secpMessages := []*types.SignedMessage{}
for _, m := range messages {
if m.Message.From.Protocol() == address.BLS {
blsMessages = append(blsMessages, m)
} else {
secpMessages = append(secpMessages, m)
}
}
return append(blsMessages, secpMessages...)
}
func (w *DefaultWorker) filterPenalizableMessages(ctx context.Context, messages []*types.SignedMessage) []*types.SignedMessage {
var goodMessages []*types.SignedMessage
for _, msg := range messages {
err := w.penaltyChecker.PenaltyCheck(ctx, &msg.Message)
if err != nil {
mCid, _ := msg.Cid()
log.Debugf("Msg: %s excluded in block because penalized with err %s", mCid, err)
continue
}
goodMessages = append(goodMessages, msg)
}
return goodMessages
}
| 1 | 23,748 | We should pass the size limit as a parameter to Drain so that we don't take messages out of the pool and then drop them. It's ok to merge as is, I will file an issue. | filecoin-project-venus | go |
@@ -1059,7 +1059,16 @@ encode_with_patch_list(dcontext_t *dcontext, patch_list_t *patch, instrlist_t *i
instr_encode_to_copy(dcontext, inst, vmcode_get_writable_addr(pc), pc);
byte *nxt_pc = vmcode_get_executable_addr(nxt_writable_pc);
ASSERT(nxt_pc != NULL);
+#ifdef AARCH64
+ /* Unlike X86 and ARM/AArch32 which use 1 instruction for an indirect jump,
+ * AArch64 requires 2 instructions: LDR+BR, see INSTR_CREATE_ldr()
+ * followed by XINST_CREATE_jump_reg() calls in
+ * emit_special_ibl_xfer().
+ */
+ len = (int)(nxt_pc - pc) + AARCH64_INSTR_SIZE;
+#else
len = (int)(nxt_pc - pc);
+#endif
pc = nxt_pc;
if (cur < patch->num_relocations && inst == patch->entry[cur].where.instr) { | 1 | /* **********************************************************
* Copyright (c) 2010-2021 Google, Inc. All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* file "emit_utils_shared.c" */
/* The Pentium processors maintain cache consistency in hardware, so we don't
* worry about getting stale cache entries.
*/
/* FIXME i#1551: flush the code cache after updating it on ARM because the
 * hardware does not maintain instruction cache consistency automatically.
*/
#include "../globals.h"
#include "../link.h"
#include "../fragment.h"
#include "../fcache.h"
#include "../emit.h"
#include "arch.h"
#include "instr.h"
#include "instr_create_shared.h"
#include "instrlist.h"
#include "instrument.h" /* for dr_insert_call() */
#include "proc.h"
#include "decode.h"
#include "decode_fast.h"
#include "x86/decode_private.h"
#ifdef DEBUG
# include "disassemble.h"
#endif
#include <limits.h> /* for UCHAR_MAX */
#include "../perscache.h"
#ifdef VMX86_SERVER
# include "vmkuw.h"
#endif
/* fragment_t fields */
/* CAUTION: if TAG_OFFS changes from 0, must change indirect exit stub! */
#define FRAGMENT_START_PC_OFFS (offsetof(fragment_t, start_pc))
#define FRAGMENT_COUNTER_OFFS (offsetof(fragment_t, hot_counter))
#define FRAGMENT_PREFIX_SIZE_OFFS (offsetof(fragment_t, prefix_size))
#ifdef TRACE_HEAD_CACHE_INCR
/* linkstub_t field */
# define LINKSTUB_TARGET_FRAG_OFFS (offsetof(direct_linkstub_t, target_fragment))
#endif
/* make code more readable by shortening long lines
* we mark all as meta to avoid client interface asserts
*/
#define POST instrlist_meta_postinsert
#define PRE instrlist_meta_preinsert
#define APP instrlist_meta_append
/**
** CAUTION!
**
** The following definitions and routines are highly dependent upon
** dcontext and TLS offsets.
**
**/
/* FIXME i#1551: update remaining comments in this file to not be x86-specific */
/***************************************************************************
***************************************************************************
** EXIT STUB
**
** N.B.: all exit stubs must support atomic linking and unlinking,
** meaning a link/unlink operation must involve a single store!
**/
/* The general flow of a direct exit stub is:
*
* spill xax/r0 -> TLS
* move &linkstub -> xax/r0
* jmp fcache_return
*
* The general flow of an indirect exit stub (only used if -indirect_stubs) is:
*
* spill xbx/r1 -> TLS
* move &linkstub -> xbx/r1
* jmp indirect_branch_lookup
*/
/* DIRECT_EXIT_STUB_SIZE is in arch_exports.h */
#define STUB_DIRECT_SIZE(flags) DIRECT_EXIT_STUB_SIZE(flags)
#ifdef X86
/* for -thread_private, we're relying on the fact that
* SIZE32_MOV_XBX_TO_TLS == SIZE32_MOV_XBX_TO_ABS, and that
* x64 always uses tls
*/
# define STUB_INDIRECT_SIZE32 \
(SIZE32_MOV_XBX_TO_TLS + SIZE32_MOV_PTR_IMM_TO_XAX + JMP_LONG_LENGTH)
# define STUB_INDIRECT_SIZE64 \
(SIZE64_MOV_XBX_TO_TLS + SIZE64_MOV_PTR_IMM_TO_XAX + JMP_LONG_LENGTH)
# define STUB_INDIRECT_SIZE(flags) \
(FRAG_IS_32(flags) ? STUB_INDIRECT_SIZE32 : STUB_INDIRECT_SIZE64)
#elif defined(AARCH64)
# define STUB_INDIRECT_SIZE(flags) (7 * AARCH64_INSTR_SIZE)
#else
/* indirect stub is parallel to the direct one minus the data slot */
# define STUB_INDIRECT_SIZE(flags) \
(DIRECT_EXIT_STUB_SIZE(flags) - DIRECT_EXIT_STUB_DATA_SZ)
#endif
/* STUB_COARSE_DIRECT_SIZE is in arch_exports.h */
#define STUB_COARSE_INDIRECT_SIZE(flags) (STUB_INDIRECT_SIZE(flags))
/* Return size in bytes required for an exit stub with specified
* target and FRAG_ flags
*/
int
exit_stub_size(dcontext_t *dcontext, cache_pc target, uint flags)
{
if (TEST(FRAG_COARSE_GRAIN, flags)) {
/* For coarse: bb building points at bb ibl, and then insert_exit_stub
* changes that to the appropriate coarse prefix. So the emit() calls to
* this routine pass in a real ibl. But any later calls, e.g. for
* disassembly, that ask linkstub_size() will call EXIT_TARGET_TAG() which
* calls indirect_linkstub_target() which returns get_coarse_ibl_prefix():
* which then is not recognized as indirect by this routine!
* Note that coarse_indirect_stub_jmp_target() derefs the prefix:
* should we require callers who have stub pc to call that instead of us
* de-referencing?
*/
target = coarse_deref_ibl_prefix(dcontext, target);
}
if (is_indirect_branch_lookup_routine(dcontext, target)) {
/* indirect branch */
/* FIXME: Since we don't have the stub flags we'll lookup the
* target routine's template in a very roundabout fashion here
* by dispatching on the ibl_routine entry point
*/
ibl_code_t *ibl_code;
ibl_type_t ibl_type;
IF_X86_64(gencode_mode_t mode;)
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type_ex(dcontext, target, &ibl_type _IF_X86_64(&mode));
ASSERT(is_ibl);
IF_X86_64(ASSERT(mode == FRAGMENT_GENCODE_MODE(flags) ||
(DYNAMO_OPTION(x86_to_x64) && mode == GENCODE_X86_TO_X64)));
ibl_code = get_ibl_routine_code_ex(dcontext, ibl_type.branch_type,
flags _IF_X86_64(mode));
if (!EXIT_HAS_STUB(ibltype_to_linktype(ibl_code->branch_type),
IBL_FRAG_FLAGS(ibl_code)))
return 0;
if (TEST(FRAG_COARSE_GRAIN, flags)) {
IF_WINDOWS(ASSERT(!is_shared_syscall_routine(dcontext, target)));
/* keep in synch w/ coarse_indirect_stub_size() */
return (STUB_COARSE_INDIRECT_SIZE(flags));
}
#ifdef WINDOWS
if (is_shared_syscall_routine(dcontext, target)) {
return INTERNAL_OPTION(shared_syscalls_fastpath) ? 5
: STUB_INDIRECT_SIZE(flags);
}
#endif
if (ibl_code->ibl_head_is_inlined)
return ibl_code->inline_stub_length;
else
return (STUB_INDIRECT_SIZE(flags));
} else {
/* direct branch */
if (TEST(FRAG_COARSE_GRAIN, flags))
return (STUB_COARSE_DIRECT_SIZE(flags));
else
return (STUB_DIRECT_SIZE(flags));
}
}
static bool
is_patchable_exit_stub_helper(dcontext_t *dcontext, cache_pc ltarget, ushort lflags,
uint fflags)
{
if (LINKSTUB_INDIRECT(lflags)) {
/*indirect */
if (!DYNAMO_OPTION(indirect_stubs))
return false;
if (
#ifdef WINDOWS
!is_shared_syscall_routine(dcontext, ltarget) &&
#endif
get_ibl_routine_code(dcontext, extract_branchtype(lflags), fflags)
->ibl_head_is_inlined) {
return !DYNAMO_OPTION(atomic_inlined_linking);
} else {
return true;
}
} else {
/* direct */
ASSERT(LINKSTUB_DIRECT(lflags));
#ifdef TRACE_HEAD_CACHE_INCR
return true;
#else
return false;
#endif
}
}
bool
is_patchable_exit_stub(dcontext_t *dcontext, linkstub_t *l, fragment_t *f)
{
return is_patchable_exit_stub_helper(dcontext, EXIT_TARGET_TAG(dcontext, f, l),
l->flags, f->flags);
}
bool
is_exit_cti_stub_patchable(dcontext_t *dcontext, instr_t *inst, uint frag_flags)
{
app_pc target;
/* we figure out what the linkstub flags should be
* N.B.: we have to be careful to match the LINKSTUB_ macros
*/
ushort lflags = (ushort)instr_exit_branch_type(inst);
ASSERT_TRUNCATE(lflags, ushort, instr_exit_branch_type(inst));
ASSERT(instr_is_exit_cti(inst));
target = instr_get_branch_target_pc(inst);
if (is_indirect_branch_lookup_routine(dcontext, target)) {
lflags |= LINK_INDIRECT;
} else {
lflags |= LINK_DIRECT;
}
return is_patchable_exit_stub_helper(dcontext, target, lflags, frag_flags);
}
uint
bytes_for_exitstub_alignment(dcontext_t *dcontext, linkstub_t *l, fragment_t *f,
byte *startpc)
{
if (is_patchable_exit_stub(dcontext, l, f)) {
/* assumption - we only hot patch the ending jmp of the exit stub
* (and that exit stub size returns the right values) */
ptr_uint_t shift = ALIGN_SHIFT_SIZE(
startpc +
exit_stub_size(dcontext, EXIT_TARGET_TAG(dcontext, f, l), f->flags) -
EXIT_STUB_PATCH_OFFSET,
EXIT_STUB_PATCH_SIZE, PAD_JMPS_ALIGNMENT);
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(shift)));
return (uint)shift;
}
return 0;
}
/* Returns an upper bound on the number of bytes that will be needed to add
* this fragment to a trace */
uint
extend_trace_pad_bytes(fragment_t *add_frag)
{
/* To estimate we count the number of exit ctis by counting the linkstubs. */
bool inline_ibl_head = TEST(FRAG_IS_TRACE, add_frag->flags)
? DYNAMO_OPTION(inline_trace_ibl)
: DYNAMO_OPTION(inline_bb_ibl);
int num_patchables = 0;
for (linkstub_t *l = FRAGMENT_EXIT_STUBS(add_frag); l != NULL;
l = LINKSTUB_NEXT_EXIT(l)) {
num_patchables++;
if (LINKSTUB_INDIRECT(l->flags) && inline_ibl_head)
num_patchables += 2;
/* We ignore cbr_fallthrough: only one of them should need nops. */
}
return num_patchables * MAX_PAD_SIZE;
}
/* return startpc shifted by the necessary bytes to pad patchable jmps of the
* exit stub to proper alignment */
byte *
pad_for_exitstub_alignment(dcontext_t *dcontext, linkstub_t *l, fragment_t *f,
byte *startpc)
{
uint shift;
ASSERT(PAD_FRAGMENT_JMPS(f->flags)); /* shouldn't call this otherwise */
shift = bytes_for_exitstub_alignment(dcontext, l, f, startpc);
if (shift > 0) {
        /* Pad with 1-byte instructions so the padding looks nice in debuggers.
         * decode_fragment also checks for this as a sanity check. Note,
         * while these instructions can never be reached, they will be decoded
         * when fcache pointers are shifted, so we must put something valid here. */
SET_TO_DEBUG(startpc, shift);
startpc += shift;
STATS_PAD_JMPS_ADD(f->flags, num_shifted_stubs, 1);
STATS_PAD_JMPS_ADD(f->flags, shifted_stub_bytes, shift);
} else {
STATS_PAD_JMPS_ADD(f->flags, num_stubs_no_shift, 1);
}
return startpc;
}
/* Only used if -no_pad_jmps_shift_{bb,trace}. FIXME this routine is expensive (the
 * instr_expand) and we may end up removing app nops (an optimization, but
 * not really what we're after here). */
void
remove_nops_from_ilist(dcontext_t *dcontext,
instrlist_t *ilist _IF_DEBUG(bool recreating))
{
instr_t *inst, *next_inst;
for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) {
/* FIXME : expensive, just expand instr before cti, function not used
* if -no_pad_jmps_shift_{bb,trace} */
inst = instr_expand(dcontext, ilist, inst);
next_inst = instr_get_next(inst);
if (instr_is_nop(inst)) {
instrlist_remove(ilist, inst);
DOSTATS({
if (!recreating) {
STATS_INC(num_nops_removed);
STATS_ADD(num_nop_bytes_removed, instr_length(dcontext, inst));
}
});
instr_destroy(dcontext, inst);
}
}
}
cache_pc
get_direct_exit_target(dcontext_t *dcontext, uint flags)
{
if (FRAG_DB_SHARED(flags)) {
if (TEST(FRAG_COARSE_GRAIN, flags)) {
/* note that entrance stubs should target their unit's prefix,
* who will then target this routine
*/
return fcache_return_coarse_routine(IF_X86_64(FRAGMENT_GENCODE_MODE(flags)));
} else
return fcache_return_shared_routine(IF_X86_64(FRAGMENT_GENCODE_MODE(flags)));
} else {
return fcache_return_routine_ex(
dcontext _IF_X86_64(FRAGMENT_GENCODE_MODE(flags)));
}
}
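/* Convenience wrapper around insert_exit_stub_other_flags() that uses l's own
 * flags; returns the number of bytes emitted at stub_pc.
 */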
int
insert_exit_stub(dcontext_t *dcontext, fragment_t *f, linkstub_t *l, cache_pc stub_pc)
{
return insert_exit_stub_other_flags(dcontext, f, l, stub_pc, l->flags);
}
/* Returns true if the exit cti is ever dynamically modified */
bool
is_exit_cti_patchable(dcontext_t *dcontext, instr_t *inst, uint frag_flags)
{
app_pc target;
if (TEST(FRAG_COARSE_GRAIN, frag_flags)) {
/* Case 8647: coarse grain fragment bodies always link through stubs
* until frozen, so their ctis are never patched except at freeze time
* when we suspend the world.
*/
ASSERT(!TEST(FRAG_IS_TRACE, frag_flags));
return false;
}
ASSERT(instr_is_exit_cti(inst));
target = instr_get_branch_target_pc(inst);
if (is_indirect_branch_lookup_routine(dcontext, target)) {
/* whether has an inline stub or not, cti is always
* patched if -no_indirect_stubs
*/
if (!DYNAMO_OPTION(indirect_stubs))
return true;
#ifdef WINDOWS
if (target != shared_syscall_routine(dcontext)) {
#endif
return get_ibl_routine_code(
dcontext, extract_branchtype((ushort)instr_exit_branch_type(inst)),
frag_flags)
->ibl_head_is_inlined;
#ifdef WINDOWS
}
return false;
#endif
} else {
/* direct exit */
if (instr_branch_special_exit(inst))
return false;
return true;
}
}
/* returns true if exit cti no longer points at stub
* (certain situations, like profiling or TRACE_HEAD_CACHE_INCR, go
* through the stub even when linked)
*/
bool
link_direct_exit(dcontext_t *dcontext, fragment_t *f, linkstub_t *l, fragment_t *targetf,
bool hot_patch)
{
#ifdef TRACE_HEAD_CACHE_INCR
byte *stub_pc = (byte *)(EXIT_STUB_PC(dcontext, f, l));
#endif
ASSERT(linkstub_owned_by_fragment(dcontext, f, l));
ASSERT(LINKSTUB_DIRECT(l->flags));
STATS_INC(num_direct_links);
#ifdef TRACE_HEAD_CACHE_INCR
if ((targetf->flags & FRAG_IS_TRACE_HEAD) != 0) {
LOG(THREAD, LOG_LINKS, 4,
"\tlinking F%d." PFX " to incr routine b/c F%d is trace head\n", f->id,
EXIT_CTI_PC(f, l), targetf->id);
/* FIXME: more efficient way than multiple calls to get size-5? */
ASSERT(linkstub_size(dcontext, f, l) == DIRECT_EXIT_STUB_SIZE(f->flags));
patch_branch(FRAG_ISA_MODE(f->flags),
stub_pc + DIRECT_EXIT_STUB_SIZE(f->flags) - 5,
trace_head_incr_routine(dcontext), hot_patch);
return false; /* going through stub */
}
#endif
/* change jmp target to point to the passed-in target */
if (exit_cti_reaches_target(dcontext, f, l, (cache_pc)FCACHE_ENTRY_PC(targetf))) {
/* TODO i#1911: Patching the exit_cti to point to the linked fragment is
* theoretically not sound. Architecture specifications do not guarantee
* any bound on when these changes will be visible to other processor
* elements.
*/
patch_branch(FRAG_ISA_MODE(f->flags), EXIT_CTI_PC(f, l), FCACHE_ENTRY_PC(targetf),
hot_patch);
return true; /* do not need stub anymore */
} else {
/* Branch to the stub and use a longer-reaching branch from there.
* XXX i#1611: add support for load-into-PC as an exit cti to eliminate
* this stub-requiring scheme.
*/
patch_stub(f, (cache_pc)EXIT_STUB_PC(dcontext, f, l),
(cache_pc)FCACHE_ENTRY_PC(targetf),
(cache_pc)FCACHE_PREFIX_ENTRY_PC(targetf), hot_patch);
STATS_INC(num_far_direct_links);
/* Exit cti should already be pointing to the top of the exit stub */
return false; /* still need stub */
}
}
void
unlink_direct_exit(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
cache_pc stub_pc = (cache_pc)EXIT_STUB_PC(dcontext, f, l);
#ifdef TRACE_HEAD_CACHE_INCR
direct_linkstub_t *dl = (direct_linkstub_t *)l;
#endif
ASSERT(linkstub_owned_by_fragment(dcontext, f, l));
ASSERT(LINKSTUB_DIRECT(l->flags));
#ifdef TRACE_HEAD_CACHE_INCR
if (dl->target_fragment != NULL) { /* HACK to tell if targeted trace head */
byte *pc = (byte *)(EXIT_STUB_PC(dcontext, f, l));
/* FIXME: more efficient way than multiple calls to get size-5? */
ASSERT(linkstub_size(dcontext, f, l) == DIRECT_EXIT_STUB_SIZE(f->flags));
patch_branch(FRAG_ISA_MODE(f->flags), pc + DIRECT_EXIT_STUB_SIZE(f->flags) - 5,
get_direct_exit_target(dcontext, f->flags), HOT_PATCHABLE);
}
#endif
/* XXX: should we store a flag, or try to have the prior target's cache pc,
* to determine if exit_cti_reaches_target()? For now we blindly unlink
* both near and far styles.
*/
/* change jmp target to point to top of exit stub */
patch_branch(FRAG_ISA_MODE(f->flags), EXIT_CTI_PC(f, l), stub_pc, HOT_PATCHABLE);
unpatch_stub(dcontext, f, stub_pc, HOT_PATCHABLE);
}
/* NOTE : for inlined indirect branches linking is !NOT! atomic with respect
* to a thread executing in the cache unless using the atomic_inlined_linking
* option (unlike unlinking)
*/
void
link_indirect_exit(dcontext_t *dcontext, fragment_t *f, linkstub_t *l, bool hot_patch)
{
app_pc target_tag = EXIT_TARGET_TAG(dcontext, f, l);
/* w/ indirect exits now having their stub pcs computed based
* on the cti targets, we must calculate them at a consistent
* state (we do have multi-stage modifications for inlined stubs)
*/
byte *stub_pc = (byte *)EXIT_STUB_PC(dcontext, f, l);
ASSERT(!TEST(FRAG_COARSE_GRAIN, f->flags));
ASSERT(linkstub_owned_by_fragment(dcontext, f, l));
ASSERT(LINKSTUB_INDIRECT(l->flags));
/* target is always the same, so if it's already linked, this is a nop */
if ((l->flags & LINK_LINKED) != 0) {
STATS_INC(num_indirect_already_linked);
return;
}
STATS_INC(num_indirect_links);
if (IF_WINDOWS_ELSE(!is_shared_syscall_routine(dcontext, target_tag), true)) {
ibl_code_t *ibl_code =
get_ibl_routine_code(dcontext, extract_branchtype(l->flags), f->flags);
if (ibl_code->ibl_head_is_inlined) {
/* need to make branch target the top of the exit stub */
patch_branch(FRAG_ISA_MODE(f->flags), EXIT_CTI_PC(f, l), stub_pc, hot_patch);
if (DYNAMO_OPTION(atomic_inlined_linking)) {
return;
}
}
}
link_indirect_exit_arch(dcontext, f, l, hot_patch, target_tag);
}
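/* Returns the extra offset into l's stub at which the unlinked entry point
 * lives: nonzero only for indirect stubs whose ibl head is inlined; 0 for
 * direct stubs, shared_syscall targets, and non-inlined ibl.
 */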
int
linkstub_unlink_entry_offset(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
ibl_code_t *ibl_code;
ASSERT(linkstub_owned_by_fragment(dcontext, f, l));
if (!LINKSTUB_INDIRECT(l->flags))
return 0;
#ifdef WINDOWS
if (is_shared_syscall_routine(dcontext, EXIT_TARGET_TAG(dcontext, f, l)))
return 0;
#endif
ibl_code = get_ibl_routine_code(dcontext, extract_branchtype(l->flags), f->flags);
if (ibl_code->ibl_head_is_inlined)
return ibl_code->inline_unlink_offs;
else
return 0;
}
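/* Returns the routine that indirect exit l targets: shared_syscall (Windows),
 * the coarse ibl prefix for coarse-grain fragments, or the ibl routine
 * matching this branch type and fragment type.
 */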
cache_pc
indirect_linkstub_target(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
ASSERT(LINKSTUB_INDIRECT(l->flags));
ASSERT(!TESTANY(LINK_NI_SYSCALL_ALL, l->flags));
#ifdef WINDOWS
if (EXIT_TARGETS_SHARED_SYSCALL(l->flags)) {
/* currently this is the only way to distinguish shared_syscall
* exit from other indirect exits and from other exits in
* a fragment containing ignorable or non-ignorable syscalls
*/
ASSERT(TEST(FRAG_HAS_SYSCALL, f->flags));
return shared_syscall_routine_ex(
dcontext _IF_X86_64(FRAGMENT_GENCODE_MODE(f->flags)));
}
#endif
if (TEST(FRAG_COARSE_GRAIN, f->flags)) {
/* Need to target the ibl prefix. Passing in cti works as well as stub,
* and avoids a circular dependence where linkstub_unlink_entry_offset()
     * calls this routine to get the target and then this routine asks for
* the stub which calls linkstub_unlink_entry_offset()...
*/
return get_coarse_ibl_prefix(dcontext, EXIT_CTI_PC(f, l),
extract_branchtype(l->flags));
} else {
return get_ibl_routine_ex(dcontext, get_ibl_entry_type(l->flags),
get_source_fragment_type(dcontext, f->flags),
extract_branchtype(l->flags)
_IF_X86_64(FRAGMENT_GENCODE_MODE(f->flags)));
}
}
/* based on machine state, returns which of cbr l1 and fall-through l2
* must have been taken
*/
linkstub_t *
linkstub_cbr_disambiguate(dcontext_t *dcontext, fragment_t *f, linkstub_t *l1,
linkstub_t *l2)
{
instr_t instr;
linkstub_t *taken;
bool inverted = false;
instr_init(dcontext, &instr);
decode(dcontext, EXIT_CTI_PC(f, l1), &instr);
ASSERT(instr_is_cbr(&instr));
/* On ARM, we invert the logic of OP_cb{,n}z when we mangle it */
IF_ARM(inverted = instr_is_cti_short_rewrite(&instr, EXIT_CTI_PC(f, l1)));
if (instr_cbr_taken(&instr, get_mcontext(dcontext), false /*post-state*/))
taken = inverted ? l2 : l1;
else
taken = inverted ? l1 : l2;
instr_free(dcontext, &instr);
return taken;
}
/*******************************************************************************
* COARSE-GRAIN FRAGMENT SUPPORT
*/
/* FIXME: case 10334: pass in info? */
bool
coarse_is_trace_head(cache_pc stub)
{
if (coarse_is_entrance_stub(stub)) {
cache_pc tgt = entrance_stub_jmp_target(stub);
/* FIXME: could see if tgt is a jmp and deref and cmp to
* trace_head_return_coarse_routine() to avoid the vmvector
* lookup required to find the prefix
*/
return tgt == trace_head_return_coarse_prefix(stub, NULL);
}
return false;
}
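/* Decodes the direct jmp at the end of an entrance stub and returns its
 * target pc.
 */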
cache_pc
entrance_stub_jmp_target(cache_pc stub)
{
cache_pc jmp = entrance_stub_jmp(stub);
cache_pc tgt;
ASSERT(jmp != NULL);
tgt = (cache_pc)PC_RELATIVE_TARGET(jmp + 1);
#ifdef X86
ASSERT(*jmp == JMP_OPCODE);
#elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
return tgt;
}
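/* Returns the application tag this entrance stub targets, extracted from the
 * immediate(s) the stub stores to tls just before its final jmp; adjusts for
 * a persist-time mod_shift (i#670) when one applies.
 */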
app_pc
entrance_stub_target_tag(cache_pc stub, coarse_info_t *info)
{
cache_pc jmp = entrance_stub_jmp(stub);
app_pc tag;
/* find the immed that is put into tls: at end of pre-jmp instr */
#if defined(X86) && defined(X64)
/* To identify whether 32-bit: we could look up the coarse_info_t
* this is part of but that's expensive so we check whether the
* tls offset has 2 high byte 0's (we always use addr16 for 32-bit).
* 32-bit:
* 67 64 c7 06 e0 0e 02 99 4e 7d addr16 mov $0x7d4e9902 -> %fs:0x0ee0
* 64-bit is split into high and low dwords:
* 65 c7 04 25 20 16 00 00 02 99 4e 7d mov $0x7d4e9902 -> %gs:0x1620
* 65 c7 04 25 24 16 00 00 00 00 00 00 mov $0x00000000 -> %gs:0x1624
* both are followed by a direct jmp.
*/
if (*((ushort *)(jmp - 6)) == 0) { /* 64-bit has 2 0's for high 2 bytes of tls offs */
ptr_uint_t high32 = (ptr_uint_t) * ((uint *)(jmp - 4));
ptr_uint_t low32 =
(ptr_uint_t) * ((uint *)(jmp - (SIZE64_MOV_PTR_IMM_TO_TLS / 2) - 4));
tag = (cache_pc)((high32 << 32) | low32);
} else { /* else fall-through to 32-bit case */
#endif
tag = *((cache_pc *)(jmp - 4));
#if defined(X86) && defined(X64)
}
#endif
/* if frozen, this could be a persist-time app pc (i#670).
* we take in info so we can know mod_shift (we can decode to find it
* for unlinked but not for linked)
*/
if (info == NULL)
info = get_stub_coarse_info(stub);
if (info->mod_shift != 0 && tag >= info->base_pc + info->mod_shift &&
tag < info->end_pc + info->mod_shift)
tag -= info->mod_shift;
return tag;
}
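/* Returns whether pc points at a coarse-grain indirect stub, identified by
 * the tls spill of xbx/r1 that such stubs begin with.
 */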
bool
coarse_is_indirect_stub(cache_pc pc)
{
/* match insert_jmp_to_ibl */
return instr_raw_is_tls_spill(pc, SCRATCH_REG1 /*xbx/r1*/, INDIRECT_STUB_SPILL_SLOT);
}
/* caller should call fragment_coarse_entry_pclookup() ahead of time
* to avoid deadlock if caller holds info->lock
*/
bool
coarse_cti_is_intra_fragment(dcontext_t *dcontext, coarse_info_t *info, instr_t *inst,
cache_pc start_pc)
{
/* We don't know the size of the fragment but we want to support
* intra-fragment ctis for clients (i#665) so we use some
* heuristics. A real cti is either linked to a target within the
* same coarse unit (where its target will be an entry point) or
* points at a stub of some kind (frozen exit prefix or separate
* entrance stub or inlined indirect stub).
*/
cache_pc tgt = opnd_get_pc(instr_get_target(inst));
if (tgt < start_pc || tgt >= start_pc + MAX_FRAGMENT_SIZE)
return false;
/* If tgt is an entry, then it's a linked exit cti.
* XXX: This may acquire info->lock if it's never been called before.
*/
if (fragment_coarse_entry_pclookup(dcontext, info, tgt) != NULL) {
        /* i#1032: To handle an intra cti that targets the final instr in the bb,
         * which was a jmp and elided, we rely on the assumption that a coarse bb
         * ends in either 1 indirect exit cti or 2 direct exit ctis, with no code
         * past them.  Thus, the instr after an exit cti must either be an entry
         * point for an adjacent fragment, or the 2nd cti of a direct pair.
         */
cache_pc post_inst_pc = instr_get_raw_bits(inst) + instr_length(dcontext, inst);
instr_t post_inst_instr;
bool intra = true;
instr_init(dcontext, &post_inst_instr);
if (post_inst_pc >= info->cache_end_pc ||
fragment_coarse_entry_pclookup(dcontext, info, post_inst_pc) != NULL ||
(decode_cti(dcontext, post_inst_pc, &post_inst_instr) != NULL &&
instr_is_cti(&post_inst_instr))) {
intra = false;
}
instr_free(dcontext, &post_inst_instr);
if (!intra)
return false;
}
    /* These lookups can get expensive, but we should only hit them when
     * clients add intra-fragment ctis.
     * XXX: is there a min distance we could use to rule out being in stubs?
     * For frozen units, though, prefixes are right after the cache.
     */
if (coarse_is_indirect_stub(tgt) || in_coarse_stubs(tgt) ||
in_coarse_stub_prefixes(tgt))
return false;
return true;
}
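/* Returns the final target of a coarse indirect stub by following its ending
 * jmp through the prefix jmp it points at (x86 only; NYI elsewhere).
 */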
cache_pc
coarse_indirect_stub_jmp_target(cache_pc stub)
{
#ifdef X86
cache_pc prefix_tgt, tgt;
cache_pc jmp;
size_t stub_size;
# ifdef X64
/* See the stub sequences in entrance_stub_target_tag(): 32-bit always has
* an addr prefix while 64-bit does not
*/
/* FIXME: PR 209709: test perf and remove if outweighs space */
if (*stub == ADDR_PREFIX_OPCODE)
stub_size = STUB_COARSE_INDIRECT_SIZE(FRAG_32_BIT);
else /* default */
# endif
stub_size = STUB_COARSE_INDIRECT_SIZE(0);
jmp = stub + stub_size - JMP_LONG_LENGTH;
ASSERT(*jmp == JMP_OPCODE);
prefix_tgt = (cache_pc)PC_RELATIVE_TARGET(jmp + 1);
ASSERT(*prefix_tgt == JMP_OPCODE);
tgt = (cache_pc)PC_RELATIVE_TARGET(prefix_tgt + 1);
return tgt;
#elif defined(AARCHXX)
/* FIXME i#1551, i#1569: NYI on ARM/AArch64 */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
#endif /* X86/ARM */
}
uint
coarse_indirect_stub_size(coarse_info_t *info)
{
/* Keep in synch w/ exit_stub_size(). We export this separately since
* it's difficult to get the target to pass to exit_stub_size().
*/
return STUB_COARSE_INDIRECT_SIZE(COARSE_32_FLAG(info));
}
/* Passing in stub's info avoids a vmvector lookup */
bool
entrance_stub_linked(cache_pc stub, coarse_info_t *info /*OPTIONAL*/)
{
/* entrance stubs are of two types:
* - targeting trace heads: always point to trace_head_return_coarse,
* whether target exists or not, so are always unlinked;
* - targeting non-trace-heads: if linked, point to fragment; if unlinked,
* point to fcache_return_coarse
*/
cache_pc tgt = entrance_stub_jmp_target(stub);
    /* FIXME: do the vmvector lookup just once instead of for each call */
return (tgt != trace_head_return_coarse_prefix(stub, info) &&
tgt != fcache_return_coarse_prefix(stub, info));
}
/* Returns whether it had to change page protections */
static bool
patch_coarse_branch(dcontext_t *dcontext, cache_pc stub, cache_pc tgt, bool hot_patch,
coarse_info_t *info /*OPTIONAL*/)
{
bool stubs_readonly = false;
bool stubs_restore = false;
if (DYNAMO_OPTION(persist_protect_stubs)) {
if (info == NULL)
info = get_stub_coarse_info(stub);
ASSERT(info != NULL);
if (info->stubs_readonly) {
stubs_readonly = true;
stubs_restore = true;
/* if we don't preserve mapped-in COW state the protection change
* will fail (case 10570)
*/
make_copy_on_writable((byte *)PAGE_START(entrance_stub_jmp(stub)),
/* stub jmp can't cross page boundary (can't
* cross cache line in fact) */
PAGE_SIZE);
if (DYNAMO_OPTION(persist_protect_stubs_limit) > 0) {
info->stubs_write_count++;
if (info->stubs_write_count >
DYNAMO_OPTION(persist_protect_stubs_limit)) {
SYSLOG_INTERNAL_WARNING_ONCE("pcache stubs over write limit");
STATS_INC(pcache_unprot_over_limit);
stubs_restore = false;
info->stubs_readonly = false;
}
}
}
}
/* FIXME i#1551: for proper ARM support we'll need the ISA mode of the coarse unit */
patch_branch(dr_get_isa_mode(dcontext), entrance_stub_jmp(stub), tgt, HOT_PATCHABLE);
if (stubs_restore)
make_unwritable((byte *)PAGE_START(entrance_stub_jmp(stub)), PAGE_SIZE);
return stubs_readonly;
}
/* Passing in stub's info avoids a vmvector lookup */
void
link_entrance_stub(dcontext_t *dcontext, cache_pc stub, cache_pc tgt, bool hot_patch,
coarse_info_t *info /*OPTIONAL*/)
{
ASSERT(DYNAMO_OPTION(coarse_units));
ASSERT(self_owns_recursive_lock(&change_linking_lock));
LOG(THREAD, LOG_LINKS, 5, "link_entrance_stub " PFX "\n", stub);
if (patch_coarse_branch(dcontext, stub, tgt, hot_patch, info))
STATS_INC(pcache_unprot_link);
    /* We check this afterward since this link may be what makes it consistent.
     * FIXME: pass in an arg to not check the target? Then call before and after. */
ASSERT(coarse_is_entrance_stub(stub));
}
/* Passing in stub's info avoids a vmvector lookup */
void
unlink_entrance_stub(dcontext_t *dcontext, cache_pc stub, uint flags,
coarse_info_t *info /*OPTIONAL*/)
{
cache_pc tgt;
ASSERT(DYNAMO_OPTION(coarse_units));
ASSERT(coarse_is_entrance_stub(stub));
ASSERT(self_owns_recursive_lock(&change_linking_lock));
LOG(THREAD, LOG_LINKS, 5, "unlink_entrance_stub " PFX "\n", stub);
if (TESTANY(FRAG_IS_TRACE_HEAD | FRAG_IS_TRACE, flags))
tgt = trace_head_return_coarse_prefix(stub, info);
else
tgt = fcache_return_coarse_prefix(stub, info);
if (patch_coarse_branch(dcontext, stub, tgt, HOT_PATCHABLE, info))
STATS_INC(pcache_unprot_unlink);
}
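/* Returns the entrance stub targeted by the exit cti at cti, read from the
 * cti's pc-relative displacement.
 */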
cache_pc
entrance_stub_from_cti(cache_pc cti)
{
cache_pc disp = exit_cti_disp_pc(cti);
cache_pc tgt = (cache_pc)PC_RELATIVE_TARGET(disp);
return tgt;
}
/*******************************************************************************/
/* Patch list support routines */
void
init_patch_list(patch_list_t *patch, patch_list_type_t type)
{
patch->num_relocations = 0;
/* Cast to int to avoid a tautological comparison warning from clang. */
ASSERT_TRUNCATE(patch->type, ushort, (int)type);
patch->type = (ushort)type;
}
/* add an instruction to patch list and address of location for future updates */
/* Use the type checked wrappers add_patch_entry or add_patch_marker */
void
add_patch_entry_internal(patch_list_t *patch, instr_t *instr, ushort patch_flags,
short instruction_offset, ptr_uint_t value_location_offset)
{
uint i = patch->num_relocations;
ASSERT(patch->num_relocations < MAX_PATCH_ENTRIES);
/* Since in debug build we have the extra slots for stats, it's important
* to provide a useful release build message
*/
if (patch->num_relocations >= MAX_PATCH_ENTRIES) {
SYSLOG_CUSTOM_NOTIFY(SYSLOG_CRITICAL, MSG_EXCEPTION, 4,
"Maximum patch entries exceeded", get_application_name(),
get_application_pid(), "<maxpatch>",
"Maximum patch entries exceeded");
os_terminate(get_thread_private_dcontext(), TERMINATE_PROCESS);
ASSERT_NOT_REACHED();
}
LOG(THREAD_GET, LOG_EMIT, 4, "add_patch_entry[%d] value_location_offset=" PFX "\n", i,
value_location_offset);
patch->entry[i].where.instr = instr;
patch->entry[i].patch_flags = patch_flags;
patch->entry[i].value_location_offset = value_location_offset;
patch->entry[i].instr_offset = instruction_offset;
patch->num_relocations++;
}
/* Add an instruction to the patch list to retrieve its offset later.
 * Takes an instruction and an offset within the instruction.
 * Result: the offset within the encoded instruction stream will be
 * stored in *target_offset by encode_with_patch_list.
 */
void
add_patch_marker(patch_list_t *patch, instr_t *instr, ushort patch_flags,
short instr_offset, ptr_uint_t *target_offset /* OUT */)
{
add_patch_entry_internal(patch, instr, (ushort)(patch_flags | PATCH_MARKER),
instr_offset, (ptr_uint_t)target_offset);
}
/* remove PATCH_MARKER entries since not needed for dynamic updates */
static INLINE_ONCE void
remove_assembled_patch_markers(dcontext_t *dcontext, patch_list_t *patch)
{
ushort i = 0, j = 0;
    /* We can remove the PATCH_MARKER entries after encoding, so
     * patch_emitted_code won't even need to check for PATCH_MARKER.
     */
while (j < patch->num_relocations) {
if (TEST(PATCH_MARKER, patch->entry[j].patch_flags)) {
LOG(THREAD, LOG_EMIT, 4,
"remove_assembled_patch_markers: removing marker %d\n", j);
} else {
patch->entry[i] = patch->entry[j];
i++;
}
j++;
}
LOG(THREAD, LOG_EMIT, 3,
"remove_assembled_patch_markers: relocations %d, left only %d\n",
patch->num_relocations, i);
patch->num_relocations = i;
}
/* Indirect all instructions instead of later patching */
static void
relocate_patch_list(dcontext_t *dcontext, patch_list_t *patch, instrlist_t *ilist)
{
instr_t *inst;
uint cur = 0;
LOG(THREAD, LOG_EMIT, 3, "relocate_patch_list [" PFX "]\n", patch);
/* go through the instructions and "relocate" by indirectly using XDI */
for (inst = instrlist_first(ilist); inst; inst = instr_get_next(inst)) {
if (cur < patch->num_relocations && inst == patch->entry[cur].where.instr) {
ASSERT(!TEST(PATCH_OFFSET_VALID, patch->entry[cur].patch_flags));
if (!TEST(PATCH_MARKER, patch->entry[cur].patch_flags)) {
opnd_t opnd;
ASSERT(instr_num_srcs(inst) > 0);
opnd = instr_get_src(inst, 0);
DOLOG(4, LOG_EMIT, {
                    LOG(THREAD, LOG_EMIT, 2,
                        "relocate_patch_list: patch_entry_t[%d] before update\n", cur);
instr_disassemble(dcontext, inst, THREAD);
LOG(THREAD, LOG_EMIT, 2, "\n");
});
/* we assume that per_thread_t will be in XDI,
and the displacement is in value_location_offset */
IF_X64(ASSERT(
CHECK_TRUNCATE_TYPE_int(patch->entry[cur].value_location_offset)));
if (opnd_is_near_base_disp(opnd)) {
/* indirect through XDI and update displacement */
opnd_set_disp(&opnd, (int)patch->entry[cur].value_location_offset);
opnd_replace_reg(&opnd, REG_NULL, SCRATCH_REG5 /*xdi/r5*/);
} else if (opnd_is_immed_int(opnd)) {
/* indirect through XDI and set displacement */
/* converting AND $0x00003fff, %xcx -> %xcx
into AND mask(%xdi), %xcx -> %xcx
*/
opnd = opnd_create_base_disp(
SCRATCH_REG5 /*xdi/r5*/, REG_NULL, 0,
(int)patch->entry[cur].value_location_offset, OPSZ_4);
}
instr_set_src(inst, 0, opnd);
DOLOG(3, LOG_EMIT, {
                    LOG(THREAD, LOG_EMIT, 2,
                        "relocate_patch_list: patch_entry_t[%d] after update\n", cur);
instr_disassemble(dcontext, inst, THREAD);
LOG(THREAD, LOG_EMIT, 2, "\n");
});
}
cur++;
}
}
}
/* Updates patch list with offsets in assembled instruction list */
/* Cf: instrlist_encode which does not support a patch list */
/* Returns length of emitted code */
int
encode_with_patch_list(dcontext_t *dcontext, patch_list_t *patch, instrlist_t *ilist,
cache_pc start_pc)
{
instr_t *inst;
uint len;
uint cur;
cache_pc pc = start_pc;
ASSERT(patch->num_relocations < MAX_PATCH_ENTRIES);
if (patch->type == PATCH_TYPE_INDIRECT_XDI) {
relocate_patch_list(dcontext, patch, ilist);
}
/* now encode the instructions */
/* must set note fields first with offset */
len = 0;
for (inst = instrlist_first(ilist); inst; inst = instr_get_next(inst)) {
instr_set_note(inst, (void *)(ptr_uint_t)len);
len += instr_length(dcontext, inst);
}
cur = 0;
/* after instruction list is assembled we collect the offsets */
for (inst = instrlist_first(ilist); inst; inst = instr_get_next(inst)) {
short offset_in_instr = patch->entry[cur].instr_offset;
byte *nxt_writable_pc =
instr_encode_to_copy(dcontext, inst, vmcode_get_writable_addr(pc), pc);
byte *nxt_pc = vmcode_get_executable_addr(nxt_writable_pc);
ASSERT(nxt_pc != NULL);
len = (int)(nxt_pc - pc);
pc = nxt_pc;
if (cur < patch->num_relocations && inst == patch->entry[cur].where.instr) {
ASSERT(!TEST(PATCH_OFFSET_VALID, patch->entry[cur].patch_flags));
/* support positive offsets from beginning and negative -
* from end of instruction
*/
if (offset_in_instr < 0) {
                /* grab the offset offset_in_instr bytes from the end of the
                 * instruction: most commonly -4 for a 32-bit immediate */
patch->entry[cur].where.offset = ((pc + offset_in_instr) - start_pc);
} else {
/* grab offset after skipping offset_in_instr from beginning of
* instruction
*/
patch->entry[cur].where.offset =
((pc - len + offset_in_instr) - start_pc);
}
patch->entry[cur].patch_flags |= PATCH_OFFSET_VALID;
LOG(THREAD, LOG_EMIT, 4,
"encode_with_patch_list: patch_entry_t[%d] offset=" PFX "\n", cur,
patch->entry[cur].where.offset);
if (TEST(PATCH_MARKER, patch->entry[cur].patch_flags)) {
                /* Treat value_location_offset as an output argument and
                 * store the computed offset there.
                 */
ptr_uint_t *output_value =
(ptr_uint_t *)patch->entry[cur].value_location_offset;
ptr_uint_t output_offset = patch->entry[cur].where.offset;
if (TEST(PATCH_ASSEMBLE_ABSOLUTE, patch->entry[cur].patch_flags)) {
ASSERT(!TEST(PATCH_UINT_SIZED, patch->entry[cur].patch_flags));
output_offset += (ptr_uint_t)vmcode_get_executable_addr(start_pc);
}
if (TEST(PATCH_UINT_SIZED, patch->entry[cur].patch_flags)) {
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(output_offset)));
*((uint *)output_value) = (uint)output_offset;
} else
*output_value = output_offset;
}
LOG(THREAD, LOG_EMIT, 4,
"encode_with_patch_list [%d] extras patch_flags=0x%x value_offset=" PFX
"\n",
cur, patch->entry[cur].patch_flags,
patch->entry[cur].value_location_offset);
cur++;
}
}
/* assuming patchlist is in the same order as ilist, we should have seen all */
    LOG(THREAD, LOG_EMIT, 4, "cur %d, num %d\n", cur, patch->num_relocations);
ASSERT(cur == patch->num_relocations);
remove_assembled_patch_markers(dcontext, patch);
ASSERT(CHECK_TRUNCATE_TYPE_int(pc - start_pc));
return (int)(pc - start_pc);
}
#ifdef DEBUG
void
print_patch_list(patch_list_t *patch)
{
uint i;
LOG(THREAD_GET, LOG_EMIT, 4, "patch=" PFX " num_relocations=%d\n", patch,
patch->num_relocations);
for (i = 0; i < patch->num_relocations; i++) {
ASSERT(TEST(PATCH_OFFSET_VALID, patch->entry[i].patch_flags));
LOG(THREAD_GET, LOG_EMIT, 4,
"patch_list [%d] offset=" PFX " patch_flags=%d value_offset=" PFX "\n", i,
patch->entry[i].where.offset, patch->entry[i].patch_flags,
patch->entry[i].value_location_offset);
}
}
# ifdef INTERNAL
/* disassembles code adding patch list labels */
static void
disassemble_with_annotations(dcontext_t *dcontext, patch_list_t *patch, byte *start_pc,
byte *end_pc)
{
byte *pc = start_pc;
uint cur = 0;
do {
if (cur < patch->num_relocations &&
pc >= start_pc + patch->entry[cur].where.offset) {
ASSERT(TEST(PATCH_OFFSET_VALID, patch->entry[cur].patch_flags));
            /* this is slightly off - we'll mark the next instruction,
               but it is good enough for this purpose */
LOG(THREAD, LOG_EMIT, 2, "%d:", cur);
cur++;
} else {
LOG(THREAD, LOG_EMIT, 2, " ");
}
pc = disassemble_with_bytes(dcontext, pc, THREAD);
} while (pc < end_pc);
LOG(THREAD, LOG_EMIT, 2, "\n");
}
# endif
#endif
/* updates emitted code according to patch list */
static void
patch_emitted_code(dcontext_t *dcontext, patch_list_t *patch, byte *start_pc)
{
uint i;
/* FIXME: can get this as a patch list entry through indirection */
    ASSERT(dcontext != GLOBAL_DCONTEXT && dcontext != NULL);
    per_thread_t *pt = (per_thread_t *)dcontext->fragment_field;
    LOG(THREAD, LOG_EMIT, 2, "patch_emitted_code start_pc=" PFX " pt=" PFX "\n",
        start_pc, pt);
if (patch->type != PATCH_TYPE_ABSOLUTE) {
LOG(THREAD, LOG_EMIT, 2,
"patch_emitted_code type=%d indirected, nothing to patch\n", patch->type);
/* FIXME: propagate the check earlier to save the extraneous calls
to update_indirect_exit_stub and update_indirect_branch_lookup
*/
return;
}
DOLOG(4, LOG_EMIT, { print_patch_list(patch); });
for (i = 0; i < patch->num_relocations; i++) {
byte *pc = start_pc + patch->entry[i].where.offset;
        /* value address (think, for example, of pt->trace.hash_mask) */
ptr_uint_t value;
char *vaddr = NULL;
if (TEST(PATCH_PER_THREAD, patch->entry[i].patch_flags)) {
vaddr = (char *)pt + patch->entry[i].value_location_offset;
} else if (TEST(PATCH_UNPROT_STAT, patch->entry[i].patch_flags)) {
/* separate the two parts of the stat */
uint unprot_offs = (uint)(patch->entry[i].value_location_offset) >> 16;
uint field_offs = (uint)(patch->entry[i].value_location_offset) & 0xffff;
IF_X64(
ASSERT(CHECK_TRUNCATE_TYPE_uint(patch->entry[i].value_location_offset)));
vaddr = (*((char **)((char *)pt + unprot_offs))) + field_offs;
LOG(THREAD, LOG_EMIT, 4,
"patch_emitted_code [%d] value " PFX " => 0x%x 0x%x => " PFX "\n", i,
patch->entry[i].value_location_offset, unprot_offs, field_offs, vaddr);
} else
ASSERT_NOT_REACHED();
ASSERT(TEST(PATCH_OFFSET_VALID, patch->entry[i].patch_flags));
ASSERT(!TEST(PATCH_MARKER, patch->entry[i].patch_flags));
if (!TEST(PATCH_TAKE_ADDRESS, patch->entry[i].patch_flags)) {
/* use value pointed by computed address */
if (TEST(PATCH_UINT_SIZED, patch->entry[i].patch_flags))
value = (ptr_uint_t) * ((uint *)vaddr);
else
value = *(ptr_uint_t *)vaddr;
} else {
ASSERT(!TEST(PATCH_UINT_SIZED, patch->entry[i].patch_flags));
value = (ptr_uint_t)vaddr; /* use computed address */
}
LOG(THREAD, LOG_EMIT, 4,
"patch_emitted_code [%d] offset=" PFX " patch_flags=%d value_offset=" PFX
" vaddr=" PFX " value=" PFX "\n",
i, patch->entry[i].where.offset, patch->entry[i].patch_flags,
patch->entry[i].value_location_offset, vaddr, value);
if (TEST(PATCH_UINT_SIZED, patch->entry[i].patch_flags)) {
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(value)));
*((uint *)pc) = (uint)value;
} else
*((ptr_uint_t *)pc) = value;
LOG(THREAD, LOG_EMIT, 4, "patch_emitted_code: updated pc *" PFX " = " PFX "\n",
pc, value);
}
STATS_INC(emit_patched_fragments);
DOSTATS({
/* PR 217008: avoid gcc warning from truncation assert in XSTATS_ADD_DC */
int tmp_num = patch->num_relocations;
STATS_ADD(emit_patched_relocations, tmp_num);
});
LOG(THREAD, LOG_EMIT, 4, "patch_emitted_code done\n");
}
/* Updates an indirect branch exit stub with the latest hashtable mask
* and hashtable address
* See also update_indirect_branch_lookup
*/
void
update_indirect_exit_stub(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
generated_code_t *code =
get_emitted_routines_code(dcontext _IF_X86_64(FRAGMENT_GENCODE_MODE(f->flags)));
byte *start_pc = (byte *)EXIT_STUB_PC(dcontext, f, l);
ibl_branch_type_t branch_type;
ASSERT(linkstub_owned_by_fragment(dcontext, f, l));
ASSERT(LINKSTUB_INDIRECT(l->flags));
ASSERT(EXIT_HAS_STUB(l->flags, f->flags));
    /* Shared fragments use indirection so no patching is needed -- caller should check */
ASSERT(!TEST(FRAG_SHARED, f->flags));
#ifdef WINDOWS
/* Do not touch shared_syscall */
if (EXIT_TARGET_TAG(dcontext, f, l) ==
shared_syscall_routine_ex(dcontext _IF_X86_64(FRAGMENT_GENCODE_MODE(f->flags))))
return;
#endif
branch_type = extract_branchtype(l->flags);
LOG(THREAD, LOG_EMIT, 4, "update_indirect_exit_stub: f->tag=" PFX "\n", f->tag);
if (DYNAMO_OPTION(disable_traces) && !code->bb_ibl[branch_type].ibl_head_is_inlined) {
return;
}
if (TEST(FRAG_IS_TRACE, f->flags)) {
ASSERT(code->trace_ibl[branch_type].ibl_head_is_inlined);
patch_emitted_code(dcontext, &code->trace_ibl[branch_type].ibl_stub_patch,
start_pc);
} else {
ASSERT(code->bb_ibl[branch_type].ibl_head_is_inlined);
patch_emitted_code(dcontext, &code->bb_ibl[branch_type].ibl_stub_patch, start_pc);
}
}
/*###########################################################################
*
* fragment_t Prefixes
*
* Two types: indirect branch target, which restores eflags and xcx, and
* normal prefix, which just restores xcx
*/
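/* Returns the size in bytes of the prefix emitted in front of a fragment with
 * the given flags: the ibt prefix when one is used (always on AArch64), the
 * base prefix under -bb_prefixes, else 0.
 */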
int
fragment_prefix_size(uint flags)
{
#ifdef AARCH64
/* For AArch64, there is no need to save the flags
* so we always have the same ibt prefix. */
return fragment_ibt_prefix_size(flags);
#else
if (use_ibt_prefix(flags)) {
return fragment_ibt_prefix_size(flags);
} else {
if (dynamo_options.bb_prefixes)
return FRAGMENT_BASE_PREFIX_SIZE(flags);
else
return 0;
}
#endif
}
#ifdef PROFILE_RDTSC
/***************************************************************************
***************************************************************************
** PROFILING USING RDTSC
**
**/
/*
We want the profile code to not count towards fragment times.
So we stop time as quickly as possible, in assembly here instead of
in the profile_fragment_enter function, and start time again as late
as possible:
mov %eax, eax_offset(dcontext) # save eax
mov %edx, edx_offset(dcontext) # save edx
rdtsc # stop time
switch to dynamo stack
pushfl # save eflags (call will clobber)
mov %ecx, ecx_offset(dcontext) # save ecx
pushl %edx # pass time as arg
pushl %eax
pushil &fragment_address # pass &frag as arg
call profile_fragment_enter #
addl $0xc, %esp # clean up args
popl %ecx # restore ecx
popfl # restore eflags
restore app stack
rdtsc # start time
movl %eax, start_time_OFFS(dcontext) # store time value
movl %edx, 4+start_time_OFFS(dcontext) # store time value
mov eax_offset(dcontext), %eax # restore eax
mov edx_offset(dcontext), %edx # restore edx
mov ecx_offset(dcontext), %ecx # restore ecx
*/
static uint profile_call_length = 0;
static int profile_call_fragment_offset = 0;
static int profile_call_call_offset = 0;
static byte profile_call_buf[128];
static dcontext_t *buffer_dcontext;
static void
build_profile_call_buffer(void);
uint
profile_call_size(void)
{
/* XXX i#1566: For -satisfy_w_xor_x we'd need to change the
* instr_encode calls and possibly more. Punting for now.
*/
ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(satisfy_w_xor_x),
"PROFILE_RDTSC is not supported with -satisfy_w_xor_x");
if (profile_call_length == 0)
build_profile_call_buffer();
return profile_call_length;
}
/* if insert_profile_call emits its code into the trace buffer, this
* routine must be called once the fragment is created and the code is
* in the fcache
*/
void
finalize_profile_call(dcontext_t *dcontext, fragment_t *f)
{
byte *start_pc = (byte *)FCACHE_ENTRY_PC(f);
byte *pc;
byte *prev_pc;
instr_t instr;
instr_init(dcontext, &instr);
/* fill in address of owning fragment now that that fragment exists */
pc = start_pc + profile_call_fragment_offset;
/* PR 248210: unsupported feature on x64 */
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
*((int *)pc) = (uint)f;
/* fill in call's proper pc-relative offset now that code is
* in its final location in fcache
*/
pc = start_pc + profile_call_call_offset;
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
*((int *)pc) = (int)&profile_fragment_enter - (int)pc - 4;
/* must fix up all dcontext references to point to the right dcontext */
pc = start_pc;
do {
prev_pc = pc;
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
ASSERT(instr_valid(&instr)); /* our own code! */
/* look for loads and stores that reference buffer_dcontext */
if (instr_get_opcode(&instr) == OP_mov_ld &&
opnd_is_near_base_disp(instr_get_src(&instr, 0)) &&
opnd_get_base(instr_get_src(&instr, 0)) == REG_NULL &&
opnd_get_index(instr_get_src(&instr, 0)) == REG_NULL) {
/* if not really dcontext value, update_ will return old value */
instr_set_src(&instr, 0,
update_dcontext_address(instr_get_src(&instr, 0),
buffer_dcontext, dcontext));
} else if (instr_get_opcode(&instr) == OP_mov_st &&
opnd_is_near_base_disp(instr_get_dst(&instr, 0)) &&
opnd_get_base(instr_get_dst(&instr, 0)) == REG_NULL &&
opnd_get_index(instr_get_dst(&instr, 0)) == REG_NULL) {
/* if not really dcontext value, update_ will return old value */
instr_set_dst(&instr, 0,
update_dcontext_address(instr_get_dst(&instr, 0),
buffer_dcontext, dcontext));
}
if (!instr_raw_bits_valid(&instr)) {
DEBUG_DECLARE(byte * nxt_pc;)
DEBUG_DECLARE(nxt_pc =) instr_encode(dcontext, &instr, prev_pc);
ASSERT(nxt_pc != NULL);
}
} while (pc < start_pc + profile_call_length);
instr_free(dcontext, &instr);
}
void
insert_profile_call(cache_pc start_pc)
{
if (profile_call_length == 0)
build_profile_call_buffer();
memcpy((void *)start_pc, profile_call_buf, profile_call_length);
/* if thread-private, we change to proper dcontext when finalizing */
}
/* This routine builds the profile call code using the instr_t
* abstraction, then emits it into a buffer to be saved.
* The code can then be directly copied whenever needed.
* Assumption: this thread's dcontext must have been created
* before calling this function.
*/
static void
build_profile_call_buffer(void)
{
byte *pc, *nxt_pc;
instrlist_t ilist;
instr_t *inst;
int start_time_offs;
dcontext_t *dcontext = get_thread_private_dcontext();
ASSERT(dcontext != NULL);
/* remember dcontext for easy replacement when finalizing: */
buffer_dcontext = dcontext;
/* we require a dcontext to find this offset because it may
* or may not be pushed to a quadword boundary, making it
* hard to hardcode it
*/
start_time_offs = (int)(&(dcontext->start_time)) - (int)dcontext;
/* initialize the ilist */
instrlist_init(&ilist);
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_EAX, SCRATCH_REG0_OFFS));
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_EDX, SCRATCH_REG3_OFFS));
/* get time = rdtsc */
APP(&ilist, INSTR_CREATE_rdtsc(dcontext));
/* swap to dstack */
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_ESP, XSP_OFFSET));
APP(&ilist, instr_create_restore_dynamo_stack(dcontext));
/* finish saving caller-saved registers
* The profile_fragment_enter function will save the callee-saved
* regs (ebx, ebp, esi, edi) and will restore ebp and esp, but we need
* to explicitly save eax, ecx, and edx
*/
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_ECX, SCRATCH_REG2_OFFS));
/* save eflags (call will clobber) */
APP(&ilist, INSTR_CREATE_RAW_pushf(dcontext));
# ifdef WINDOWS
/* must preserve the LastErrorCode (if the profile procedure
* calls a Win32 API routine it could overwrite the app's error code)
* currently this is done in the profile routine itself --
* if you want to move it here, look at the code in profile.c
*/
# endif
/* push time as 2nd argument for call */
APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EDX)));
APP(&ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EAX)));
/* push fragment address as 1st argument for call
* fragment isn't built yet, we fill it in in finalize_profile_call
*/
APP(&ilist, INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0)));
/* call near rel: 4-byte pc-relative offset from start of next instr
* we don't have that offset now so we fill it in later (in
* finalize_profile_call)
*/
APP(&ilist, INSTR_CREATE_call(dcontext, opnd_create_pc(NULL)));
/* pop arguments: addl $0xc, %esp */
APP(&ilist,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_ESP), OPND_CREATE_INT8(0xc)));
/* restore eflags */
APP(&ilist, INSTR_CREATE_RAW_popf(dcontext));
/* restore caller-saved registers */
APP(&ilist, instr_create_restore_from_dcontext(dcontext, REG_ECX, SCRATCH_REG2_OFFS));
/* restore app stack */
APP(&ilist, instr_create_restore_from_dcontext(dcontext, REG_ESP, XSP_OFFSET));
/* get start time = rdtsc */
APP(&ilist, INSTR_CREATE_rdtsc(dcontext));
/* copy start time into dcontext */
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_EAX, start_time_offs));
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_EDX, start_time_offs + 4));
/* finish restoring caller-saved registers */
APP(&ilist, instr_create_restore_from_dcontext(dcontext, REG_EDX, SCRATCH_REG3_OFFS));
APP(&ilist, instr_create_restore_from_dcontext(dcontext, REG_EAX, SCRATCH_REG0_OFFS));
/* now encode the instructions */
pc = profile_call_buf;
for (inst = instrlist_first(&ilist); inst; inst = instr_get_next(inst)) {
if (instr_is_call_direct(inst)) {
/* push_immed was just before us, so fragment address
* starts 4 bytes before us:
*/
profile_call_fragment_offset = (int)(pc - 4 - profile_call_buf);
/* call opcode is 1 byte, offset is next: */
profile_call_call_offset = (int)(pc + 1 - profile_call_buf);
}
/* we have no jumps with instr_t targets so we don't need to set note
* field in order to use instr_encode
*/
nxt_pc = instr_encode(dcontext, inst, (void *)pc);
ASSERT(nxt_pc != NULL);
profile_call_length += nxt_pc - pc;
pc = nxt_pc;
ASSERT(profile_call_length < 128);
}
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
}
#endif /* PROFILE_RDTSC */
#ifdef WINDOWS
/* Leaving in place old notes on LastError preservation: */
/* inlined versions of save/restore last error by reading of TIB */
/* If our inlined version fails on a later version of windows
should verify [GS]etLastError matches the disassembly below.
*/
/* Win2000: kernel32!SetLastError: */
/* 77E87671: 55 push ebp */
/* 77E87672: 8B EC mov ebp,esp */
/* 77E87674: 64 A1 18 00 00 00 mov eax,fs:[00000018] */
/* 77E8767A: 8B 4D 08 mov ecx,dword ptr [ebp+8] */
/* 77E8767D: 89 48 34 mov dword ptr [eax+34h],ecx */
/* 77E87680: 5D pop ebp */
/* 77E87681: C2 04 00 ret 4 */
/* Win2003: ntdll!RtlSetLastWin32Error: optimized to */
/* 77F45BB4: 64 A1 18 00 00 00 mov eax,fs:[00000018] */
/* 77F45BBA: 8B 4C 24 04 mov ecx,dword ptr [esp+4] */
/* 77F45BBE: 89 48 34 mov dword ptr [eax+34h],ecx */
/* 77F45BC1: C2 04 00 ret 4 */
/* See Inside Win2k, p. 329: SelfAddr fs:[18h] simply has the linear address of the
   TIB, while we're interested only in LastError, which is at fs:[34h] */
/* Therefore all we need is a single instruction! */
/* 64 a1 34 00 00 00 mov dword ptr fs:[34h],errno_register */
/* Overall savings: 7 instructions, 5 data words */
/*kernel32!GetLastError:*/
/* 77E87684: 64 A1 18 00 00 00 mov eax,fs:[00000018] */
/* 77E8768A: 8B 40 34 mov eax,dword ptr [eax+34h] */
/* 77E8768D: C3 ret */
/* All we need is a single instruction: */
/* 77F45BBE: 89 48 34 mov reg_result, dword ptr fs:[34h] */
/* i#249: isolate app's PEB+TEB by keeping our own copy and swapping on cxt switch
* For clean calls we share this in clean_call_{save,restore} (i#171, i#1349).
*/
void
preinsert_swap_peb(dcontext_t *dcontext, instrlist_t *ilist, instr_t *next, bool absolute,
reg_id_t reg_dr, reg_id_t reg_scratch, bool to_priv)
{
    /* We assume the PEB is globally constant, so we don't need per-thread
     * pointers and can use absolute pointers known at init time.
     */
PEB *tgt_peb = to_priv ? get_private_peb() : get_own_peb();
reg_id_t scratch32 = IF_X64_ELSE(reg_64_to_32(reg_scratch), reg_scratch);
ASSERT(INTERNAL_OPTION(private_peb));
ASSERT(reg_dr != REG_NULL && reg_scratch != REG_NULL);
if (should_swap_peb_pointer()) {
/* can't store 64-bit immed, so we use scratch reg, for 32-bit too since
* long 32-bit-immed-store instr to fs:offs is slow to decode
*/
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(reg_scratch),
OPND_CREATE_INTPTR((ptr_int_t)tgt_peb)));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
PEB_TIB_OFFSET, OPSZ_PTR),
opnd_create_reg(reg_scratch)));
}
/* See the comment at the definition of SWAP_TEB_STACKLIMIT() for full
* discussion of which stack fields we swap.
*/
if (SWAP_TEB_STACKLIMIT()) {
if (to_priv) {
PRE(ilist, next,
XINST_CREATE_load(dcontext, opnd_create_reg(reg_scratch),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, BASE_STACK_TIB_OFFSET,
OPSZ_PTR)));
PRE(ilist, next,
SAVE_TO_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
APP_STACK_LIMIT_OFFSET));
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
DSTACK_OFFSET));
PRE(ilist, next,
INSTR_CREATE_lea(dcontext, opnd_create_reg(reg_scratch),
opnd_create_base_disp(reg_scratch, REG_NULL, 0,
-(int)DYNAMORIO_STACK_SIZE,
OPSZ_lea)));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, BASE_STACK_TIB_OFFSET,
OPSZ_PTR),
opnd_create_reg(reg_scratch)));
} else {
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
APP_STACK_LIMIT_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, BASE_STACK_TIB_OFFSET,
OPSZ_PTR),
opnd_create_reg(reg_scratch)));
}
}
if (SWAP_TEB_STACKBASE()) {
if (to_priv) {
PRE(ilist, next,
XINST_CREATE_load(dcontext, opnd_create_reg(reg_scratch),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, TOP_STACK_TIB_OFFSET,
OPSZ_PTR)));
PRE(ilist, next,
SAVE_TO_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
APP_STACK_BASE_OFFSET));
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
DSTACK_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, TOP_STACK_TIB_OFFSET,
OPSZ_PTR),
opnd_create_reg(reg_scratch)));
} else {
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
APP_STACK_BASE_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, TOP_STACK_TIB_OFFSET,
OPSZ_PTR),
opnd_create_reg(reg_scratch)));
}
}
if (should_swap_teb_nonstack_fields()) {
/* Preserve app's TEB->LastErrorValue. We used to do this separately b/c
* DR at one point long ago made some win32 API calls: now we only have to
* do this when loading private libraries. We assume no private library
* code needs to preserve LastErrorCode across app execution.
*/
if (to_priv) {
/* yes errno is 32 bits even on x64 */
PRE(ilist, next,
XINST_CREATE_load(dcontext, opnd_create_reg(scratch32),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, ERRNO_TIB_OFFSET,
OPSZ_4)));
PRE(ilist, next,
SAVE_TO_DC_VIA_REG(absolute, dcontext, reg_dr, scratch32,
APP_ERRNO_OFFSET));
} else {
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, scratch32,
APP_ERRNO_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, ERRNO_TIB_OFFSET, OPSZ_4),
opnd_create_reg(scratch32)));
}
        /* We also swap TEB->FlsData. Unlike TEB->ProcessEnvironmentBlock, which is
         * constant, and TEB->LastErrorCode, which is not persistent, we have to
         * maintain both values and swap between them, which is expensive.
         */
PRE(ilist, next,
XINST_CREATE_load(dcontext, opnd_create_reg(reg_scratch),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
FLS_DATA_TIB_OFFSET, OPSZ_PTR)));
PRE(ilist, next,
SAVE_TO_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
to_priv ? APP_FLS_OFFSET : PRIV_FLS_OFFSET));
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
to_priv ? PRIV_FLS_OFFSET : APP_FLS_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
FLS_DATA_TIB_OFFSET, OPSZ_PTR),
opnd_create_reg(reg_scratch)));
/* We swap TEB->ReservedForNtRpc as well. Hopefully there won't be many
* more we'll have to swap.
*/
PRE(ilist, next,
XINST_CREATE_load(dcontext, opnd_create_reg(reg_scratch),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
NT_RPC_TIB_OFFSET, OPSZ_PTR)));
PRE(ilist, next,
SAVE_TO_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
to_priv ? APP_RPC_OFFSET : PRIV_RPC_OFFSET));
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
to_priv ? PRIV_RPC_OFFSET : APP_RPC_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
NT_RPC_TIB_OFFSET, OPSZ_PTR),
opnd_create_reg(reg_scratch)));
/* We also swap TEB->NlsCache. */
PRE(ilist, next,
XINST_CREATE_load(dcontext, opnd_create_reg(reg_scratch),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
NLS_CACHE_TIB_OFFSET, OPSZ_PTR)));
PRE(ilist, next,
SAVE_TO_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
to_priv ? APP_NLS_CACHE_OFFSET : PRIV_NLS_CACHE_OFFSET));
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
to_priv ? PRIV_NLS_CACHE_OFFSET
: APP_NLS_CACHE_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
NLS_CACHE_TIB_OFFSET, OPSZ_PTR),
opnd_create_reg(reg_scratch)));
}
if (should_swap_teb_static_tls()) {
/* We also have to swap TEB->ThreadLocalStoragePointer. Unlike the other
* fields, we control this private one so we never set it from the TEB field.
*/
if (to_priv) {
PRE(ilist, next,
XINST_CREATE_load(dcontext, opnd_create_reg(reg_scratch),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, STATIC_TLS_TIB_OFFSET,
OPSZ_PTR)));
PRE(ilist, next,
SAVE_TO_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
APP_STATIC_TLS_OFFSET));
}
PRE(ilist, next,
RESTORE_FROM_DC_VIA_REG(absolute, dcontext, reg_dr, reg_scratch,
to_priv ? PRIV_STATIC_TLS_OFFSET
: APP_STATIC_TLS_OFFSET));
PRE(ilist, next,
XINST_CREATE_store(dcontext,
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
STATIC_TLS_TIB_OFFSET, OPSZ_PTR),
opnd_create_reg(reg_scratch)));
}
}
#endif /* WINDOWS */
/***************************************************************************/
/* THREAD-PRIVATE/SHARED ROUTINE GENERATION */
/***************************************************************************/
/* register for holding dcontext on fcache enter/return */
#define REG_DCTXT SCRATCH_REG5
/* append instructions to setup fcache target
* if (!absolute)
* # put target somewhere we can be absolute about
* RESTORE_FROM_UPCONTEXT next_tag_OFFSET,%xax
* if (shared)
* mov %xax,fs:xax_OFFSET
* endif
* endif
*/
static void
append_setup_fcache_target(dcontext_t *dcontext, instrlist_t *ilist, bool absolute,
bool shared)
{
if (absolute)
return;
/* put target into special slot that we can be absolute about */
APP(ilist, RESTORE_FROM_DC(dcontext, SCRATCH_REG0, NEXT_TAG_OFFSET));
if (shared) {
APP(ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG0, FCACHE_ENTER_TARGET_SLOT));
} else {
#ifdef WINDOWS
/* absolute into main dcontext (not one in REG_DCTXT) */
APP(ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG0,
NONSWAPPED_SCRATCH_OFFSET));
#else
/* no special scratch slot! */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* !WINDOWS */
}
}
/* append instructions to jump to target in code cache
* ifdef X64 and (target is x86 mode)
* # we can't indirect through a register since we couldn't restore
* # the high bits (PR 283152)
* mov gencode-jmp86-value, fs:xbx_OFFSET
* far jmp to next instr, stored w/ 32-bit cs selector in fs:xbx_OFFSET
* endif
*
* # jump indirect through dcontext->next_tag, set by d_r_dispatch()
* if (absolute)
* JUMP_VIA_DCONTEXT next_tag_OFFSET
* else
* if (shared)
* jmp *fs:xax_OFFSET
* else
* JUMP_VIA_DCONTEXT nonswapped_scratch_OFFSET
* endif
* endif
*/
static void
append_jmp_to_fcache_target(dcontext_t *dcontext, instrlist_t *ilist,
generated_code_t *code, bool absolute, bool shared,
patch_list_t *patch _IF_X86_64(byte **jmp86_store_addr)
_IF_X86_64(byte **jmp86_target_addr))
{
#ifdef X86_64
if (GENCODE_IS_X86(code->gencode_mode)) {
instr_t *label = INSTR_CREATE_label(dcontext);
instr_t *store;
/* We must use an indirect jmp (far direct are illegal in x64) and
* we can't indirect through a register since we couldn't restore the
* high bits (PR 283152) so we write the 6-byte far address to TLS.
*/
/* AMD only supports 32-bit address for far jmp */
store = XINST_CREATE_store(dcontext, OPND_TLS_FIELD_SZ(TLS_REG1_SLOT, OPSZ_4),
OPND_CREATE_INT32(0 /*placeholder*/));
APP(ilist, store);
APP(ilist,
XINST_CREATE_store(dcontext, OPND_TLS_FIELD_SZ(TLS_REG1_SLOT + 4, OPSZ_2),
OPND_CREATE_INT16((ushort)CS32_SELECTOR)));
APP(ilist,
INSTR_CREATE_jmp_far_ind(dcontext, OPND_TLS_FIELD_SZ(TLS_REG1_SLOT, OPSZ_6)));
APP(ilist, label);
/* We need a patch that involves two instrs, which is not supported,
* so we get both addresses involved into local vars and do the patch
* by hand after emitting.
*/
add_patch_marker(patch, store, PATCH_ASSEMBLE_ABSOLUTE, -4 /* 4 bytes from end */,
(ptr_uint_t *)jmp86_store_addr);
add_patch_marker(patch, label, PATCH_ASSEMBLE_ABSOLUTE, 0 /* start of label */,
(ptr_uint_t *)jmp86_target_addr);
}
#endif /* X64 */
    /* Jump indirect through next_tag. d_r_dispatch set this value to
     * where we want to go next in the fcache.
     */
if (absolute) {
APP(ilist, instr_create_jump_via_dcontext(dcontext, NEXT_TAG_OFFSET));
} else {
if (shared) {
/* next_tag placed into tls slot earlier in this routine */
#ifdef AARCH64
/* Load next_tag from FCACHE_ENTER_TARGET_SLOT, stored by
* append_setup_fcache_target.
*/
APP(ilist,
instr_create_restore_from_tls(dcontext, DR_REG_X0,
FCACHE_ENTER_TARGET_SLOT));
/* br x0 */
APP(ilist, INSTR_CREATE_br(dcontext, opnd_create_reg(DR_REG_X0)));
#else
APP(ilist,
XINST_CREATE_jump_mem(dcontext,
OPND_TLS_FIELD(FCACHE_ENTER_TARGET_SLOT)));
#endif
} else {
#ifdef WINDOWS
/* FIXME: we could just use tls, right? no real need for the "shared"
* parameter?
*/
/* need one absolute ref using main dcontext (not one in edi):
* it's the final jmp, using the special slot we set up earlier
*/
APP(ilist,
instr_create_jump_via_dcontext(dcontext, NONSWAPPED_SCRATCH_OFFSET));
#else /* !WINDOWS */
/* no special scratch slot! */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* !WINDOWS */
}
}
}
/* Our context switch to and from the fragment cache are arranged such
* that there is no persistent state kept on the dstack, allowing us to
* start with a clean slate on exiting the cache. This eliminates the
* need to protect our dstack from inadvertent or malicious writes.
*
* We do not bother to save any DynamoRIO state, even the eflags. We clear
* them in fcache_return, assuming that a cleared state is always the
* proper value (df is never set across the cache, etc.)
*
* The code is split into several helper functions.
*
* # Used by d_r_dispatch to begin execution in fcache at dcontext->next_tag
* fcache_enter(dcontext_t *dcontext)
*
* # append_fcache_enter_prologue
* mov SCRATCH_REG5, xax # save callee-saved reg in case return for signal
* if (!absolute)
* mov ARG1, SCRATCH_REG5 # dcontext param
* if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
* RESTORE_FROM_UPCONTEXT PROT_OFFSET, %xsi
* endif
* endif
* cmp signals_pending_OFFSET(SCRATCH_REG5), 0
* jle no_signals
* mov xax, SCRATCH_REG5 # restore callee-saved reg
* ret
* no_signals:
*
* # append_load_tls_base (ARM only)
* mrc p15, 0, r0, c13, c0, 2
* ldr r10, [r10, TLS_SWAP_SLOT_OFFSET]
* ldr r1, [r0, offsetof(app_tls_swap)]
* str r1, [r10, TLS_SWAP_SLOT_OFFSET]
*
* # append_setup_fcache_target
* if (!absolute)
* # put target somewhere we can be absolute about
* RESTORE_FROM_UPCONTEXT next_tag_OFFSET, SCRATCH_REG0
* if (shared)
* mov SCRATCH_REG0, fs:xax_OFFSET
* endif
* endif
*
* # append_call_exit_dr_hook
* if (EXIT_DR_HOOK != NULL && !dcontext->ignore_enterexit)
* if (!absolute)
* push %xdi
* push %xsi
* else
* # support for skipping the hook
* RESTORE_FROM_UPCONTEXT ignore_enterexit_OFFSET,%edi
* cmpl %edi,0
* jnz post_hook
* endif
* call EXIT_DR_HOOK # for x64 windows, reserve 32 bytes stack space for call
* if (!absolute)
* pop %xsi
* pop %xdi
* endif
* endif
*
* post_hook:
*
* # restore the original register state
*
* # append_restore_simd_reg
* if preserve_xmm_caller_saved
* if (ZMM_ENABLED()) # this is evaluated at *generation time*
* if (!d_r_is_avx512_code_in_use()) # this is evaluated at *runtime*
* RESTORE_FROM_UPCONTEXT simd_OFFSET+0*64,%ymm0
* RESTORE_FROM_UPCONTEXT simd_OFFSET+1*64,%ymm1
* RESTORE_FROM_UPCONTEXT simd_OFFSET+2*64,%ymm2
* RESTORE_FROM_UPCONTEXT simd_OFFSET+3*64,%ymm3
* RESTORE_FROM_UPCONTEXT simd_OFFSET+4*64,%ymm4
* RESTORE_FROM_UPCONTEXT simd_OFFSET+5*64,%ymm5
* RESTORE_FROM_UPCONTEXT simd_OFFSET+6*64,%ymm6
* RESTORE_FROM_UPCONTEXT simd_OFFSET+7*64,%ymm7 # 32-bit Linux
* ifdef X64
* RESTORE_FROM_UPCONTEXT simd_OFFSET+8*64,%ymm8
* RESTORE_FROM_UPCONTEXT simd_OFFSET+9*64,%ymm9
* RESTORE_FROM_UPCONTEXT simd_OFFSET+10*64,%ymm10
* RESTORE_FROM_UPCONTEXT simd_OFFSET+11*64,%ymm11
* RESTORE_FROM_UPCONTEXT simd_OFFSET+12*64,%ymm12
* RESTORE_FROM_UPCONTEXT simd_OFFSET+13*64,%ymm13
* RESTORE_FROM_UPCONTEXT simd_OFFSET+14*64,%ymm14
* RESTORE_FROM_UPCONTEXT simd_OFFSET+15*64,%ymm15 # 64-bit Linux
* endif
* else # d_r_is_avx512_code_in_use()
* RESTORE_FROM_UPCONTEXT simd_OFFSET+0*64,%zmm0
* RESTORE_FROM_UPCONTEXT simd_OFFSET+1*64,%zmm1
* RESTORE_FROM_UPCONTEXT simd_OFFSET+2*64,%zmm2
* RESTORE_FROM_UPCONTEXT simd_OFFSET+3*64,%zmm3
* RESTORE_FROM_UPCONTEXT simd_OFFSET+4*64,%zmm4
* RESTORE_FROM_UPCONTEXT simd_OFFSET+5*64,%zmm5
* RESTORE_FROM_UPCONTEXT simd_OFFSET+6*64,%zmm6
* RESTORE_FROM_UPCONTEXT simd_OFFSET+7*64,%zmm7 # 32-bit Linux
* ifdef X64
* RESTORE_FROM_UPCONTEXT simd_OFFSET+8*64,%zmm8
* RESTORE_FROM_UPCONTEXT simd_OFFSET+9*64,%zmm9
* RESTORE_FROM_UPCONTEXT simd_OFFSET+10*64,%zmm10
* RESTORE_FROM_UPCONTEXT simd_OFFSET+11*64,%zmm11
* RESTORE_FROM_UPCONTEXT simd_OFFSET+12*64,%zmm12
* RESTORE_FROM_UPCONTEXT simd_OFFSET+13*64,%zmm13
* RESTORE_FROM_UPCONTEXT simd_OFFSET+14*64,%zmm14
* RESTORE_FROM_UPCONTEXT simd_OFFSET+15*64,%zmm15
* RESTORE_FROM_UPCONTEXT simd_OFFSET+16*64,%zmm16
* RESTORE_FROM_UPCONTEXT simd_OFFSET+17*64,%zmm17
* RESTORE_FROM_UPCONTEXT simd_OFFSET+18*64,%zmm18
* RESTORE_FROM_UPCONTEXT simd_OFFSET+19*64,%zmm19
* RESTORE_FROM_UPCONTEXT simd_OFFSET+20*64,%zmm20
* RESTORE_FROM_UPCONTEXT simd_OFFSET+21*64,%zmm21
* RESTORE_FROM_UPCONTEXT simd_OFFSET+22*64,%zmm22
* RESTORE_FROM_UPCONTEXT simd_OFFSET+23*64,%zmm23
* RESTORE_FROM_UPCONTEXT simd_OFFSET+24*64,%zmm24
* RESTORE_FROM_UPCONTEXT simd_OFFSET+25*64,%zmm25
* RESTORE_FROM_UPCONTEXT simd_OFFSET+26*64,%zmm26
* RESTORE_FROM_UPCONTEXT simd_OFFSET+27*64,%zmm27
* RESTORE_FROM_UPCONTEXT simd_OFFSET+28*64,%zmm28
* RESTORE_FROM_UPCONTEXT simd_OFFSET+29*64,%zmm29
* RESTORE_FROM_UPCONTEXT simd_OFFSET+30*64,%zmm30
* RESTORE_FROM_UPCONTEXT simd_OFFSET+31*64,%zmm31 # 64-bit Linux
* endif
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+0*8,%k0
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+1*8,%k1
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+2*8,%k2
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+3*8,%k3
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+4*8,%k4
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+5*8,%k5
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+6*8,%k6
* RESTORE_FROM_UPCONTEXT opmask_OFFSET+7*8,%k7
* endif
* endif
* endif
*
* # append_restore_xflags
* RESTORE_FROM_UPCONTEXT xflags_OFFSET,%xax
* push %xax
* popf # restore eflags temporarily using dstack
*
* # append_restore_gpr
* ifdef X64
* RESTORE_FROM_UPCONTEXT r8_OFFSET,%r8
* RESTORE_FROM_UPCONTEXT r9_OFFSET,%r9
* RESTORE_FROM_UPCONTEXT r10_OFFSET,%r10
* RESTORE_FROM_UPCONTEXT r11_OFFSET,%r11
* RESTORE_FROM_UPCONTEXT r12_OFFSET,%r12
* RESTORE_FROM_UPCONTEXT r13_OFFSET,%r13
* RESTORE_FROM_UPCONTEXT r14_OFFSET,%r14
* RESTORE_FROM_UPCONTEXT r15_OFFSET,%r15
* endif
* RESTORE_FROM_UPCONTEXT xax_OFFSET,%xax
* RESTORE_FROM_UPCONTEXT xbx_OFFSET,%xbx
* RESTORE_FROM_UPCONTEXT xcx_OFFSET,%xcx
* RESTORE_FROM_UPCONTEXT xdx_OFFSET,%xdx
* if (absolute || !TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
* RESTORE_FROM_UPCONTEXT xsi_OFFSET,%xsi
* endif
* if (absolute || TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
* RESTORE_FROM_UPCONTEXT xdi_OFFSET,%xdi
* endif
* RESTORE_FROM_UPCONTEXT xbp_OFFSET,%xbp
* RESTORE_FROM_UPCONTEXT xsp_OFFSET,%xsp
* if (!absolute)
* if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
* RESTORE_FROM_UPCONTEXT xsi_OFFSET,%xsi
* else
* RESTORE_FROM_UPCONTEXT xdi_OFFSET,%xdi
* endif
* endif
*
* # append_jmp_to_fcache_target
* ifdef X64 and (target is x86 mode)
* # we can't indirect through a register since we couldn't restore
* # the high bits (PR 283152)
* mov gencode-jmp86-value, fs:xbx_OFFSET
* far jmp to next instr, stored w/ 32-bit cs selector in fs:xbx_OFFSET
* endif
*
* # jump indirect through dcontext->next_tag, set by d_r_dispatch()
* if (absolute)
* JUMP_VIA_DCONTEXT next_tag_OFFSET
* else
* if (shared)
* jmp *fs:xax_OFFSET
* else
* JUMP_VIA_DCONTEXT nonswapped_scratch_OFFSET
* endif
* endif
*
* # now executing in fcache
*/
static byte *
emit_fcache_enter_common(dcontext_t *dcontext, generated_code_t *code, byte *pc,
bool absolute, bool shared)
{
int len;
instrlist_t ilist;
patch_list_t patch;
#if defined(X86) && defined(X64)
byte *jmp86_store_addr = NULL;
byte *jmp86_target_addr = NULL;
#endif /* X86 && X64 */
init_patch_list(&patch, absolute ? PATCH_TYPE_ABSOLUTE : PATCH_TYPE_INDIRECT_XDI);
instrlist_init(&ilist);
/* no support for absolute addresses on x64/ARM: we always use tls */
IF_X64(ASSERT_NOT_IMPLEMENTED(!absolute && shared));
IF_ARM(ASSERT_NOT_IMPLEMENTED(!absolute && shared));
append_fcache_enter_prologue(dcontext, &ilist, absolute);
append_setup_fcache_target(dcontext, &ilist, absolute, shared);
append_call_exit_dr_hook(dcontext, &ilist, absolute, shared);
#ifdef WINDOWS
/* i#249: isolate the PEB and TEB */
preinsert_swap_peb(dcontext, &ilist, NULL, absolute, SCRATCH_REG5,
SCRATCH_REG0 /*scratch*/, false /*to app*/);
#endif
#ifdef AARCH64
/* Put app's X0, X1 in TLS_REG0_SLOT, TLS_REG1_SLOT; this is required by
* the fragment prefix.
*/
/* ldp x0, x1, [x5] */
APP(&ilist,
XINST_CREATE_load_pair(
dcontext, opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X1),
opnd_create_base_disp(DR_REG_X5, DR_REG_NULL, 0, 0, OPSZ_16)));
/* stp x0, x1, [x28] */
APP(&ilist,
XINST_CREATE_store_pair(
dcontext, opnd_create_base_disp(dr_reg_stolen, DR_REG_NULL, 0, 0, OPSZ_16),
opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X1)));
#endif
/* restore the original register state */
append_restore_simd_reg(dcontext, &ilist, absolute);
    /* Note that append_restore_simd_reg may change the flags, so it must
     * come before append_restore_xflags.
     */
append_restore_xflags(dcontext, &ilist, absolute);
append_restore_gpr(dcontext, &ilist, absolute);
append_jmp_to_fcache_target(dcontext, &ilist, code, absolute, shared,
&patch _IF_X86_64(&jmp86_store_addr)
_IF_X86_64(&jmp86_target_addr));
/* now encode the instructions */
len = encode_with_patch_list(dcontext, &patch, &ilist, pc);
ASSERT(len != 0);
#if defined(X86) && defined(X64)
if (GENCODE_IS_X86(code->gencode_mode)) {
/* Put the absolute address in place */
ASSERT(jmp86_target_addr != NULL && jmp86_store_addr != NULL);
ASSERT(CHECK_TRUNCATE_TYPE_uint((ptr_uint_t)jmp86_target_addr));
*((uint *)jmp86_store_addr) = (uint)(ptr_uint_t)jmp86_target_addr;
}
#endif
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc + len;
}
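/* Emits the thread-private fcache_enter, which addresses the dcontext
 * absolutely rather than indirecting through xdi/TLS.
 */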
byte *
emit_fcache_enter(dcontext_t *dcontext, generated_code_t *code, byte *pc)
{
return emit_fcache_enter_common(dcontext, code, pc, true /*absolute*/,
false /*!shared*/);
}
/* Generate a shared prologue for grabbing the dcontext into XDI.
 * TODO: should be used by fcache_return and shared IBL routines,
 * but for now some assumptions are not quite the same.
 * Only assumption is that xcx cannot be touched (IBL expects the
 * looked-up address there).
 * If save_xdi, we assume DCONTEXT_BASE_SPILL_SLOT can be clobbered.
 * OUTPUT: xdi contains dcontext
 * if save_xdi, DCONTEXT_BASE_SPILL_SLOT will contain the saved value
 * FIXME: xdx is the spill slot -- switch over to xdx as base reg?
 * Have to measure perf effect first (case 5239)
 *
 * 00: mov xdi, tls_slot_scratch2  64 89 3d 0c 0f 00 00  mov %edi -> %fs:0xf0c
 * 07: mov tls_slot_dcontext, xdi  64 8b 3d 14 0f 00 00  mov %fs:0xf14 -> %edi
 * if TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)
 *   ASSERT_NOT_TESTED
 * endif
 */
void
insert_shared_get_dcontext(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
bool save_xdi)
{
/* needed to support grabbing the dcontext w/ shared cache */
if (save_xdi) {
PRE(ilist, where,
SAVE_TO_TLS(dcontext, SCRATCH_REG5 /*xdi/r5*/, DCONTEXT_BASE_SPILL_SLOT));
}
PRE(ilist, where,
RESTORE_FROM_TLS(dcontext, SCRATCH_REG5 /*xdi/r5*/, TLS_DCONTEXT_SLOT));
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
#ifdef X86
bool absolute = false;
/* PR 224798: we could avoid extra indirection by storing
* unprotected_context_t in TLS_DCONTEXT_SLOT instead of dcontext_t
*/
ASSERT_NOT_TESTED();
        /* we'd need a 3rd slot in order to nicely get the unprot ptr into esi.
         * We can do it w/ only 2 slots by clobbering the dcontext ptr
         * (we could add base reg info to RESTORE_FROM_DC/SAVE_TO_DC, go
         * straight through esi to begin w/, and subtract one instr (xchg)).
         */
PRE(ilist, where, RESTORE_FROM_DC(dcontext, SCRATCH_REG5, PROT_OFFS));
PRE(ilist, where,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(SCRATCH_REG4),
opnd_create_reg(SCRATCH_REG5)));
PRE(ilist, where, SAVE_TO_DC(dcontext, SCRATCH_REG5, SCRATCH_REG4_OFFS));
PRE(ilist, where, RESTORE_FROM_TLS(dcontext, SCRATCH_REG5, TLS_DCONTEXT_SLOT));
#elif defined(ARM)
        /* FIXME i#1551: NYI on ARM */
ASSERT_NOT_REACHED();
#endif
}
}
/* restore XDI through TLS */
void
insert_shared_restore_dcontext_reg(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *where)
{
PRE(ilist, where,
RESTORE_FROM_TLS(dcontext, SCRATCH_REG5 /*xdi/r5*/, DCONTEXT_BASE_SPILL_SLOT));
}
/* append instructions to prepare for fcache return:
* i.e., far jump to switch mode, load dcontext, etc.
*
* # on X86
* ifdef X64 and (source is x86 mode)
* far direct jmp to next instr w/ 64-bit switch
* endif
*
* if (!absolute)
* mov %xdi,fs:xdx_OFFSET
* mov fs:dcontext,%xdi
* if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
* RESTORE_FROM_DCONTEXT PROT_OFFSET,%xdi
* xchg %xsi,%xdi
* SAVE_TO_UPCONTEXT %xdi,xsi_OFFSET
* mov fs:dcontext,%xdi
* endif
* # get xax and xdi into their real slots, via xbx
* SAVE_TO_UPCONTEXT %xbx,xbx_OFFSET
* mov fs:xax_OFFSET,%xbx
* SAVE_TO_UPCONTEXT %xbx,xax_OFFSET
* mov fs:xdx_OFFSET,%xbx
* SAVE_TO_UPCONTEXT %xbx,xdi_OFFSET
* endif
*/
static bool
append_prepare_fcache_return(dcontext_t *dcontext, generated_code_t *code,
instrlist_t *ilist, bool absolute, bool shared)
{
bool instr_targets = false;
#ifdef X86_64
if (GENCODE_IS_X86(code->gencode_mode)) {
instr_t *label = INSTR_CREATE_label(dcontext);
instr_t *ljmp =
INSTR_CREATE_jmp_far(dcontext, opnd_create_far_instr(CS64_SELECTOR, label));
instr_set_x86_mode(ljmp, true /*x86*/);
APP(ilist, ljmp);
APP(ilist, label);
instr_targets = true;
}
#endif /* X86_64 */
if (absolute)
return instr_targets;
/* only support non-absolute w/ shared cache */
ASSERT_NOT_IMPLEMENTED(shared);
/* xax is in 1 scratch slot, so we have to use a 2nd scratch
* slot in order to get dcontext into xdi
*/
APP(ilist, SAVE_TO_TLS(dcontext, REG_DCTXT, DCONTEXT_BASE_SPILL_SLOT));
APP(ilist, RESTORE_FROM_TLS(dcontext, REG_DCTXT, TLS_DCONTEXT_SLOT));
if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)) {
#ifdef X86
    /* we'd need a 3rd slot in order to nicely get the unprot ptr into xsi.
     * We can do it w/ only 2 slots by clobbering the dcontext ptr
     * (we could add base reg info to RESTORE_FROM_DC/SAVE_TO_DC, go
     * straight through xsi to begin w/, and subtract one instr (xchg)).
     */
ASSERT_NOT_TESTED();
APP(ilist, RESTORE_FROM_DC(dcontext, SCRATCH_REG5, PROT_OFFS));
APP(ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(SCRATCH_REG4),
opnd_create_reg(SCRATCH_REG5)));
APP(ilist, SAVE_TO_DC(dcontext, SCRATCH_REG5, SCRATCH_REG4_OFFS));
APP(ilist, RESTORE_FROM_TLS(dcontext, SCRATCH_REG5, TLS_DCONTEXT_SLOT));
#elif defined(ARM)
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_REACHED();
#endif /* X86/ARM */
}
return instr_targets;
}
static void
append_call_dispatch(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
/* call central d_r_dispatch routine */
/* for x64 linux we could optimize and avoid the "mov rdi, rdi" */
/* for ARM we use _noreturn to avoid storing to %lr */
dr_insert_call_noreturn(
(void *)dcontext, ilist, NULL /*append*/, (void *)d_r_dispatch, 1,
absolute ? OPND_CREATE_INTPTR((ptr_int_t)dcontext) : opnd_create_reg(REG_DCTXT));
/* d_r_dispatch() shouldn't return! */
insert_reachable_cti(dcontext, ilist, NULL, vmcode_get_start(),
(byte *)unexpected_return, true /*jmp*/, false /*!returns*/,
false /*!precise*/, DR_REG_R11 /*scratch*/, NULL);
}
/*
* # fcache_return: context switch back to DynamoRIO.
* # Invoked via
* # a) from the fcache via a fragment exit stub,
* # b) from indirect_branch_lookup().
* # Invokes d_r_dispatch() with a clean dstack.
* # Assumptions:
* # 1) app's value in xax/r0 already saved in dcontext.
* # 2) xax/r0 holds the linkstub ptr
* #
*
* fcache_return:
* # append_fcache_return_prologue
* ifdef X64 and (source is x86 mode)
* far direct jmp to next instr w/ 64-bit switch
* endif
*
* if (!absolute)
* mov %xdi,fs:xdx_OFFSET
* mov fs:dcontext,%xdi
* if (TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
* RESTORE_FROM_DCONTEXT PROT_OFFSET,%xdi
* xchg %xsi,%xdi
* SAVE_TO_UPCONTEXT %xdi,xsi_OFFSET
* mov fs:dcontext,%xdi
* endif
* endif
*
* # append_save_gpr
* if (!absolute)
* # get xax and xdi into their real slots, via xbx
* SAVE_TO_UPCONTEXT %xbx,xbx_OFFSET
* mov fs:xax_OFFSET,%xbx
* SAVE_TO_UPCONTEXT %xbx,xax_OFFSET
* mov fs:xdx_OFFSET,%xbx
* SAVE_TO_UPCONTEXT %xbx,xdi_OFFSET
* endif
*
* # save the current register state to context->regs
* # xax already in context
*
* if (absolute)
* SAVE_TO_UPCONTEXT %xbx,xbx_OFFSET
* endif
* SAVE_TO_UPCONTEXT %xcx,xcx_OFFSET
* SAVE_TO_UPCONTEXT %xdx,xdx_OFFSET
* if (absolute || !TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask))
* SAVE_TO_UPCONTEXT %xsi,xsi_OFFSET
* endif
* if (absolute)
* SAVE_TO_UPCONTEXT %xdi,xdi_OFFSET
* endif
* SAVE_TO_UPCONTEXT %xbp,xbp_OFFSET
* SAVE_TO_UPCONTEXT %xsp,xsp_OFFSET
* ifdef X64
* SAVE_TO_UPCONTEXT %r8,r8_OFFSET
* SAVE_TO_UPCONTEXT %r9,r9_OFFSET
* SAVE_TO_UPCONTEXT %r10,r10_OFFSET
* SAVE_TO_UPCONTEXT %r11,r11_OFFSET
* SAVE_TO_UPCONTEXT %r12,r12_OFFSET
* SAVE_TO_UPCONTEXT %r13,r13_OFFSET
* SAVE_TO_UPCONTEXT %r14,r14_OFFSET
* SAVE_TO_UPCONTEXT %r15,r15_OFFSET
* endif
*
* # switch to clean dstack
* RESTORE_FROM_DCONTEXT dstack_OFFSET,%xsp
*
* # append_save_clear_xflags
* # now save eflags -- too hard to do without a stack!
* pushf # push eflags on stack
* pop %xbx # grab eflags value
* SAVE_TO_UPCONTEXT %xbx,xflags_OFFSET # save eflags value
*
* # append_save_simd_reg
* if preserve_xmm_caller_saved
* if (ZMM_ENABLED()) # this is evaluated at *generation time*
* if (!d_r_is_avx512_code_in_use()) # this is evaluated at *runtime*
* SAVE_TO_UPCONTEXT %ymm0,simd_OFFSET+0*64
* SAVE_TO_UPCONTEXT %ymm1,simd_OFFSET+1*64
* SAVE_TO_UPCONTEXT %ymm2,simd_OFFSET+2*64
* SAVE_TO_UPCONTEXT %ymm3,simd_OFFSET+3*64
* SAVE_TO_UPCONTEXT %ymm4,simd_OFFSET+4*64
* SAVE_TO_UPCONTEXT %ymm5,simd_OFFSET+5*64
* SAVE_TO_UPCONTEXT %ymm6,simd_OFFSET+6*64
* SAVE_TO_UPCONTEXT %ymm7,simd_OFFSET+7*64 # 32-bit Linux
* ifdef X64
* SAVE_TO_UPCONTEXT %ymm8,simd_OFFSET+8*64
* SAVE_TO_UPCONTEXT %ymm9,simd_OFFSET+9*64
* SAVE_TO_UPCONTEXT %ymm10,simd_OFFSET+10*64
* SAVE_TO_UPCONTEXT %ymm11,simd_OFFSET+11*64
* SAVE_TO_UPCONTEXT %ymm12,simd_OFFSET+12*64
* SAVE_TO_UPCONTEXT %ymm13,simd_OFFSET+13*64
* SAVE_TO_UPCONTEXT %ymm14,simd_OFFSET+14*64
* SAVE_TO_UPCONTEXT %ymm15,simd_OFFSET+15*64
* endif
* else # d_r_is_avx512_code_in_use()
* SAVE_TO_UPCONTEXT %zmm0,simd_OFFSET+0*64
* SAVE_TO_UPCONTEXT %zmm1,simd_OFFSET+1*64
* SAVE_TO_UPCONTEXT %zmm2,simd_OFFSET+2*64
* SAVE_TO_UPCONTEXT %zmm3,simd_OFFSET+3*64
* SAVE_TO_UPCONTEXT %zmm4,simd_OFFSET+4*64
* SAVE_TO_UPCONTEXT %zmm5,simd_OFFSET+5*64
* SAVE_TO_UPCONTEXT %zmm6,simd_OFFSET+6*64
* SAVE_TO_UPCONTEXT %zmm7,simd_OFFSET+7*64
* ifdef X64
* SAVE_TO_UPCONTEXT %zmm8,simd_OFFSET+8*64
* SAVE_TO_UPCONTEXT %zmm9,simd_OFFSET+9*64
* SAVE_TO_UPCONTEXT %zmm10,simd_OFFSET+10*64
* SAVE_TO_UPCONTEXT %zmm11,simd_OFFSET+11*64
* SAVE_TO_UPCONTEXT %zmm12,simd_OFFSET+12*64
* SAVE_TO_UPCONTEXT %zmm13,simd_OFFSET+13*64
* SAVE_TO_UPCONTEXT %zmm14,simd_OFFSET+14*64
* SAVE_TO_UPCONTEXT %zmm15,simd_OFFSET+15*64
* SAVE_TO_UPCONTEXT %zmm16,simd_OFFSET+16*64
* SAVE_TO_UPCONTEXT %zmm17,simd_OFFSET+17*64
* SAVE_TO_UPCONTEXT %zmm18,simd_OFFSET+18*64
* SAVE_TO_UPCONTEXT %zmm19,simd_OFFSET+19*64
* SAVE_TO_UPCONTEXT %zmm20,simd_OFFSET+20*64
* SAVE_TO_UPCONTEXT %zmm21,simd_OFFSET+21*64
* SAVE_TO_UPCONTEXT %zmm22,simd_OFFSET+22*64
* SAVE_TO_UPCONTEXT %zmm23,simd_OFFSET+23*64
* SAVE_TO_UPCONTEXT %zmm24,simd_OFFSET+24*64
* SAVE_TO_UPCONTEXT %zmm25,simd_OFFSET+25*64
* SAVE_TO_UPCONTEXT %zmm26,simd_OFFSET+26*64
* SAVE_TO_UPCONTEXT %zmm27,simd_OFFSET+27*64
* SAVE_TO_UPCONTEXT %zmm28,simd_OFFSET+28*64
* SAVE_TO_UPCONTEXT %zmm29,simd_OFFSET+29*64
* SAVE_TO_UPCONTEXT %zmm30,simd_OFFSET+30*64
* SAVE_TO_UPCONTEXT %zmm31,simd_OFFSET+31*64
* endif
* SAVE_TO_UPCONTEXT %k0,opmask_OFFSET+0*8
* SAVE_TO_UPCONTEXT %k1,opmask_OFFSET+1*8
* SAVE_TO_UPCONTEXT %k2,opmask_OFFSET+2*8
* SAVE_TO_UPCONTEXT %k3,opmask_OFFSET+3*8
* SAVE_TO_UPCONTEXT %k4,opmask_OFFSET+4*8
* SAVE_TO_UPCONTEXT %k5,opmask_OFFSET+5*8
* SAVE_TO_UPCONTEXT %k6,opmask_OFFSET+6*8
* SAVE_TO_UPCONTEXT %k7,opmask_OFFSET+7*8
* endif
* endif
* endif
*
* # clear eflags now to avoid app's eflags messing up our ENTER_DR_HOOK
* # FIXME: this won't work at CPL0 if we ever run there!
* push 0
* popf
*
* # append_call_enter_dr_hook
* if (ENTER_DR_HOOK != NULL && !dcontext->ignore_enterexit)
* # don't bother to save any registers around call except for xax
* # and xcx, which holds next_tag
* push %xcx
* if (!absolute)
* push %xdi
* push %xsi
* endif
* push %xax
* if (absolute)
* # support for skipping the hook (note: 32-bits even on x64)
* RESTORE_FROM_UPCONTEXT ignore_enterexit_OFFSET,%edi
* cmp %edi,0
* jnz post_hook
* endif
* # for x64 windows, reserve 32 bytes stack space for call prior to call
* call ENTER_DR_HOOK
*
* post_hook:
* pop %xax
* if (!absolute)
* pop %xsi
* pop %xdi
* endif
* pop %xcx
* endif
*
* # save last_exit, currently in eax, into dcontext->last_exit
* SAVE_TO_DCONTEXT %xax,last_exit_OFFSET
*
* .ifdef WINDOWS
* swap_peb
* .endif
*
* .ifdef SIDELINE
* # clear cur-trace field so we don't think cur trace is still running
* movl $0, _sideline_trace
* .endif
*
* # call central d_r_dispatch routine w/ dcontext as an argument
* if (absolute)
* push <dcontext>
* else
* push %xdi # for x64, mov %xdi, ARG1
* endif
* call d_r_dispatch # for x64 windows, reserve 32 bytes stack space for call
* # d_r_dispatch() shouldn't return!
* jmp unexpected_return
*/
/* N.B.: this routine is used to generate both the regular fcache_return
* and a slightly different copy that is used for the miss/unlinked paths
* for indirect_branch_lookup for self-protection.
* ibl_end should be true only for that end of the lookup routine.
*
* If linkstub != NULL, used for coarse fragments, this routine assumes that:
* - app xax is still in %xax
* - next target pc is in DIRECT_STUB_SPILL_SLOT tls
* - linkstub is the linkstub_t to pass back to d_r_dispatch
* - if coarse_info:
* - app xcx is in MANGLE_XCX_SPILL_SLOT
* - source coarse info is in %xcx
*
* We assume this routine does not use TLS slot FLOAT_PC_STATE_SLOT (TLS_REG1_SLOT).
*/
bool
append_fcache_return_common(dcontext_t *dcontext, generated_code_t *code,
instrlist_t *ilist, bool ibl_end, bool absolute, bool shared,
linkstub_t *linkstub, bool coarse_info)
{
bool instr_targets;
/* no support for absolute addresses on x64: we always use tls */
IF_X64(ASSERT_NOT_IMPLEMENTED(!absolute && shared));
/* currently linkstub is only used for coarse-grain exits */
ASSERT(linkstub == NULL || !absolute);
instr_targets = append_prepare_fcache_return(dcontext, code, ilist, absolute, shared);
append_save_gpr(dcontext, ilist, ibl_end, absolute, code, linkstub, coarse_info);
/* Switch to a clean dstack as part of our scheme to avoid state kept
* unprotected across cache executions.
* FIXME: this isn't perfect: we switch to the dstack BEFORE we call
* the entrance hook that will be used to coordinate other threads,
* so if our hook suspends all other threads to protect vs cross-thread
* attacks, the dstack is not perfectly protected.
*/
#ifdef AARCH64
APP(ilist, RESTORE_FROM_DC(dcontext, DR_REG_X1, DSTACK_OFFSET));
APP(ilist,
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_SP),
opnd_create_reg(DR_REG_X1)));
#else
APP(ilist, RESTORE_FROM_DC(dcontext, REG_XSP, DSTACK_OFFSET));
#endif
append_save_clear_xflags(dcontext, ilist, absolute);
    /* Note that append_save_simd_reg may change the flags, so the flags must
     * be saved (and cleared) first.
     */
append_save_simd_reg(dcontext, ilist, absolute);
#ifdef X86
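    /* The runtime avx-512 check in append_save_simd_reg (see the pseudocode
     * above) introduces internal branches, so the encoder must resolve
     * instr targets.
     */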
instr_targets = ZMM_ENABLED() || instr_targets;
#endif
instr_targets =
append_call_enter_dr_hook(dcontext, ilist, ibl_end, absolute) || instr_targets;
/* save last_exit, currently in scratch_reg0 into dcontext->last_exit */
APP(ilist, SAVE_TO_DC(dcontext, SCRATCH_REG0, LAST_EXIT_OFFSET));
#ifdef WINDOWS
/* i#249: isolate the PEB and TEB */
preinsert_swap_peb(dcontext, ilist, NULL, absolute, SCRATCH_REG5,
SCRATCH_REG0 /*scratch*/, true /*to priv*/);
#endif
#ifdef SIDELINE
if (dynamo_options.sideline) {
/* clear cur-trace field so we don't think cur trace is still running */
/* PR 248210: unsupported feature on x64 */
IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* PR 244737: fix abs address */
APP(ilist,
XINST_CREATE_store(dcontext,
OPND_CREATE_MEM32(REG_NULL, (int)&sideline_trace),
OPND_CREATE_INT32(0)));
}
#endif
append_call_dispatch(dcontext, ilist, absolute);
return instr_targets;
}
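/* Emits the thread-private fcache_return (absolute addressing, non-shared). */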
byte *
emit_fcache_return(dcontext_t *dcontext, generated_code_t *code, byte *pc)
{
bool instr_targets;
instrlist_t ilist;
instrlist_init(&ilist);
instr_targets = append_fcache_return_common(
dcontext, code, &ilist, false /*!ibl_end*/, true /*absolute*/, false /*!shared*/,
NULL, false /*not coarse*/);
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, instr_targets);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
byte *
emit_fcache_enter_shared(dcontext_t *dcontext, generated_code_t *code, byte *pc)
{
return emit_fcache_enter_common(dcontext, code, pc, false /*through xdi*/,
true /*shared*/);
}
byte *
emit_fcache_return_shared(dcontext_t *dcontext, generated_code_t *code, byte *pc)
{
bool instr_targets;
instrlist_t ilist;
instrlist_init(&ilist);
instr_targets = append_fcache_return_common(
dcontext, code, &ilist, false /*!ibl_end*/, false /*through xdi*/,
true /*shared*/, NULL, false /*not coarse*/);
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, instr_targets);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
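/* Coarse-grain variant: passes the coarse exit linkstub and expects the
 * source coarse info in %xcx (see append_fcache_return_common's contract
 * above).
 */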
byte *
emit_fcache_return_coarse(dcontext_t *dcontext, generated_code_t *code, byte *pc)
{
bool instr_targets;
linkstub_t *linkstub = (linkstub_t *)get_coarse_exit_linkstub();
instrlist_t ilist;
instrlist_init(&ilist);
instr_targets = append_fcache_return_common(
dcontext, code, &ilist, false /*!ibl_end*/, false /*through xdi*/,
true /*shared*/, linkstub, true /*coarse info in xcx*/);
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, instr_targets);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
byte *
emit_trace_head_return_coarse(dcontext_t *dcontext, generated_code_t *code, byte *pc)
{
/* Could share tail end of coarse_fcache_return instead of duplicating */
bool instr_targets;
linkstub_t *linkstub = (linkstub_t *)get_coarse_trace_head_exit_linkstub();
instrlist_t ilist;
instrlist_init(&ilist);
instr_targets = append_fcache_return_common(
dcontext, code, &ilist, false /*!ibl_end*/, false /*through xdi*/,
true /*shared*/, linkstub, false /*no coarse info*/);
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, instr_targets);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
/* Our coarse entrance stubs have several advantages, such as eliminating
* future fragments, but their accompanying lazy linking does need source
* information that is not available in each stub. We instead have an
* unlinked entrance stub target a per-unit prefix that records the source
* unit. We can then search within the unit to identify the actual source
* entrance stub, which is enough for lazy linking (but does not find the
* unique source tag: case 8565). This also gives us a single indirection
* point in the form of the prefix at which to patch the fcache_return target.
* We also place in the prefix indirection points for trace head cache exit and
* the 3 coarse ibl targets, to keep the cache read-only and (again) make it
* easier to patch when persisting/sharing.
*/
uint
coarse_exit_prefix_size(coarse_info_t *info)
{
#if defined(X86) && defined(X64)
uint flags = COARSE_32_FLAG(info);
#endif
/* FIXME: would be nice to use size calculated in emit_coarse_exit_prefix(),
* but we need to know size before we emit and would have to do a throwaway
* emit, or else set up a template to be patched w/ specific info field.
* Also we'd have to unprot .data as we don't access this until post-init.
*/
/* We don't need to require addr16: in fact it might be better to force
* not using it, so if we persist on P4 but run on Core we don't lose
* performance. We have enough space.
*/
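    /* Note: the size macros below are named for xbx/xax, but these mov
     * encodings are the same length for the xcx-based sequence actually
     * emitted in emit_coarse_exit_prefix().
     */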
#ifdef X86
return SIZE_MOV_XBX_TO_TLS(flags, false) + SIZE_MOV_PTR_IMM_TO_XAX(flags) +
5 * JMP_LONG_LENGTH;
#else
/* FIXME i#1575: implement coarse-grain support; move to arch-specific dir? */
ASSERT_NOT_IMPLEMENTED(false);
return 0;
#endif
}
byte *
emit_coarse_exit_prefix(dcontext_t *dcontext, byte *pc, coarse_info_t *info)
{
byte *ibl;
DEBUG_DECLARE(byte *start_pc = pc;)
instrlist_t ilist;
patch_list_t patch;
instr_t *fcache_ret_prefix;
#if defined(X86) && defined(X64)
gencode_mode_t mode = FRAGMENT_GENCODE_MODE(COARSE_32_FLAG(info));
#endif
instrlist_init(&ilist);
init_patch_list(&patch, PATCH_TYPE_INDIRECT_FS);
/* prefix looks like this, using xcx instead of xbx just to make
* the fcache_return code simpler (as it already uses xbx early),
* and using the info as we're doing per-cache and not per-unit:
*
* fcache_return_coarse_prefix:
* 6/9 mov %xcx, MANGLE_XCX_SPILL_SLOT
* 5/10 mov <info ptr>, %xcx
* 5 jmp fcache_return_coarse
* trace_head_return_coarse_prefix:
* 5 jmp trace_head_return_coarse
* (if -disable_traces, it jmps to fcache_return_coarse_prefix instead)
* coarse_ibl_ret_prefix:
* 5 jmp coarse_ibl_ret
* coarse_ibl_call_prefix:
* 5 jmp coarse_ibl_call
* coarse_ibl_jmp_prefix:
* 5 jmp coarse_ibl_jmp
*
* We assume that info ptr is at
     * trace_head_return_prefix - JMP_LONG_LENGTH - sizeof(info)
* in patch_coarse_exit_prefix().
* We assume that the ibl prefixes are nothing but jmps in
* coarse_indirect_stub_jmp_target() so we can recover the ibl type.
*
* FIXME case 9647: on P4 our jmp->jmp sequence will be
* elided, but on Core we may want to switch to a jmp*, though
* since we have no register for a base ptr we'd need a reloc
* entry for every single stub
*/
/* entrance stub has put target_tag into xax-slot so we use xcx-slot */
ASSERT(DIRECT_STUB_SPILL_SLOT != MANGLE_XCX_SPILL_SLOT);
fcache_ret_prefix = INSTR_CREATE_label(dcontext);
APP(&ilist, fcache_ret_prefix);
#if defined(X86) && defined(X64)
if (TEST(PERSCACHE_X86_32, info->flags)) {
/* XXX: this won't work b/c opnd size will be wrong */
ASSERT_NOT_IMPLEMENTED(false && "must pass opnd size to SAVE_TO_TLS");
APP(&ilist, SAVE_TO_TLS(dcontext, REG_ECX, MANGLE_XCX_SPILL_SLOT));
/* We assume all our data structures are <4GB which is guaranteed for
* WOW64 processes.
*/
ASSERT(CHECK_TRUNCATE_TYPE_int((ptr_int_t)info));
APP(&ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_INT32((int)(ptr_int_t)info)));
} else { /* default code */
if (GENCODE_IS_X86_TO_X64(mode) && DYNAMO_OPTION(x86_to_x64_ibl_opt))
APP(&ilist, SAVE_TO_REG(dcontext, SCRATCH_REG2, REG_R9));
else
#endif
APP(&ilist,
SAVE_TO_TLS(dcontext, SCRATCH_REG2 /*xcx/r2*/, MANGLE_XCX_SPILL_SLOT));
APP(&ilist,
XINST_CREATE_load_int(dcontext, opnd_create_reg(SCRATCH_REG2 /*xcx/r2*/),
OPND_CREATE_INTPTR((ptr_int_t)info)));
#if defined(X86) && defined(X64)
}
#endif
APP(&ilist,
XINST_CREATE_jump(
dcontext,
opnd_create_pc(get_direct_exit_target(
dcontext, FRAG_SHARED | FRAG_COARSE_GRAIN | COARSE_32_FLAG(info)))));
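    /* The above is the "jmp fcache_return_coarse" from the layout sketch;
     * the label that follows marks trace_head_return_coarse_prefix.
     */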
APP(&ilist, INSTR_CREATE_label(dcontext));
add_patch_marker(&patch, instrlist_last(&ilist), PATCH_ASSEMBLE_ABSOLUTE,
0 /* start of instr */,
(ptr_uint_t *)&info->trace_head_return_prefix);
if (DYNAMO_OPTION(disable_traces) ||
/* i#670: the stub stored the abs addr at persist time. we need
* to adjust to the use-time mod base which we do in d_r_dispatch
* but we need to set the dcontext->coarse_exit so we go through
* the fcache return
*/
(info->frozen && info->mod_shift != 0)) {
        /* trace heads need to store the info ptr for lazy linking */
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_instr(fcache_ret_prefix)));
} else {
APP(&ilist,
XINST_CREATE_jump(
dcontext,
opnd_create_pc(trace_head_return_coarse_routine(IF_X86_64(mode)))));
}
/* coarse does not support IBL_FAR so we don't bother with get_ibl_entry_type() */
ibl = get_ibl_routine_ex(
dcontext, IBL_LINKED,
get_source_fragment_type(dcontext, FRAG_SHARED | FRAG_COARSE_GRAIN),
IBL_RETURN _IF_X86_64(mode));
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ibl)));
add_patch_marker(&patch, instrlist_last(&ilist), PATCH_ASSEMBLE_ABSOLUTE,
0 /* start of instr */, (ptr_uint_t *)&info->ibl_ret_prefix);
ibl = get_ibl_routine_ex(
dcontext, IBL_LINKED,
get_source_fragment_type(dcontext, FRAG_SHARED | FRAG_COARSE_GRAIN),
IBL_INDCALL _IF_X86_64(mode));
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ibl)));
add_patch_marker(&patch, instrlist_last(&ilist), PATCH_ASSEMBLE_ABSOLUTE,
0 /* start of instr */, (ptr_uint_t *)&info->ibl_call_prefix);
ibl = get_ibl_routine_ex(
dcontext, IBL_LINKED,
get_source_fragment_type(dcontext, FRAG_SHARED | FRAG_COARSE_GRAIN),
IBL_INDJMP _IF_X86_64(mode));
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ibl)));
add_patch_marker(&patch, instrlist_last(&ilist), PATCH_ASSEMBLE_ABSOLUTE,
0 /* start of instr */, (ptr_uint_t *)&info->ibl_jmp_prefix);
/* now encode the instructions */
pc += encode_with_patch_list(dcontext, &patch, &ilist, pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
ASSERT((size_t)(pc - start_pc) == coarse_exit_prefix_size(info));
DOLOG(3, LOG_EMIT, {
byte *dpc = start_pc;
LOG(GLOBAL, LOG_EMIT, 3, "\nprefixes for coarse unit %s:\n", info->module);
do {
if (dpc == info->fcache_return_prefix)
LOG(GLOBAL, LOG_EMIT, 3, "fcache_return_coarse_prefix:\n");
else if (dpc == info->trace_head_return_prefix)
LOG(GLOBAL, LOG_EMIT, 3, "trace_head_return_coarse_prefix:\n");
else if (dpc == info->ibl_ret_prefix)
LOG(GLOBAL, LOG_EMIT, 3, "ibl_coarse_ret_prefix:\n");
else if (dpc == info->ibl_call_prefix)
LOG(GLOBAL, LOG_EMIT, 3, "ibl_coarse_call_prefix:\n");
else if (dpc == info->ibl_jmp_prefix)
LOG(GLOBAL, LOG_EMIT, 3, "ibl_coarse_jmp_prefix:\n");
dpc = disassemble_with_bytes(dcontext, dpc, GLOBAL);
} while (dpc < pc);
LOG(GLOBAL, LOG_EMIT, 3, "\n");
});
return pc;
}
/* Update info pointer in exit prefixes */
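/* The info ptr is the immediate operand of the "mov <info ptr>, %xcx" emitted
 * in emit_coarse_exit_prefix(): it occupies the sizeof(info) bytes just before
 * the jmp at trace_head_return_prefix.
 */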
void
patch_coarse_exit_prefix(dcontext_t *dcontext, coarse_info_t *info)
{
ptr_uint_t *pc =
(ptr_uint_t *)(info->trace_head_return_prefix - JMP_LONG_LENGTH - sizeof(info));
*pc = (ptr_uint_t)info;
}
#ifdef HASHTABLE_STATISTICS
/* note that arch_thread_init is called before fragment_thread_init,
* so these need to be updated
*/
/* When used in a thread-shared routine, this routine clobbers XDI. The
* caller should spill & restore it or rematerialize it as needed. */
/* NOTE - this routine does NOT save the eflags, which will be clobbered by the
* inc */
void
append_increment_counter(dcontext_t *dcontext, instrlist_t *ilist, ibl_code_t *ibl_code,
patch_list_t *patch,
reg_id_t entry_register, /* register indirect (XCX) or NULL */
/* adjusted to unprot_ht_statistics_t if no entry_register */
uint counter_offset, reg_id_t scratch_register)
{
# ifdef X86
instr_t *counter;
# endif
bool absolute = !ibl_code->thread_shared_routine;
/* no support for absolute addresses on x64: we always use tls/reg */
IF_X64(ASSERT_NOT_IMPLEMENTED(!absolute));
if (!INTERNAL_OPTION(hashtable_ibl_stats))
return;
LOG(THREAD, LOG_EMIT, 3,
"append_increment_counter: hashtable_stats_offset=0x%x counter_offset=0x%x\n",
ibl_code->hashtable_stats_offset, counter_offset);
if (entry_register == REG_NULL) {
        /* adjust offset within an unprot_ht_statistics_t structure */
counter_offset += ibl_code->hashtable_stats_offset;
}
if (!absolute) {
opnd_t counter_opnd;
/* get dcontext in register (xdi) */
insert_shared_get_dcontext(dcontext, ilist, NULL, false /* dead register */);
/* XDI now has dcontext */
APP(ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG5 /*xdi/r5*/),
OPND_DC_FIELD(absolute, dcontext, OPSZ_PTR, FRAGMENT_FIELD_OFFSET)));
/* XDI now has per_thread_t structure */
/* an extra step here: find the unprot_stats field in the fragment_table_t
* could avoid for protect_mask==0 if we always had a copy
* in the per_thread_t struct -- see fragment.h, not worth it
*/
if (entry_register != REG_NULL) {
APP(ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG5 /*xdi/r5*/),
OPND_CREATE_MEMPTR(SCRATCH_REG5 /*xdi/r5*/,
ibl_code->entry_stats_to_lookup_table_offset)));
/* XDI should now have (entry_stats - lookup_table) value,
* so we need [xdi+xcx] to get an entry reference
*/
counter_opnd = opnd_create_base_disp(SCRATCH_REG5 /*xdi/r5*/, entry_register,
1, counter_offset, OPSZ_4);
} else {
APP(ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG5 /*xdi/r5*/),
OPND_CREATE_MEMPTR(SCRATCH_REG5 /*xdi/r5*/,
ibl_code->unprot_stats_offset)));
/* XDI now has unprot_stats structure */
counter_opnd = OPND_CREATE_MEM32(SCRATCH_REG5 /*xdi/r5*/, counter_offset);
}
# ifdef X86
counter = INSTR_CREATE_inc(dcontext, counter_opnd);
APP(ilist, counter);
# elif defined(ARM)
        /* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif
} else {
# ifdef X86
        /* TAKE_ADDRESS will in fact add the necessary base to the statistics
         * structure, hence no explicit indirection needed here.
         */
opnd_t counter_opnd = OPND_CREATE_MEMPTR(entry_register, counter_offset);
counter = INSTR_CREATE_inc(dcontext, counter_opnd);
/* hack to get both this table's unprot offset and the specific stat's offs */
ASSERT(counter_offset < USHRT_MAX);
if (entry_register != REG_NULL) {
/* although we currently don't use counter_offset,
* it doesn't hurt to support as well
*/
ASSERT(ibl_code->entry_stats_to_lookup_table_offset < USHRT_MAX);
add_patch_entry(patch, counter, PATCH_UNPROT_STAT | PATCH_TAKE_ADDRESS,
(ibl_code->entry_stats_to_lookup_table_offset << 16) |
counter_offset);
} else {
ASSERT(ibl_code->unprot_stats_offset < USHRT_MAX);
add_patch_entry(patch, counter, PATCH_UNPROT_STAT | PATCH_TAKE_ADDRESS,
(ibl_code->unprot_stats_offset << 16) | counter_offset);
}
APP(ilist, counter);
# elif defined(ARM)
        /* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif
}
}
#endif /* HASHTABLE_STATISTICS */
#ifdef INTERNAL
/* add a slowdown loop to measure if a routine is likely to be on a critical path */
/* note that FLAGS are clobbered */
static void
append_empty_loop(dcontext_t *dcontext, instrlist_t *ilist, uint iterations,
reg_id_t scratch_register)
{
# ifdef X86
instr_t *initloop;
instr_t *loop;
/* mov ebx, iterations */
/* loop: dec ebx */
/* jnz loop */
ASSERT(REG_NULL != scratch_register);
initloop = XINST_CREATE_load_int(dcontext, opnd_create_reg(scratch_register),
OPND_CREATE_INT32(iterations));
loop = INSTR_CREATE_dec(dcontext, opnd_create_reg(scratch_register));
APP(ilist, initloop);
APP(ilist, loop);
APP(ilist, INSTR_CREATE_jcc(dcontext, OP_jnz_short, opnd_create_instr(loop)));
# elif defined(ARM)
    /* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
# endif
}
#endif /* INTERNAL */
#if defined(X86) && defined(X64)
void
instrlist_convert_to_x86(instrlist_t *ilist)
{
instr_t *in;
for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) {
instr_set_x86_mode(in, true /*x86*/);
instr_shrink_to_32_bits(in);
}
}
#endif
#ifndef AARCH64
bool
instr_is_ibl_hit_jump(instr_t *instr)
{
/* ARM and x86 use XINST_CREATE_jump_mem() */
return instr_is_jump_mem(instr);
}
#endif
/* what we do on a hit in the hashtable */
/* Restore XBX saved from the indirect exit stub insert_jmp_to_ibl() */
/* Indirect jump through hashtable entry pointed to by XCX */
void
append_ibl_found(dcontext_t *dcontext, instrlist_t *ilist, ibl_code_t *ibl_code,
patch_list_t *patch, uint start_pc_offset, bool collision,
bool only_spill_state_in_tls, /* if true, no table info in TLS;
* indirection off of XDI is used */
bool restore_eflags, instr_t **fragment_found)
{
bool absolute = !ibl_code->thread_shared_routine;
bool target_prefix = true;
/* eflags and xcx are restored in the target's prefix */
/* if thread private routine */
/*>>> RESTORE_FROM_UPCONTEXT xbx_OFFSET,%xbx */
/*>>> jmp *FRAGMENT_START_PC_OFFS(%xcx) */
instr_t *inst = NULL;
IF_X86_64(bool x86_to_x64_ibl_opt =
(ibl_code->x86_to_x64_mode && DYNAMO_OPTION(x86_to_x64_ibl_opt));)
/* no support for absolute addresses on x64: we always use tls/reg */
IF_X64(ASSERT_NOT_IMPLEMENTED(!absolute));
if (absolute) {
inst = RESTORE_FROM_DC(dcontext, SCRATCH_REG1, SCRATCH_REG1_OFFS);
}
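    /* Without a target prefix there is nowhere downstream to restore eflags,
     * so we must restore them here.
     */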
if (!ibl_use_target_prefix(ibl_code)) {
target_prefix = false;
restore_eflags = true;
}
#ifdef HASHTABLE_STATISTICS
if (INTERNAL_OPTION(hashtable_ibl_stats) ||
INTERNAL_OPTION(hashtable_ibl_entry_stats)) {
if (!absolute && !only_spill_state_in_tls) {
/* XDI holds app state, not a ptr to dcontext+<some offset> */
APP(ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG5, HTABLE_STATS_SPILL_SLOT));
}
append_increment_counter(dcontext, ilist, ibl_code, patch, REG_NULL,
HASHLOOKUP_STAT_OFFS(hit), SCRATCH_REG1);
if (collision) {
append_increment_counter(dcontext, ilist, ibl_code, patch, REG_NULL,
HASHLOOKUP_STAT_OFFS(collision_hit), SCRATCH_REG1);
}
if (INTERNAL_OPTION(hashtable_ibl_entry_stats)) {
/* &lookup_table[i] - should allow access to &entry_stats[i] */
append_increment_counter(dcontext, ilist, ibl_code, patch, SCRATCH_REG2,
offsetof(fragment_stat_entry_t, hits), SCRATCH_REG1);
}
if (!absolute && !only_spill_state_in_tls)
APP(ilist, RESTORE_FROM_TLS(dcontext, SCRATCH_REG5, HTABLE_STATS_SPILL_SLOT));
}
#endif /* HASHTABLE_STATISTICS */
#ifdef INTERNAL
if (INTERNAL_OPTION(slowdown_ibl_found)) {
/* add a loop here */
append_empty_loop(dcontext, ilist, INTERNAL_OPTION(slowdown_ibl_found),
SCRATCH_REG1 /* dead */);
}
#endif /* INTERNAL */
if (restore_eflags) {
insert_restore_eflags(dcontext, ilist, NULL, 0, IBL_EFLAGS_IN_TLS(),
absolute _IF_X86_64(x86_to_x64_ibl_opt));
}
if (!target_prefix) {
/* We're going to clobber the xax slot */
ASSERT(restore_eflags);
/* For target_delete support with no prefix, since we're
* clobbering all the registers here, we must save something;
* We save the tag, rather than the table entry, to avoid an
* extra load to get the tag in target_delete:
* <save %xbx to xax slot> # put tag in xax slot for target_delete
*/
if (absolute) {
APP(ilist, SAVE_TO_DC(dcontext, SCRATCH_REG1, SCRATCH_REG0_OFFS));
} else {
APP(ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG1, DIRECT_STUB_SPILL_SLOT));
}
}
#if defined(X86) && defined(X64)
if (x86_to_x64_ibl_opt) {
APP(ilist, RESTORE_FROM_REG(dcontext, SCRATCH_REG1, REG_R10));
} else
#endif
if (absolute) {
/* restore XBX through dcontext */
APP(ilist, inst);
} else {
/* restore XBX through INDIRECT_STUB_SPILL_SLOT */
APP(ilist, RESTORE_FROM_TLS(dcontext, SCRATCH_REG1, INDIRECT_STUB_SPILL_SLOT));
DOCHECK(1, {
if (!SHARED_IB_TARGETS())
ASSERT(only_spill_state_in_tls);
});
}
if (only_spill_state_in_tls) {
/* If TLS doesn't hold table info, XDI was used for indirection.
* Restore XDI through DCONTEXT_BASE_SPILL_SLOT */
insert_shared_restore_dcontext_reg(dcontext, ilist, NULL);
}
if (target_prefix) {
/* FIXME: do we want this? seems to be a problem, I'm disabling:
* ASSERT(!collision || start_pc_offset == FRAGMENT_START_PC_OFFS)
*/
#ifdef AARCH64
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
#else
APP(ilist,
XINST_CREATE_jump_mem(dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG2, start_pc_offset)));
#endif
} else {
/* There is no prefix so we must restore all and jmp through memory:
* mov start_pc_offset(%xcx), %xcx
* <save %xcx to xbx slot> # put target in xbx slot for later jmp
* <restore %xcx from xcx slot>
* jmp* <xbx slot>
*/
APP(ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2),
OPND_CREATE_MEMPTR(SCRATCH_REG2, start_pc_offset)));
if (absolute) {
#ifdef X86
APP(ilist, SAVE_TO_DC(dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS));
if (IF_X64_ELSE(x86_to_x64_ibl_opt, false))
APP(ilist, RESTORE_FROM_REG(dcontext, SCRATCH_REG2, REG_R9));
else if (XCX_IN_TLS(0 /*!FRAG_SHARED*/)) {
APP(ilist,
RESTORE_FROM_TLS(dcontext, SCRATCH_REG2, MANGLE_XCX_SPILL_SLOT));
} else
APP(ilist, RESTORE_FROM_DC(dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS));
APP(ilist,
XINST_CREATE_jump_mem(
dcontext,
OPND_DC_FIELD(absolute, dcontext, OPSZ_PTR, SCRATCH_REG2_OFFS)));
#elif defined(AARCH64)
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569: NYI on AArch64 */
#elif defined(ARM)
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1551: NYI on ARM */
#endif
} else {
APP(ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG2, INDIRECT_STUB_SPILL_SLOT));
#if defined(X86) && defined(X64)
if (x86_to_x64_ibl_opt)
APP(ilist, RESTORE_FROM_REG(dcontext, SCRATCH_REG2, REG_R9));
else
#endif
APP(ilist,
RESTORE_FROM_TLS(dcontext, SCRATCH_REG2, MANGLE_XCX_SPILL_SLOT));
#ifdef AARCH64
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
#else
APP(ilist,
XINST_CREATE_jump_mem(dcontext,
OPND_TLS_FIELD(INDIRECT_STUB_SPILL_SLOT)));
#endif
}
}
if (fragment_found != NULL)
*fragment_found = inst;
}
static inline void
update_ibl_routine(dcontext_t *dcontext, ibl_code_t *ibl_code)
{
if (!ibl_code->initialized)
return;
patch_emitted_code(dcontext, &ibl_code->ibl_patch,
ibl_code->indirect_branch_lookup_routine);
DOLOG(2, LOG_EMIT, {
const char *ibl_name;
const char *ibl_brtype;
ibl_name = get_ibl_routine_name(
dcontext, ibl_code->indirect_branch_lookup_routine, &ibl_brtype);
LOG(THREAD, LOG_EMIT, 2, "Just updated indirect branch lookup\n%s_%s:\n",
ibl_name, ibl_brtype);
disassemble_with_annotations(
dcontext, &ibl_code->ibl_patch, ibl_code->indirect_branch_lookup_routine,
ibl_code->indirect_branch_lookup_routine + ibl_code->ibl_routine_length);
});
if (ibl_code->ibl_head_is_inlined) {
patch_emitted_code(dcontext, &ibl_code->ibl_stub_patch,
ibl_code->inline_ibl_stub_template);
DOLOG(2, LOG_EMIT, {
const char *ibl_name;
const char *ibl_brtype;
ibl_name = get_ibl_routine_name(
dcontext, ibl_code->indirect_branch_lookup_routine, &ibl_brtype);
LOG(THREAD, LOG_EMIT, 2,
"Just updated inlined stub indirect branch lookup\n%s_template_%s:\n",
ibl_name, ibl_brtype);
disassemble_with_annotations(
dcontext, &ibl_code->ibl_stub_patch, ibl_code->inline_ibl_stub_template,
ibl_code->inline_ibl_stub_template + ibl_code->inline_stub_length);
});
}
}
void
update_indirect_branch_lookup(dcontext_t *dcontext)
{
generated_code_t *code = THREAD_GENCODE(dcontext);
ibl_branch_type_t branch_type;
IF_ARM(dr_isa_mode_t old_mode;)
#ifdef X64
ASSERT(is_shared_gencode(code));
return; /* nothing to do: routines are all thread-shared */
#endif
#ifdef ARM
/* We need to switch to the mode of our gencode */
dr_set_isa_mode(dcontext, DEFAULT_ISA_MODE, &old_mode);
#endif
protect_generated_code(code, WRITABLE);
for (branch_type = IBL_BRANCH_TYPE_START; branch_type < IBL_BRANCH_TYPE_END;
branch_type++) {
update_ibl_routine(dcontext, &code->bb_ibl[branch_type]);
if (PRIVATE_TRACES_ENABLED() && !DYNAMO_OPTION(shared_trace_ibl_routine))
update_ibl_routine(dcontext, &code->trace_ibl[branch_type]);
}
#ifdef WINDOWS
/* update mask and table in inlined ibl at end of syscall routine */
if (DYNAMO_OPTION(shared_syscalls)) {
patch_emitted_code(dcontext, &code->shared_syscall_code.ibl_patch,
code->unlinked_shared_syscall);
DOLOG(2, LOG_EMIT, {
LOG(THREAD, LOG_EMIT, 2, "Just updated shared syscall routine:\n");
disassemble_with_annotations(dcontext, &code->shared_syscall_code.ibl_patch,
code->unlinked_shared_syscall,
code->end_shared_syscall);
});
}
#endif
protect_generated_code(code, READONLY);
#ifdef ARM
dr_set_isa_mode(dcontext, old_mode, NULL);
#endif
}
/* i#823: handle far cti transitions. For now only handling known cs values
* for WOW64 when using x64 DR, but we still use this far ibl so that in
* the future we can add general cs change handling outside of the
* fragment (which is much simpler: see below).
*
* One approach is to have the mode change happen in the fragment itself via
* ind branch mangling. But then we have the check for known cs there and
* thus multiple exits some of which are 32-bit and some of which are 64-bit
* which is messy. Instead, we spill another reg, put the selector in it,
* and jump to this ibl prefix routine. One drawback is that by not doing
* the mode transition in the fragment we give up on traces extending through
* it and we must make a far cti a trace barrier.
*
* fragment:
* spill xbx
* movzx selector -> xbx
* spill xcx
* mov target -> xcx
* jmp far_ibl
*
* far_ibl:
* clear top 32 bits of xcx slot
* xchg xcx, xbx
* lea xcx -32_bit_cs -> xcx
* jecxz to_32
* 64: (punting on handling cs o/w)
* xchg xcx, xbx
* restore xbx
* jmp 64-bit ibl
* to-32:
* dcontext -> ecx
* mov $1 -> x86_mode_offs(ecx)
* xchg xcx, xbx
* restore xbx
* far ind jmp through const mem that targets 32-bit ibl
*
* This is much simpler for state xl8: shouldn't need any added support.
* For unlinking: have two versions of the gencode, so the unlink
* is the standard fragment exit cti change only.
*
 * For non-mixed-mode, we just jmp straight to ibl.  It's simpler to
 * always generate and go through this far_ibl, rather than having interp
 * figure out up front whether a far direct cti needs a mode change and
 * then having far direct sometimes be direct and sometimes use the
 * indirect far ibl.
*
* For -x86_to_x64, we assume no 32-bit un-translated code entering here.
*
* FIXME i#865: for mixed-mode (including -x86_to_x64), far ibl must
* preserve the app's r8-r15 during 32-bit execution.
*/
byte *
emit_far_ibl(dcontext_t *dcontext, byte *pc, ibl_code_t *ibl_code,
cache_pc ibl_same_mode_tgt _IF_X86_64(far_ref_t *far_jmp_opnd))
{
instrlist_t ilist;
instrlist_init(&ilist);
#if defined(X86) && defined(X64)
if (mixed_mode_enabled()) {
instr_t *change_mode = INSTR_CREATE_label(dcontext);
bool source_is_x86 =
DYNAMO_OPTION(x86_to_x64) ? ibl_code->x86_to_x64_mode : ibl_code->x86_mode;
short selector = source_is_x86 ? CS64_SELECTOR : CS32_SELECTOR;
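        /* 'selector' is the cs of the opposite mode: the jecxz below fires
         * (reg == selector) exactly when a mode change is required.
         */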
/* all scratch space should be in TLS only */
ASSERT(ibl_code->thread_shared_routine || DYNAMO_OPTION(private_ib_in_tls));
if (ibl_code->x86_mode) {
/* we're going to look up rcx in ibl table but we only saved the
* bottom half so zero top half now
*/
APP(&ilist,
INSTR_CREATE_mov_imm(
dcontext,
opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT) + 4),
OPND_CREATE_INT32(0)));
}
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(SCRATCH_REG1),
opnd_create_reg(SCRATCH_REG2)));
        /* segment is just 2 bytes but we need an addr prefix if there's no rex prefix */
APP(&ilist,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(SCRATCH_REG2),
opnd_create_base_disp(SCRATCH_REG2, REG_NULL, 0, -selector, OPSZ_lea)));
APP(&ilist, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(change_mode)));
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(SCRATCH_REG1),
opnd_create_reg(SCRATCH_REG2)));
if (ibl_code->x86_to_x64_mode && DYNAMO_OPTION(x86_to_x64_ibl_opt)) {
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG1),
opnd_create_reg(REG_R10)));
} else {
APP(&ilist, RESTORE_FROM_TLS(dcontext, SCRATCH_REG1, MANGLE_FAR_SPILL_SLOT));
}
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ibl_same_mode_tgt)));
APP(&ilist, change_mode);
APP(&ilist,
instr_create_restore_from_tls(dcontext, SCRATCH_REG2, TLS_DCONTEXT_SLOT));
/* FIXME: for SELFPROT_DCONTEXT we'll need to exit to d_r_dispatch every time
* and add logic there to set x86_mode based on LINK_FAR.
* We do not want x86_mode sitting in unprotected_context_t.
*/
ASSERT_NOT_IMPLEMENTED(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)));
APP(&ilist,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEM8(SCRATCH_REG2, (int)offsetof(dcontext_t, isa_mode)),
OPND_CREATE_INT8(source_is_x86 ? DR_ISA_AMD64 : DR_ISA_IA32)));
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(SCRATCH_REG1),
opnd_create_reg(SCRATCH_REG2)));
if (ibl_code->x86_to_x64_mode && DYNAMO_OPTION(x86_to_x64_ibl_opt)) {
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG1),
opnd_create_reg(REG_R10)));
} else {
APP(&ilist, RESTORE_FROM_TLS(dcontext, SCRATCH_REG1, MANGLE_FAR_SPILL_SLOT));
}
if (ibl_code->x86_mode) {
/* FIXME i#865: restore 64-bit regs here */
} else if (ibl_code->x86_to_x64_mode && DYNAMO_OPTION(x86_to_x64_ibl_opt)) {
/* In the current mode, XCX is spilled into R9.
* After mode switch, will use MANGLE_XCX_SPILL_SLOT for spilling XCX.
*/
APP(&ilist, SAVE_TO_TLS(dcontext, REG_R9, MANGLE_XCX_SPILL_SLOT));
/* FIXME i#865: restore 64-bit regs here */
} else {
/* FIXME i#865: save 64-bit regs here */
/* In the current mode, XCX is spilled into MANGLE_XCX_SPILL_SLOT.
* After mode switch, will use R9 for spilling XCX.
*/
APP(&ilist, RESTORE_FROM_TLS(dcontext, REG_R9, MANGLE_XCX_SPILL_SLOT));
}
/* For now we assume we're WOW64 and thus in low 4GB. For general mixed-mode
* and reachability (xref i#774) we will need a trampoline in low 4GB.
* Note that targeting the tail of the not-taken jecxz above doesn't help
* b/c then that needs to be 32-bit reachable.
*/
ASSERT(CHECK_TRUNCATE_TYPE_uint((ptr_uint_t)far_jmp_opnd));
APP(&ilist,
INSTR_CREATE_jmp_far_ind(dcontext,
opnd_create_base_disp(REG_NULL, REG_NULL, 0,
(uint)(ptr_uint_t)far_jmp_opnd,
OPSZ_6)));
/* For -x86_to_x64, we can disallow 32-bit fragments from having
* indirect branches or far branches or system calls, and thus ibl
* is always 64-bit.
* Even if we allow 32-bit indirection, here we have to pick one
* lookup method, and we'd go w/ the most common, which would assume
* a 32-bit target has been translated: so even for a same-mode far
* cti in a 32-bit (untranslated) fragment, we'd want to do a mode
* change here.
*/
/* caller will set target: we just set selector */
far_jmp_opnd->selector =
DYNAMO_OPTION(x86_to_x64) ? CS64_SELECTOR : (ushort)selector;
if (ibl_code->x86_mode) {
instrlist_convert_to_x86(&ilist);
}
} else {
#endif
/* We didn't spill or store into xbx when mangling so just jmp to ibl.
* Note that originally I had the existence of far_ibl, and LINK_FAR,
* as X64 only, and only emitted far_ibl for mixed-mode. But given that
* it's simpler to have far direct as indirect all the time, I decided
* to also go through a far ibl all the time. Eventually to fully
* handle any cs change we'll want it this way.
*
* XXX i#823: store cs into xbx when mangling, and then do cs
* change here.
*/
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ibl_same_mode_tgt)));
#if defined(X86) && defined(X64)
}
#endif
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, true /*instr targets*/);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
#ifdef X86
static instr_t *
create_int_syscall_instr(dcontext_t *dcontext)
{
# ifdef WINDOWS
/* On windows should already be initialized by syscalls_init() */
ASSERT(get_syscall_method() != SYSCALL_METHOD_UNINITIALIZED);
/* int $0x2e */
if (DYNAMO_OPTION(sygate_int)) {
/* ref case 5217, we call to an existing int in NtYieldExecution
* to avoid tripping up Sygate. */
return INSTR_CREATE_call(dcontext, opnd_create_pc(int_syscall_address));
} else {
return INSTR_CREATE_int(dcontext, opnd_create_immed_int((char)0x2e, OPSZ_1));
}
# else
/* if uninitialized just guess int, we'll patch up later */
return INSTR_CREATE_int(dcontext, opnd_create_immed_int((char)0x80, OPSZ_1));
# endif
}
#endif
instr_t *
create_syscall_instr(dcontext_t *dcontext)
{
int method = get_syscall_method();
#ifdef AARCHXX
if (method == SYSCALL_METHOD_SVC || method == SYSCALL_METHOD_UNINITIALIZED) {
return INSTR_CREATE_svc(dcontext, opnd_create_immed_int((char)0x0, OPSZ_1));
}
#elif defined(X86)
if (method == SYSCALL_METHOD_INT || method == SYSCALL_METHOD_UNINITIALIZED) {
return create_int_syscall_instr(dcontext);
} else if (method == SYSCALL_METHOD_SYSENTER) {
return INSTR_CREATE_sysenter(dcontext);
} else if (method == SYSCALL_METHOD_SYSCALL) {
return INSTR_CREATE_syscall(dcontext);
}
# ifdef WINDOWS
else if (method == SYSCALL_METHOD_WOW64) {
if (get_os_version() < WINDOWS_VERSION_10) {
/* call *fs:0xc0 */
return INSTR_CREATE_call_ind(
dcontext,
opnd_create_far_base_disp(SEG_FS, REG_NULL, REG_NULL, 0, WOW64_TIB_OFFSET,
OPSZ_4_short2));
} else {
/* For Win10 we treat the call* to ntdll!Wow64SystemServiceCall
* (stored in wow64_syscall_call_tgt) as the syscall.
*/
return INSTR_CREATE_call(dcontext, opnd_create_pc(wow64_syscall_call_tgt));
}
}
# endif
#endif /* ARM/X86 */
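    /* The dangling else pairs with whichever if-chain the preprocessor
     * emitted above; reached only if no syscall method matched.
     */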
else {
ASSERT_NOT_REACHED();
return NULL;
}
}
#ifdef WINDOWS
/* Insert instructions after the syscall instruction (e.g., sysenter) to
 * restore the next tag target from the dcontext XSI slot to the %xcx
 * register so execution can continue.
* See the comment below for emit_shared_syscall about shared syscall
* handling.
*/
static void
insert_restore_target_from_dc(dcontext_t *dcontext, instrlist_t *ilist, bool all_shared)
{
ASSERT(IF_X64_ELSE(all_shared, true)); /* PR 244737 */
if (all_shared) {
APP(ilist,
instr_create_restore_from_dc_via_reg(dcontext, REG_NULL /*default*/,
SCRATCH_REG2, SCRATCH_REG4_OFFS));
} else {
APP(ilist,
instr_create_restore_from_dcontext(dcontext, SCRATCH_REG2,
SCRATCH_REG4_OFFS));
}
/* i#537: we push KiFastSystemCallRet on to the stack and adjust the
* next code to be executed at KiFastSystemCallRet.
*/
if (get_syscall_method() == SYSCALL_METHOD_SYSENTER &&
KiFastSystemCallRet_address != NULL) {
/* push adjusted ecx onto stack */
APP(ilist, INSTR_CREATE_push(dcontext, opnd_create_reg(SCRATCH_REG2)));
APP(ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(SCRATCH_REG2),
OPND_CREATE_INT32(KiFastSystemCallRet_address)));
}
}
/* All system call instructions turn into a jump to an exit stub that
* jumps here, with the xsi slot in dcontext (or the mangle-next-tag tls slot
* for -shared_fragment_shared_syscalls) containing the return address
* after the original system call instr, and xbx containing the linkstub ptr.
*
* Unlinked version of shared_syscall is needed, even though syscalls are
* not part of traces (we unlink for other reasons, like flushing or
* in-trace replacement).
* To make unlinked entry point, have to make completely separate routine
* that calls unlinked_ibl instead of indirect_branch_lookup, or else
* common linked case needs an extra conditional. I chose the latter
* approach. I figure an extra load and jecxz won't be noticeable.
* Another reason is that this approach means there is a single system
* call instruction to check for suspended threads at, instead of two.
* To make the jecxz match forward-not-taken I actually add another store
* on the linked path.
* FIXME: is this a perf hit that makes it worth the code complexity
* of two syscall routines?
* FIXME: The 'target_trace_table' indicates whether the trace or BB IBT
 * table should be targeted. If BB2BB IBL is used (when trace building is
* not disabled), then both traces and BBs use the same shared syscall.
* (We emit only one.) So we can't target the BB table since that would
* result in missed opportunities to mark secondary trace heads (trace->BB
* IB transitions after shared syscall). So for BB2BB IBL this could be
* a perf hit, but not a regression compared to not using BB2BB IBL. More
* comments below in the routine.
*
_unlinked_shared_syscall:
SAVE_TO_UPCONTEXT $0,xax_OFFSET # flag: use unlinked ibl; xcx tls if all_shared
jmp skip_linked
_shared_syscall:
SAVE_TO_UPCONTEXT $1,xax_OFFSET # flag: use regular ibl; xcx tls if all_shared
skip_linked:
.ifdef SIDELINE
# clear cur-trace field so we don't think cur trace is still running
mov $0, _sideline_trace
.endif
.if all_shared
SAVE_TO_TLS xdi, xdi_offset
RESTORE_FROM_TLS xdi, dcontext_offset
.endif
.if !all_shared && DYNAMO_OPTION(shared_fragment_shared_syscalls)
.if !sysenter_syscall_method
LOAD_FROM_TLS MANGLE_NEXT_TAG_SLOT,%xdi
SAVE_TO_UPCONTEXT %xdi,xsi_OFFSET
.endif
RESTORE_FROM_TLS xdi_OFFSET
.endif
# make registers have app values for interrupt
.if !INTERNAL_OPTION(shared_syscalls_fastpath)
SAVE_TO_UPCONTEXT %xbx,xdi_OFFSET # save linkstub ptr
.if all_shared
# get next_tag (from xcx tls slot) into upcontext, for callback dcontext swap
RESTORE_FROM_TLS xbx, mangle_next_tag_slot
SAVE_TO_UPCONTEXT xbx, xsi_OFFSET
.endif
# %xbx is stored in TLS if shared fragments can target shared syscall
.if DYNAMO_OPTION(shared_fragment_shared_syscalls)
LOAD_FROM_TLS INDIRECT_STUB_SPILL_SLOT,%xbx # restore app's xbx
.else
RESTORE_FROM_UPCONTEXT xbx_OFFSET,%xbx # restore app's xbx
.endif
.endif
.if sysenter_syscall_method
pop xsi_OFFSET
push <after-syscall-address>
.endif
# even if !DYNAMO_OPTION(syscalls_synch_flush) must set for reset
movl 1, at_syscall_OFFSET # indicate to flusher we're in a syscall
.if all_shared
SAVE_TO_UPCONTEXT xdi, xdi_offset
RESTORE_FROM_TLS xdi, xdi_offset
.endif
# system call itself
int $0x2e
# kernel may decide to run a callback here...but when we come
# back we can't tell the difference
.if all_shared
RESTORE_FROM_TLS xdi, dcontext_offset
.endif
# even if !DYNAMO_OPTION(syscalls_synch_flush) must clear for cbret
movl 0, at_syscall_OFFSET # indicate to flusher/d_r_dispatch we're done w/ syscall
# assume interrupt could have changed register values
.if !inline_ibl_head # else, saved inside inlined ibl
# for shared_fragment_shared_syscalls = true, absolute != true
.if !DYNAMO_OPTION(shared_fragment_shared_syscalls)
SAVE_TO_UPCONTEXT %xbx,xbx_OFFSET
.endif
.if !absolute
SAVE_TO_TLS %xbx,INDIRECT_STUB_SPILL_SLOT
.endif
.if !INTERNAL_OPTION(shared_syscalls_fastpath)
RESTORE_FROM_UPCONTEXT xdi_OFFSET,%xbx # bring back linkstub ptr
.endif
.endif
# now set up for indirect_branch_lookup
.if !DYNAMO_OPTION(shared_fragment_shared_syscalls)
SAVE_TO_UPCONTEXT %xcx,xcx_OFFSET
.endif
.if !absolute && !all_shared
SAVE_TO_TLS %xcx,MANGLE_XCX_SPILL_SLOT
.endif
.if all_shared
xchg xcx-tls, xcx # get link/unlink flag, and save app xcx, at once
.if x64
mov ecx,ecx # clear top 32 bits of flag
.endif
.else
RESTORE_FROM_UPCONTEXT xax_OFFSET,%xcx # get link/unlink flag
.endif
# patch point: jecxz -> jmp for shared_syscall unlink
jecxz unlink
.if INTERNAL_OPTION(shared_syscalls_fastpath)
mov shared-syscalls-bb-linkstub, %xbx # set linkstub ptr
.if inline_ibl_head
SAVE_TO_UPCONTEXT %xbx,xdi_OFFSET # save linkstub ptr
.endif
.endif
# linked code
RESTORE_FROM_UPCONTEXT xsi_OFFSET,%xcx # bring back return address
.if !inline_ibl_head
jmp _indirect_branch_lookup
.else
# inline ibl lookup head here! (don't need unlink/miss, already did
# that work, miss goes straight to ibl routine)
.endif
unlink:
# unlinked code
RESTORE_FROM_UPCONTEXT xsi_OFFSET,%xcx # bring back return address
.if !inline_ibl_head
mov @shared_syscall_unlinked_linkstub,%xbx
.else
.if absolute
SAVE_TO_UPCONTEXT @shared_syscall_unlinked_linkstub,xdi_OFFSET
.else
SAVE_TO_TLS @shared_syscall_unlinked_linkstub,INDIRECT_STUB_SPILL_SLOT
.endif
.if !DYNAMO_OPTION(atomic_inlined_linking)
SAVE_TO_UPCONTEXT %xcx,xbx_offset
movb $0x1, %cl
.else
SAVE_TO_UPCONTEXT %xbx,xbx_OFFSET # could have changed in kernel
.endif
.endif
jmp _unlinked_ib_lookup
*/
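/* To make the link-flag protocol above concrete: both entry points write the
 * same slot (the xax upcontext slot, or the xcx TLS slot when all_shared) --
 * 1 on the linked path, 0 on the unlinked path -- and post-syscall that value
 * is loaded into xcx, so "jecxz unlink" falls through only on the linked path.
 * Unlinking additionally patches the jecxz opcode itself into a short jmp
 * (see link/unlink_shared_syscall_common below), which is why the jecxz is
 * recorded as a patch point via sys_unlink_offs.
 */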
byte *
emit_shared_syscall(dcontext_t *dcontext, generated_code_t *code, byte *pc,
ibl_code_t *ibl_code, patch_list_t *patch, byte *ind_br_lookup_pc,
byte *unlinked_ib_lookup_pc, bool target_trace_table,
bool inline_ibl_head, bool thread_shared, byte **shared_syscall_pc)
{
instrlist_t ilist;
byte *start_pc = pc;
instr_t *syscall; /* remember after-syscall pc b/c often suspended there */
/* relative labels */
instr_t *linked, *jecxz, *unlink, *skip_syscall = NULL;
bool absolute = !thread_shared;
uint after_syscall_ptr = 0;
uint syscall_method = get_syscall_method();
instr_t *adjust_tos;
/* thread_shared indicates whether ibl is thread-shared: this bool indicates
* whether this routine itself is all thread-shared */
bool all_shared = IF_X64_ELSE(true, false); /* PR 244737 */
IF_X64(bool x86_to_x64_ibl_opt =
ibl_code->x86_to_x64_mode && DYNAMO_OPTION(x86_to_x64_ibl_opt);)
/* no support for absolute addresses on x64: we always use tls */
IF_X64(ASSERT_NOT_IMPLEMENTED(!absolute));
/* x64 always shares shared_syscall fragments */
IF_X64(ASSERT_NOT_IMPLEMENTED(DYNAMO_OPTION(shared_fragment_shared_syscalls)));
/* PR 248207: haven't updated the inlining to be x64-compliant yet */
IF_X64(ASSERT_NOT_IMPLEMENTED(!inline_ibl_head));
/* i#821/PR 284029: for now we assume there are no syscalls in x86 code.
* To support them we need to update this routine, emit_do_syscall*,
* and emit_detach_callback_code().
*/
IF_X86_64(ASSERT_NOT_IMPLEMENTED(!ibl_code->x86_mode));
/* ibl_code was not initialized by caller */
ibl_code->thread_shared_routine = thread_shared;
ibl_code->branch_type = IBL_SHARED_SYSCALL;
/* initialize the ilist */
instrlist_init(&ilist);
init_patch_list(patch, absolute ? PATCH_TYPE_ABSOLUTE : PATCH_TYPE_INDIRECT_XDI);
/* We should generate some thread-shared code when
* shared_fragment_shared_syscalls=true. */
DOCHECK(1, {
if (DYNAMO_OPTION(shared_fragment_shared_syscalls))
ASSERT(!absolute);
});
LOG(THREAD, LOG_EMIT, 3,
"emit_shared_syscall: pc=" PFX " patch=" PFX
" inline_ibl_head=%d thread shared=%d\n",
pc, patch, inline_ibl_head, thread_shared);
/* FIXME: could save space by storing a single byte, and using movzx into ecx
* below before the jecxz
*/
if (all_shared) {
/* xax and xbx tls slots are taken so we use xcx */
# ifdef X64
if (x86_to_x64_ibl_opt) {
linked = INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_R9D),
OPND_CREATE_INT32(1));
} else {
# endif
linked = XINST_CREATE_store(dcontext,
OPND_TLS_FIELD_SZ(MANGLE_XCX_SPILL_SLOT, OPSZ_4),
OPND_CREATE_INT32(1));
# ifdef X64
}
# endif
} else
linked = instr_create_save_immed32_to_dcontext(dcontext, 1, SCRATCH_REG0_OFFS);
APP(&ilist, linked);
add_patch_marker(patch, instrlist_first(&ilist), PATCH_ASSEMBLE_ABSOLUTE,
0 /* beginning of instruction */, (ptr_uint_t *)shared_syscall_pc);
# ifdef SIDELINE
if (dynamo_options.sideline) {
/* clear cur-trace field so we don't think cur trace is still running */
APP(&ilist,
XINST_CREATE_store(dcontext,
OPND_CREATE_ABSMEM((void *)&sideline_trace, OPSZ_4),
OPND_CREATE_INT32(0)));
}
# endif
if (all_shared) {
/* load %xdi w/ dcontext */
insert_shared_get_dcontext(dcontext, &ilist, NULL, true /*save xdi*/);
}
/* for all-shared we move next tag from tls down below once xbx is dead */
if (!all_shared && DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
if (syscall_method != SYSCALL_METHOD_SYSENTER) {
/* Move the next tag field from TLS into the proper slot. */
APP(&ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG5),
opnd_create_tls_slot(os_tls_offset(MANGLE_NEXT_TAG_SLOT))));
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG5, SCRATCH_REG4_OFFS));
}
/* restore app %xdi */
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
}
/* put linkstub ptr in slot such that when inlined it will be
* in the right place in case of a miss */
if (!INTERNAL_OPTION(shared_syscalls_fastpath) && DYNAMO_OPTION(indirect_stubs)) {
/* even if inline_ibl_head and !absolute, we must put into mcontext
* here since tls is not saved on callback stack
*/
if (all_shared) {
APP(&ilist,
instr_create_save_to_dc_via_reg(dcontext, REG_NULL /*default*/,
SCRATCH_REG1, SCRATCH_REG5_OFFS));
} else {
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG1, SCRATCH_REG5_OFFS));
}
} else {
/* FIXME: for -no_indirect_stubs, we need our own complete ibl
* here in order to use our own linkstub_t. For now we just use
* a trace jmp* linkstub_t from the ibl we target, making every
* post-non-ignorable-syscall fragment a trace head.
*/
}
if (all_shared) {
/* move next_tag from tls into dcontext, for callback dcontext swap,
* using dead xbx */
if (!DYNAMO_OPTION(indirect_stubs)) {
/* xbx isn't dead */
APP(&ilist,
instr_create_save_to_tls(dcontext, SCRATCH_REG1,
INDIRECT_STUB_SPILL_SLOT));
}
APP(&ilist,
instr_create_restore_from_tls(dcontext, SCRATCH_REG1, MANGLE_NEXT_TAG_SLOT));
APP(&ilist,
instr_create_save_to_dc_via_reg(dcontext, REG_NULL /*default*/, SCRATCH_REG1,
SCRATCH_REG4_OFFS));
if (!DYNAMO_OPTION(indirect_stubs)) {
/* restore xbx */
APP(&ilist,
instr_create_restore_from_tls(dcontext, SCRATCH_REG1,
INDIRECT_STUB_SPILL_SLOT));
}
}
/* make registers have app values for the interrupt */
/* restore app's xbx (if we went through a stub to get here) */
if (!INTERNAL_OPTION(shared_syscalls_fastpath) && DYNAMO_OPTION(indirect_stubs)) {
if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
APP(&ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG1),
opnd_create_tls_slot(os_tls_offset(INDIRECT_STUB_SPILL_SLOT))));
} else {
APP(&ilist,
instr_create_restore_from_dcontext(dcontext, SCRATCH_REG1,
SCRATCH_REG1_OFFS));
}
}
if (syscall_method == SYSCALL_METHOD_SYSENTER) {
/* PR 248210: not bothering to make x64-ready: if we do, be sure to pop into
* next-tag tls */
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
/* For sysenter, mangle pushed the next tag onto the stack,
* so we pop it into the xsi slot and push the [to-be-patched]
* after-syscall address.
*/
/* We have to save xsp in case a callback is delivered and we later detach
* (since detach expects the callback dcontext xsp to be correct). xref 9889 */
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_XSP, XSP_OFFSET));
APP(&ilist,
INSTR_CREATE_pop(dcontext,
opnd_create_dcontext_field(dcontext, SCRATCH_REG4_OFFS)));
adjust_tos = INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0));
APP(&ilist, adjust_tos);
add_patch_marker(patch, adjust_tos, PATCH_ASSEMBLE_ABSOLUTE,
1 /* offset of imm field */, (ptr_uint_t *)&after_syscall_ptr);
}
/* even if !DYNAMO_OPTION(syscalls_synch_flush) must set for reset */
ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)));
if (all_shared) {
/* readers of at_syscall are ok w/ us not quite having xdi restored yet */
APP(&ilist,
XINST_CREATE_store(
dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL /*default*/,
AT_SYSCALL_OFFSET, OPSZ_1),
OPND_CREATE_INT8(1)));
/* restore app %xdi */
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
} else
APP(&ilist, instr_create_save_immed8_to_dcontext(dcontext, 1, AT_SYSCALL_OFFSET));
if (DYNAMO_OPTION(sygate_sysenter) &&
get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* PR 248210: not bothering to make x64-ready */
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
/* case 5441 hack - set up stack so first return address points to ntdll
* Won't worry about arithmetic eflags since no one should care about
* those at a syscall, will preserve other regs though. */
/* FIXME - what is the perf impact of these extra 5 instructions, we can
* prob. do better. */
/* note we assume xsp == xdx (if doesn't we already have prob. ref
* case 5461) */
/* current state
* xsi_slot = next_pc
* xsp -> after_shared_syscall
* +4 -> app value1
* desired state
* sysenter_storage_slot = app_value1
* xsp -> sysenter_ret_address (ntdll ret)
* +4 -> after_shared_syscall
*/
/* NOTE - the stack mangling must match that of handle_system_call()
* and intercept_nt_continue() as not all routines looking at the stack
* differentiate. */
/* pop stack leaving old value (after_shared_syscall) in place */
APP(&ilist,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XSP), OPND_CREATE_INT8(4)));
APP(&ilist,
INSTR_CREATE_pop(
dcontext, opnd_create_dcontext_field(dcontext, SYSENTER_STORAGE_OFFSET)));
/* instead of pulling in the existing stack value we could just patch in
* the after syscall imm */
/* see intel docs, source calculated before xsp dec'ed so we're pushing two
* stack slots up into the next slot up */
APP(&ilist, INSTR_CREATE_push(dcontext, OPND_CREATE_MEM32(REG_XSP, -8)));
APP(&ilist,
INSTR_CREATE_push_imm(dcontext,
OPND_CREATE_INTPTR((ptr_int_t)sysenter_ret_address)));
}
/* syscall itself */
APP(&ilist, create_syscall_instr(dcontext));
syscall = instrlist_last(&ilist);
if (DYNAMO_OPTION(sygate_sysenter) &&
get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* PR 248210: not bothering to make x64-ready */
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
/* case 5441 hack - we popped an extra stack slot, need to fill with saved
* app value */
APP(&ilist,
INSTR_CREATE_push(
dcontext, opnd_create_dcontext_field(dcontext, SYSENTER_STORAGE_OFFSET)));
}
/* Now that all instructions from the linked entry point up to and
* including the syscall have been added, prepend the unlinked path
* instructions. We wait until the syscall has been added because when
* shared_syscalls_fastpath = true and "int 2e" syscalls are used, the
* target of the unlinked path's jmp is the syscall itself.
*/
/* these two in reverse order since prepended */
instrlist_prepend(
&ilist, XINST_CREATE_jump(dcontext, opnd_create_instr(instr_get_next(linked))));
if (all_shared) {
/* xax and xbx tls slots are taken so we use xcx */
# ifdef X64
if (x86_to_x64_ibl_opt) {
instrlist_prepend(&ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_R9D),
OPND_CREATE_INT32(0)));
} else {
# endif
instrlist_prepend(
&ilist,
XINST_CREATE_store(dcontext,
/* simpler to do 4 bytes even on x64 */
OPND_TLS_FIELD_SZ(MANGLE_XCX_SPILL_SLOT, OPSZ_4),
OPND_CREATE_INT32(0)));
# ifdef X64
}
# endif
} else {
instrlist_prepend(
&ilist,
instr_create_save_immed32_to_dcontext(dcontext, 0, SCRATCH_REG0_OFFS));
}
/* even if !DYNAMO_OPTION(syscalls_synch_flush) must clear for cbret */
if (all_shared) {
/* readers of at_syscall are ok w/ us spilling xdi first */
insert_shared_get_dcontext(dcontext, &ilist, NULL, true /*save xdi*/);
APP(&ilist,
XINST_CREATE_store(
dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL /*default*/,
AT_SYSCALL_OFFSET, OPSZ_1),
OPND_CREATE_INT8(0)));
} else
APP(&ilist, instr_create_save_immed8_to_dcontext(dcontext, 0, AT_SYSCALL_OFFSET));
if (!inline_ibl_head && DYNAMO_OPTION(indirect_stubs)) {
/* FIXME Can we remove the write to the mcontext for the !absolute
* case? Initial tests w/notepad crashed when doing so -- we should
* look deeper.
*/
/* save app's xbx (assume interrupt could have changed it) */
/* Remember, shared_fragment_shared_syscalls=true means absolute=false,
* so for shared_fragment_shared_syscalls=true %xbx is saved in
* the !absolute "if" that follows.
*/
if (!DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG1, SCRATCH_REG1_OFFS));
}
if (!absolute) {
/* save xbx in TLS so that downstream code can find it */
APP(&ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG1, INDIRECT_STUB_SPILL_SLOT));
}
if (!INTERNAL_OPTION(shared_syscalls_fastpath)) {
if (all_shared) {
APP(&ilist,
instr_create_restore_from_dc_via_reg(
dcontext, REG_NULL /*default*/, SCRATCH_REG1, SCRATCH_REG5_OFFS));
} else {
APP(&ilist,
instr_create_restore_from_dcontext(dcontext, SCRATCH_REG1,
SCRATCH_REG5_OFFS));
}
}
} /* if inlined, xbx will be saved inside inlined ibl; if no indirect stubs,
* xbx will be saved in the ibl routine, or not at all if unlinked
*/
/* set up for indirect_branch_lookup */
/* save app's xcx */
if (!DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS));
}
/* FIXME Can we remove the write to the mcontext for the !absolute
* case, as suggested above? */
if (!absolute && !all_shared /*done later*/) {
/* save xcx in TLS */
# ifdef X64
if (x86_to_x64_ibl_opt)
APP(&ilist, SAVE_TO_REG(dcontext, SCRATCH_REG2, REG_R9));
else
# endif
APP(&ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG2, MANGLE_XCX_SPILL_SLOT));
}
if (!INTERNAL_OPTION(shared_syscalls_fastpath)) {
if (inline_ibl_head && DYNAMO_OPTION(indirect_stubs)) {
/* Need to move linkstub ptr from mcontext->xdi into tls.
* We couldn't put it directly there pre-syscall b/c tls
* is not saved on callback stack!
* We do this now to take advantage of xcx being dead.
*/
APP(&ilist,
instr_create_restore_from_dcontext(dcontext, SCRATCH_REG2,
SCRATCH_REG5_OFFS));
APP(&ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG2, TLS_REG3_SLOT));
}
}
/* get link flag */
unlink = INSTR_CREATE_label(dcontext);
if (all_shared) {
/* we stored 4 bytes so get 4 bytes back; save app xcx at same time */
# ifdef X64
if (x86_to_x64_ibl_opt) {
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_R9),
opnd_create_reg(SCRATCH_REG2)));
} else {
# endif
APP(&ilist,
INSTR_CREATE_xchg(dcontext, OPND_TLS_FIELD(MANGLE_XCX_SPILL_SLOT),
opnd_create_reg(SCRATCH_REG2)));
# ifdef X64
}
/* clear top 32 bits */
APP(&ilist,
XINST_CREATE_store(dcontext, opnd_create_reg(REG_ECX),
opnd_create_reg(REG_ECX)));
# endif
/* app xdi is restored later after we've restored next_tag from xsi slot */
} else {
APP(&ilist,
instr_create_restore_from_dcontext(dcontext, SCRATCH_REG2,
SCRATCH_REG0_OFFS));
}
jecxz = INSTR_CREATE_jecxz(dcontext, opnd_create_instr(unlink));
APP(&ilist, jecxz);
/* put linkstub ptr in xbx */
if (INTERNAL_OPTION(shared_syscalls_fastpath) && DYNAMO_OPTION(indirect_stubs)) {
APP(&ilist,
INSTR_CREATE_mov_imm(
dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_INTPTR((ptr_int_t)get_shared_syscalls_bb_linkstub())));
/* put linkstub ptr in slot such that when inlined it will be
* in the right place in case of a miss */
if (inline_ibl_head) {
if (absolute) {
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG1,
SCRATCH_REG5_OFFS));
} else {
APP(&ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG1, TLS_REG3_SLOT));
}
}
} /* else case is up above to use dead xcx reg */
/* Add a patch marker once we know that there's an instr in the ilist
* after the syscall. */
add_patch_marker(patch, instr_get_next(syscall) /* take addr of next instr */,
PATCH_UINT_SIZED /* pc relative */, 0 /* beginning of instruction */,
(ptr_uint_t *)&code->sys_syscall_offs);
add_patch_marker(patch, jecxz, PATCH_UINT_SIZED /* pc relative */,
0 /* point at opcode of jecxz */,
(ptr_uint_t *)&code->sys_unlink_offs);
/* put return address in xcx (was put in xsi slot by mangle.c, or in tls
* by mangle.c and into xsi slot before syscall for all_shared) */
/* we duplicate the restore from dc and restore of xdi on the link
* and unlink paths, rather than putting next_tag back into tls here
* (can't rely on that tls slot persisting over syscall w/ callbacks)
*/
insert_restore_target_from_dc(dcontext, &ilist, all_shared);
if (all_shared) {
/* restore app %xdi */
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
}
/* FIXME As noted in the routine's header comments, shared syscall targets
* the trace [IBT] table when both traces and BBs could be using it (when
* trace building is not disabled). Ideally, we want traces to target the
* trace table and BBs to target the BB table (when BB2BB IBL is on, that is).
* Since the BB IBT table usually holds non-trace head BBs as well as traces
* (including traces is option controlled), using it will doubtless lead to
* higher IBL hit rate, though it's unclear if there would be a visible
* impact on performance. Since BBs and traces use different fake linkstubs
* when executing thru shared syscall, we can detect what the last fragment
* was and conditionally jump to the ideal IBL routine.
*
* Since the EFLAGS at this point hold app state, we'd need to save/restore
* them prior to executing the IBL code if we used a 'cmp' followed by cond.
* branch. Or we could save the EFLAGS and jump to a new entry point in the
* IBL, one just after the 'seto'. (We'd have to move any load of %xdi
* with the dcontext to just below the 'seto'.)
*
* We could avoid conditional code altogether if both inline_trace_ibl
* and inline_bb_ibl are false. Instead of passing fake linkstub addresses
* from a fragment exit stub through shared syscall, we could pass the
* address of the IBL routine to jump to -- BB IBL for BBs and trace IBL
* for traces. Shared syscall would do an indirect jump to reach the proper
* routine. On an IBL miss, the address is passed through to d_r_dispatch, which
* can convert the address into the appropriate fake linkstub address (check
* if the address is within emitted code and equals either BB or trace IBL.)
* Since an address is being passed around and saved to the dcontext during
* syscalls, some of which could be relatively long, this is a security
* hole.
*/
if (!inline_ibl_head) {
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ind_br_lookup_pc)));
} else {
append_ibl_head(dcontext, &ilist, ibl_code, patch, NULL, NULL, NULL,
opnd_create_pc(ind_br_lookup_pc),
false /*miss cannot have 8-bit offs*/, target_trace_table,
inline_ibl_head);
}
/* unlink path (there can be no fall-through) */
APP(&ilist, unlink);
/* we duplicate the restore from dc and restore of xdi on the link
* and unlink paths: see note above */
insert_restore_target_from_dc(dcontext, &ilist, all_shared);
if (all_shared) {
/* restore app %xdi */
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
}
    /* When traversing the unlinked entry path, since IBL is bypassed,
     * control reaches d_r_dispatch, and the target is (usually) added to the IBT
* table. But since the unlinked path was used, the target may already be
* present in the table so the add attempt is unnecessary and triggers an
* ASSERT in fragment_add_ibl_target().
*
* The add attempt is bypassed by moving an unlinked linkstub ptr into the
* correct place -- for inlined IBL, the %xdi slot, otherwise, %xbx. This will
* identify exits from the unlinked path. The stub's flags are set to 0
* to bypass the add IBL target attempt.
*/
if (!inline_ibl_head) {
if (DYNAMO_OPTION(indirect_stubs)) {
APP(&ilist,
INSTR_CREATE_mov_imm(
dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_INTPTR(
(ptr_int_t)get_shared_syscalls_unlinked_linkstub())));
}
} else {
if (absolute) {
APP(&ilist,
instr_create_save_immed32_to_dcontext(
dcontext, (int)(ptr_int_t)get_shared_syscalls_unlinked_linkstub(),
SCRATCH_REG5_OFFS));
} else {
APP(&ilist,
XINST_CREATE_store(
dcontext, OPND_TLS_FIELD(TLS_REG3_SLOT),
OPND_CREATE_INTPTR(
(ptr_int_t)get_shared_syscalls_unlinked_linkstub())));
}
if (!DYNAMO_OPTION(atomic_inlined_linking)) {
/* we need to duplicate the emit_inline_ibl_stub unlinking race
* condition detection code here, before we jump to unlink
*/
/*
* # set flag in xcx (bottom byte = 0x1) so that unlinked path can
* # detect race condition during unlinking
* 2 movb $0x1, %cl
*/
/* we expect target saved in xbx_offset */
if (absolute) {
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG2,
SCRATCH_REG1_OFFS));
} else
APP(&ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG2, TLS_REG1_SLOT));
APP(&ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_CL),
OPND_CREATE_INT8(1)));
} else {
/* xbx could have changed in kernel, unlink expects it saved */
if (absolute) {
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG1,
SCRATCH_REG1_OFFS));
} else
APP(&ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG1, TLS_REG1_SLOT));
}
}
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(unlinked_ib_lookup_pc)));
pc += encode_with_patch_list(dcontext, patch, &ilist, pc);
if (syscall_method == SYSCALL_METHOD_SYSENTER) {
ASSERT(after_syscall_ptr != 0);
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
*((uint *)(ptr_uint_t)after_syscall_ptr) =
(uint)(ptr_uint_t)(code->unlinked_shared_syscall + code->sys_syscall_offs);
}
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
static byte *
emit_dispatch_template(dcontext_t *dcontext, byte *pc, uint offset)
{
instrlist_t ilist;
/* PR 244737: we don't use this for x64 b/c syscall routines are thread-shared */
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
/* initialize the ilist */
instrlist_init(&ilist);
/* load %edi w/the dcontext */
insert_shared_get_dcontext(dcontext, &ilist, NULL, true);
/* load the generated_code_t address */
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(REG_EDI),
OPND_DC_FIELD(false, dcontext, OPSZ_PTR, PRIVATE_CODE_OFFSET)));
/* jump thru the address in the offset */
APP(&ilist, XINST_CREATE_jump_mem(dcontext, OPND_CREATE_MEM32(REG_EDI, offset)));
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, false /* no instr targets */);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
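/* For reference, the emitted template is tiny; roughly (32-bit, illustrative
 * -- the exact spill sequence comes from insert_shared_get_dcontext):
 *   mov  %xdi -> <tls xdi slot>            ; spill app xdi
 *   mov  <dcontext> -> %edi
 *   mov  PRIVATE_CODE_OFFSET(%edi) -> %edi ; load generated_code_t ptr
 *   jmp  *offset(%edi)                     ; linked or unlinked variant
 */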
byte *
emit_shared_syscall_dispatch(dcontext_t *dcontext, byte *pc)
{
return emit_dispatch_template(dcontext, pc,
offsetof(generated_code_t, shared_syscall));
}
byte *
emit_unlinked_shared_syscall_dispatch(dcontext_t *dcontext, byte *pc)
{
return emit_dispatch_template(dcontext, pc,
offsetof(generated_code_t, unlinked_shared_syscall));
}
/* Links the shared_syscall routine to go directly to the indirect branch
* lookup routine.
* If it is already linked, does nothing.
* Assumes caller takes care of any synchronization if this is called
* from other than the owning thread!
*/
/* NOTE the link/unlink of shared syscall is atomic w/respect to threads in the
 * cache since it is only a single-byte write (always atomic). */
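/* For concreteness, this relies on the usual single-byte x86 opcode encodings
 * (which is what makes the write atomic):
 *   linked:    0xe3 <disp8>   jecxz unlink
 *   unlinked:  0xeb <disp8>   jmp   unlink
 * Only the opcode byte at sys_unlink_offs is flipped; the rel8 displacement
 * is shared by both forms.
 */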
static void
link_shared_syscall_common(generated_code_t *code)
{
/* strategy: change "jmp unlink" back to "jecxz unlink" */
cache_pc pc;
if (code == NULL) /* shared_code_x86 */
return;
pc = code->unlinked_shared_syscall + code->sys_unlink_offs;
if (*pc != JECXZ_OPCODE) {
protect_generated_code(code, WRITABLE);
ASSERT(*pc == JMP_SHORT_OPCODE);
*pc = JECXZ_OPCODE;
protect_generated_code(code, READONLY);
}
}
void
link_shared_syscall(dcontext_t *dcontext)
{
ASSERT(IS_SHARED_SYSCALL_THREAD_SHARED || dcontext != GLOBAL_DCONTEXT);
if (dcontext == GLOBAL_DCONTEXT) {
link_shared_syscall_common(SHARED_GENCODE(GENCODE_X64));
# ifdef X64
/* N.B.: there are no 32-bit syscalls for WOW64 with 64-bit DR (i#821) */
if (DYNAMO_OPTION(x86_to_x64))
link_shared_syscall_common(SHARED_GENCODE(GENCODE_X86_TO_X64));
# endif
} else
link_shared_syscall_common(THREAD_GENCODE(dcontext));
}
/* Unlinks the shared_syscall routine so it goes back to d_r_dispatch after
* the system call itself.
* If it is already unlinked, does nothing.
* Assumes caller takes care of any synchronization if this is called
* from other than the owning thread!
*/
static void
unlink_shared_syscall_common(generated_code_t *code)
{
/* strategy: change "jecxz unlink" to "jmp unlink" */
cache_pc pc;
if (code == NULL) /* shared_code_x86 */
return;
pc = code->unlinked_shared_syscall + code->sys_unlink_offs;
if (*pc != JMP_SHORT_OPCODE) {
protect_generated_code(code, WRITABLE);
ASSERT(*pc == JECXZ_OPCODE);
*pc = JMP_SHORT_OPCODE;
protect_generated_code(code, READONLY);
}
}
void
unlink_shared_syscall(dcontext_t *dcontext)
{
ASSERT(IS_SHARED_SYSCALL_THREAD_SHARED || dcontext != GLOBAL_DCONTEXT);
if (dcontext == GLOBAL_DCONTEXT) {
unlink_shared_syscall_common(SHARED_GENCODE(GENCODE_X64));
# ifdef X64
/* N.B.: there are no 32-bit syscalls for WOW64 with 64-bit DR (i#821) */
if (DYNAMO_OPTION(x86_to_x64))
unlink_shared_syscall_common(SHARED_GENCODE(GENCODE_X86_TO_X64));
# endif
} else
unlink_shared_syscall_common(THREAD_GENCODE(dcontext));
}
#endif /* defined(WINDOWS) ****************************/
#ifdef WINDOWS
/* used by detach, this inlines the callback stack so that we can detach
*
 * we spill xax and xbx to the PID and TID (respectively) TLS slots until we find
 * the thread private state, at which point we switch to using it for spilling. We
 * use the TID slot (as opposed to the PEB slot that callback.c uses) because we need
 * to get the TID anyways.
 *
 * note the counter walks backwards through the array of saved addresses (they are
 * stored in reverse order)
*
* FIXME - we clobber eflags, but those should be dead after a system call anyways.
*
* From emit_patch_syscall()
* after_shared_syscall:
* jmp _after_do_syscall
*
* after_do_syscall:
* mov xax -> PID in TEB
* mov &callback_buf -> xax
* jmp xax
*
*
* From emit_detach_callback_code()
* // xax is currently saved in PID slot of TEB
* callback_buf:
* xchg xbx, TID in TEB // store xbx and get TID
* mov &callback_state -> xax //the array of detach_callback_stack_t
* match_tid:
* cmp xbx, thread_id_offset(xax)
* je match_found
* add xax, sizeof(detach_callback_stack_t)
 *   jmp match_tid // Note - loops until a match is found, or crashes (not clear what else to do)
* match_found: // xax now holds ptr to the detach_callback_stack_t for this thread
* xchg xbx, TID in TEB // restore tid & xbx
* mov xbx -> xbx_save_offset(xax)
* mov PID -> xbx
* xchg xbx, PID in TEB // restore pid, saved xax now in xbx
* mov xbx -> xax_save_offset(xax)
* mov xcx -> xcx_save_offset(xax)
* mov count_offset(xax) -> xbx // need count in register for addr calculation below
* sub xbx, 1
* mov xbx -> count_offset(xax)
* mov callback_addrs_offset(xax) -> xcx
* mov (xcx + xbx*sizeof(app_pc)) -> xcx // xcx now holds the xip we need to go to
* mov xcx -> target_offset(xax)
* mov xcx_save_offset(xax) -> xcx
* mov xbx_save_offset(xax) -> xbx
* lea code_buf_offset(xax) -> xax
* jmp xax
*
214f1000 6764871e2400 xchg fs:[0024],ebx
214f1006 b800114f21 mov eax,0x214f1100
214f100b 3b18 cmp ebx,[eax]
214f100d 0f8408000000 je 214f101b
214f1013 83c03c add eax,0x3c
214f1016 e9f0ffffff jmp 214f100b
214f101b 6764871e2400 xchg fs:[0024],ebx
214f1021 895810 mov [eax+0x10],ebx
214f1024 bb5c040000 mov ebx,0x45c
214f1029 6764871e2000 xchg fs:[0020],ebx
214f102f 89580c mov [eax+0xc],ebx
214f1032 894814 mov [eax+0x14],ecx
214f1035 8b5804 mov ebx,[eax+0x4]
214f1038 83eb01 sub ebx,0x1
214f103b 895804 mov [eax+0x4],ebx
214f103e 8b4808 mov ecx,[eax+0x8]
214f1041 8b0c99 mov ecx,[ecx+ebx*4]
214f1044 894818 mov [eax+0x18],ecx
214f1047 8b4814 mov ecx,[eax+0x14]
214f104a 8b5810 mov ebx,[eax+0x10]
214f104d 8d401c lea eax,[eax+0x1c]
214f1050 ffe0 jmp eax
*
*
* From emit_detach_callback_final_jmp()
* _detach_callback_stack_t.code_buf (thread private)
* mov (xax_save_offset) -> xax
* jmp *target
*
214f111c a10c114f21 mov eax,[214f110c]
214f1121 ff2518114f21 jmp dword ptr [214f1118]
*/
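/* Reading the dump above against the fields used below gives the 32-bit
 * layout that particular build assumed (from the cmp/mov displacements):
 * tid @0x0, count @0x4, callback_addrs @0x8, xax_save @0xc, xbx_save @0x10,
 * xcx_save @0x14, target @0x18, code_buf @0x1c, and
 * sizeof(detach_callback_stack_t) == 0x3c. The code itself uses offsetof(),
 * so it does not depend on these particular values.
 */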
byte *
emit_detach_callback_code(dcontext_t *dcontext, byte *buf,
detach_callback_stack_t *callback_state)
{
byte *pc = buf;
instrlist_t ilist;
instr_t *match_tid = INSTR_CREATE_label(dcontext),
*match_found = INSTR_CREATE_label(dcontext);
/* i#821/PR 284029: for now we assume there are no syscalls in x86 code, so
* we do not need to generate an x86 version
*/
/* initialize the ilist */
instrlist_init(&ilist);
/* create instructions */
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_tls_slot(TID_TIB_OFFSET),
opnd_create_reg(SCRATCH_REG1)));
APP(&ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(SCRATCH_REG0),
OPND_CREATE_INTPTR((ptr_uint_t)callback_state)));
APP(&ilist, match_tid);
/* FIXME - we clobber eflags. We don't anticipate that being a problem on callback
* returns since syscalls clobber eflags too. */
APP(&ilist,
INSTR_CREATE_cmp(
dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_MEMPTR(SCRATCH_REG0, offsetof(detach_callback_stack_t, tid))));
APP(&ilist, INSTR_CREATE_jcc_short(dcontext, OP_je, opnd_create_instr(match_found)));
APP(&ilist,
INSTR_CREATE_add(dcontext, opnd_create_reg(SCRATCH_REG0),
OPND_CREATE_INT_32OR8(sizeof(detach_callback_stack_t))));
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_instr(match_tid)));
APP(&ilist, match_found);
/* found matching tid ptr is in xax
* spill registers into local slots and restore TEB fields */
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_tls_slot(TID_TIB_OFFSET),
opnd_create_reg(SCRATCH_REG1)));
APP(&ilist,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, offsetof(detach_callback_stack_t, xbx_save)),
opnd_create_reg(SCRATCH_REG1)));
APP(&ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_INTPTR((ptr_uint_t)get_process_id())));
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_tls_slot(PID_TIB_OFFSET),
opnd_create_reg(SCRATCH_REG1)));
APP(&ilist,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, offsetof(detach_callback_stack_t, xax_save)),
opnd_create_reg(SCRATCH_REG1)));
APP(&ilist,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, offsetof(detach_callback_stack_t, xcx_save)),
opnd_create_reg(SCRATCH_REG2)));
/* now find the right address and move it into target while updating the
* thread private count */
APP(&ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_MEMPTR(SCRATCH_REG0, offsetof(detach_callback_stack_t, count))));
/* see earlier comment on clobbering eflags */
APP(&ilist,
INSTR_CREATE_sub(dcontext, opnd_create_reg(SCRATCH_REG1), OPND_CREATE_INT8(1)));
APP(&ilist,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, offsetof(detach_callback_stack_t, count)),
opnd_create_reg(SCRATCH_REG1)));
APP(&ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG2),
OPND_CREATE_MEMPTR(SCRATCH_REG0,
offsetof(detach_callback_stack_t, callback_addrs))));
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2),
opnd_create_base_disp(SCRATCH_REG2, SCRATCH_REG1,
sizeof(app_pc), 0, OPSZ_PTR)));
APP(&ilist,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEMPTR(SCRATCH_REG0, offsetof(detach_callback_stack_t, target)),
opnd_create_reg(SCRATCH_REG2)));
APP(&ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG2),
OPND_CREATE_MEMPTR(SCRATCH_REG0,
offsetof(detach_callback_stack_t, xcx_save))));
APP(&ilist,
XINST_CREATE_load(
dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_CREATE_MEMPTR(SCRATCH_REG0,
offsetof(detach_callback_stack_t, xbx_save))));
APP(&ilist,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(SCRATCH_REG0),
OPND_CREATE_MEM_lea(SCRATCH_REG0, REG_NULL, 0,
offsetof(detach_callback_stack_t, code_buf))));
APP(&ilist, INSTR_CREATE_jmp_ind(dcontext, opnd_create_reg(SCRATCH_REG0)));
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, true /* instr targets */);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
ASSERT(pc - buf < DETACH_CALLBACK_CODE_SIZE);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
void
emit_detach_callback_final_jmp(dcontext_t *dcontext,
detach_callback_stack_t *callback_state)
{
byte *pc = callback_state->code_buf;
instrlist_t ilist;
/* initialize the ilist */
instrlist_init(&ilist);
/* restore eax and jmp target */
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG0),
OPND_CREATE_ABSMEM(&(callback_state->xax_save), OPSZ_PTR)));
APP(&ilist,
INSTR_CREATE_jmp_ind(dcontext,
OPND_CREATE_ABSMEM(&(callback_state->target), OPSZ_PTR)));
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, true /* instr targets */);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
ASSERT(pc - callback_state->code_buf < DETACH_CALLBACK_FINAL_JMP_SIZE);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
}
void
emit_patch_syscall(dcontext_t *dcontext, byte *target _IF_X64(gencode_mode_t mode))
{
byte *pc = after_do_syscall_code_ex(dcontext _IF_X64(mode));
instrlist_t ilist;
if (DYNAMO_OPTION(shared_syscalls)) {
/* Simply patch shared_syscall to jump to after_do_syscall. Only
* one array of callback stack addresses is needed -- a return from
* a callback entered from shared_syscall will jump to the patched
* after_do_syscall and fetch the correct address off of our
* callback stack copy. It "just works".
*/
instr_t *instr = XINST_CREATE_jump(dcontext, opnd_create_pc(pc));
byte *tgt_pc = after_shared_syscall_code_ex(dcontext _IF_X64(mode));
byte *nxt_pc = instr_encode_to_copy(dcontext, instr,
vmcode_get_writable_addr(tgt_pc), tgt_pc);
ASSERT(nxt_pc != NULL);
nxt_pc = vmcode_get_executable_addr(nxt_pc);
        /* check that there was room - shared_syscall should be before do_syscall;
         * anything between them is dead at this point */
ASSERT(after_shared_syscall_code_ex(dcontext _IF_X64(mode)) < pc && nxt_pc < pc);
instr_destroy(dcontext, instr);
LOG(THREAD, LOG_EMIT, 2,
"Finished patching shared syscall routine for detach -- patch " PFX
" to jump to " PFX "\n",
after_shared_syscall_code(dcontext), pc);
}
/* initialize the ilist */
instrlist_init(&ilist);
/* patch do_syscall to jmp to target */
    /* Note that on 64-bit the target may not be reachable, in which case we need to
     * inline the first register spill here so we can jmp through a reg. We go ahead
     * and do the spill and jmp through a reg for 32-bit as well, for consistency. */
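    /* i.e., we emit (illustrative):
     *   mov  %xax -> fs:PID_TIB_OFFSET   ; borrow the TEB PID slot as scratch
     *   mov  $target -> %xax
     *   jmp  *%xax
     */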
APP(&ilist,
XINST_CREATE_store(dcontext, opnd_create_tls_slot(PID_TIB_OFFSET),
opnd_create_reg(SCRATCH_REG0)));
APP(&ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(SCRATCH_REG0),
OPND_CREATE_INTPTR((ptr_uint_t)target)));
APP(&ilist, INSTR_CREATE_jmp_ind(dcontext, opnd_create_reg(SCRATCH_REG0)));
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, true /* instr targets */);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* ASSERT that there was enough space after the system call (everything after
* do_syscall should be dead at this point). */
ASSERT(pc <= get_emitted_routines_code(dcontext _IF_X64(mode))->commit_end_pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
}
#endif /* WINDOWS */
/* this routine performs a single system call instruction and then returns
* to dynamo via fcache_return
*/
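/* Shape of the emitted code (x86 flavor, illustrative; ARM/AArch64 prepend
 * register saves, and the UNIX clone path appends mangling code):
 *   do_syscall:
 *      int $0x2e / sysenter / syscall   ; *syscall_offs => just past this
 *      <nop padding if method not yet known>
 *      save xax (tls or dcontext)
 *      mov  $syscall_linkstub -> xax
 *      jmp  fcache_return
 */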
static byte *
emit_do_syscall_common(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc, bool handle_clone, bool thread_shared,
int interrupt, instr_t *syscall_instr, uint *syscall_offs /*OUT*/)
{
instrlist_t ilist;
instr_t *syscall = NULL;
#ifdef UNIX
instr_t *post_syscall;
#endif
#if defined(UNIX) && defined(X86_32)
/* PR 286922: 32-bit clone syscall cannot use vsyscall: must be int */
if (handle_clone) {
ASSERT(interrupt == 0 || interrupt == 0x80);
interrupt = 0x80;
}
#endif
if (syscall_instr != NULL)
syscall = syscall_instr;
else {
if (interrupt != 0) {
#ifdef X86
syscall = INSTR_CREATE_int(dcontext,
opnd_create_immed_int((char)interrupt, OPSZ_1));
#endif
IF_ARM(ASSERT_NOT_REACHED());
} else
syscall = create_syscall_instr(dcontext);
}
/* i#821/PR 284029: for now we assume there are no syscalls in x86 code.
*/
IF_X86_64(ASSERT_NOT_IMPLEMENTED(!GENCODE_IS_X86(code->gencode_mode)));
ASSERT(syscall_offs != NULL);
*syscall_offs = instr_length(dcontext, syscall);
/* initialize the ilist */
instrlist_init(&ilist);
#ifdef AARCH64
/* We will call this from handle_system_call, so need prefix on AArch64. */
APP(&ilist,
XINST_CREATE_load_pair(
dcontext, opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X1),
opnd_create_base_disp(dr_reg_stolen, DR_REG_NULL, 0, 0, OPSZ_16)));
/* XXX: should have a proper patch list entry */
*syscall_offs += AARCH64_INSTR_SIZE;
#endif
#if defined(ARM)
/* We have to save r0 in case the syscall is interrupted. We can't
* easily do this from d_r_dispatch b/c fcache_enter clobbers some TLS slots.
*/
APP(&ilist, instr_create_save_to_tls(dcontext, DR_REG_R0, TLS_REG0_SLOT));
/* XXX: should have a proper patch list entry */
*syscall_offs += THUMB_LONG_INSTR_SIZE;
#elif defined(AARCH64)
/* For AArch64, we need to save both x0 and x1 into SLOT 0 and SLOT 1
* in case the syscall is interrupted. See append_save_gpr.
* stp x0, x1, [x28]
*/
APP(&ilist,
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(dr_reg_stolen, DR_REG_NULL, 0, 0, OPSZ_16),
opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X1)));
*syscall_offs += AARCH64_INSTR_SIZE;
#endif
/* system call itself -- using same method we've observed OS using */
APP(&ilist, syscall);
#ifdef UNIX
# ifdef X86
if (get_syscall_method() == SYSCALL_METHOD_UNINITIALIZED) {
/* Since we lazily find out the method, but emit these routines
* up front, we have to leave room for the longest syscall method.
         * This used to be the 6-byte LOL64 call* but we now walk into that
* call* (PR 286922). Not much of a perf worry, but if we
* ever have proactive syscall determination on linux we should
* remove these nops.
*/
ASSERT(instr_length(dcontext, instrlist_last(&ilist)) == 2);
if (SYSCALL_METHOD_LONGEST_INSTR == 6) {
/* we could add 4-byte nop support but I'm too lazy */
APP(&ilist, INSTR_CREATE_nop3byte(dcontext));
APP(&ilist, INSTR_CREATE_nop1byte(dcontext));
} else
ASSERT_NOT_IMPLEMENTED(instr_length(dcontext, instrlist_last(&ilist)) ==
SYSCALL_METHOD_LONGEST_INSTR);
}
# endif
post_syscall = instrlist_last(&ilist);
#endif
/* go to fcache return -- use special syscall linkstub */
/* in case it returns: go to fcache return -- use 0 as &linkstub */
if (thread_shared)
APP(&ilist, instr_create_save_to_tls(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
else {
APP(&ilist,
instr_create_save_to_dcontext(dcontext, SCRATCH_REG0, SCRATCH_REG0_OFFS));
}
#ifdef AARCH64
/* Save X1 as this is used for the indirect branch in the exit stub. */
APP(&ilist, instr_create_save_to_tls(dcontext, SCRATCH_REG1, TLS_REG1_SLOT));
#endif
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)get_syscall_linkstub(),
opnd_create_reg(SCRATCH_REG0), &ilist, NULL, NULL, NULL);
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(fcache_return_pc)));
#ifdef UNIX
if (handle_clone) {
/* put in clone code, and make sure to target it.
* do it here since it assumes an instr after the syscall exists.
*/
mangle_insert_clone_code(dcontext, &ilist,
post_syscall _IF_X86_64(code->gencode_mode));
}
#endif
/* now encode the instructions */
pc =
instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc, NULL,
#ifdef UNIX
handle_clone /* instr targets */
#else
false /* no instr targets */
#endif
);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
#ifdef AARCHXX
byte *
emit_fcache_enter_gonative(dcontext_t *dcontext, generated_code_t *code, byte *pc)
{
int len;
instrlist_t ilist;
patch_list_t patch;
bool absolute = false;
bool shared = true;
init_patch_list(&patch, absolute ? PATCH_TYPE_ABSOLUTE : PATCH_TYPE_INDIRECT_XDI);
instrlist_init(&ilist);
append_fcache_enter_prologue(dcontext, &ilist, absolute);
append_setup_fcache_target(dcontext, &ilist, absolute, shared);
append_call_exit_dr_hook(dcontext, &ilist, absolute, shared);
/* restore the original register state */
append_restore_xflags(dcontext, &ilist, absolute);
append_restore_simd_reg(dcontext, &ilist, absolute);
append_restore_gpr(dcontext, &ilist, absolute);
/* We need to restore the stolen reg, but we have no scratch registers.
* We are forced to use the stack here. We assume a go-native point is
* a clean ABI point where the stack is valid and there is no app state
* beyond TOS.
*/
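    /* i.e. (illustrative, AArch32 flavor):
     *   str  r0, [sp, #-XSP_SZ]       ; spill r0 below TOS
     *   ldr  r0, <FCACHE_ENTER_TARGET_SLOT>
     *   str  r0, [sp, #-2*XSP_SZ]     ; stash target PC
     *   ldr  r0, [sp, #-XSP_SZ]       ; restore r0
     *   ldr  stolen, <TLS_REG_STOLEN_SLOT>
     *   ldr  pc, [sp, #-2*XSP_SZ]     ; transfer (AArch64 uses br x12 instead)
     */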
/* spill r0 */
APP(&ilist,
XINST_CREATE_store(dcontext, OPND_CREATE_MEMPTR(DR_REG_SP, -XSP_SZ),
opnd_create_reg(DR_REG_R0)));
/* Load target PC from FCACHE_ENTER_TARGET_SLOT, stored by
* by append_setup_fcache_target.
*/
APP(&ilist,
instr_create_restore_from_tls(dcontext, DR_REG_R0, FCACHE_ENTER_TARGET_SLOT));
/* store target PC */
APP(&ilist,
XINST_CREATE_store(dcontext, OPND_CREATE_MEMPTR(DR_REG_SP, -2 * XSP_SZ),
opnd_create_reg(DR_REG_R0)));
/* restore r0 */
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(DR_REG_R0),
OPND_CREATE_MEMPTR(DR_REG_SP, -XSP_SZ)));
/* restore stolen reg */
APP(&ilist,
instr_create_restore_from_tls(dcontext, dr_reg_stolen, TLS_REG_STOLEN_SLOT));
/* go to stored target PC */
# ifdef AARCH64
/* For AArch64, we can't jump through memory like on x86, or write
* to the PC like on ARM. For now assume we're at an ABI call
* boundary (true for dr_app_stop) and we clobber the caller-saved
* register r12.
* XXX: The only clean transfer method we have is SYS_rt_sigreturn,
* which we do use to send other threads native on detach.
* To support externally-triggered detach at non-clean points in the future
* we could try changing the callers to invoke thread_set_self_mcontext()
* instead of coming here (and also finish implementing that for A64).
*/
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(DR_REG_R12),
OPND_CREATE_MEMPTR(DR_REG_SP, -2 * XSP_SZ)));
APP(&ilist, INSTR_CREATE_br(dcontext, opnd_create_reg(DR_REG_R12)));
# else
APP(&ilist,
INSTR_CREATE_ldr(dcontext, opnd_create_reg(DR_REG_PC),
OPND_CREATE_MEMPTR(DR_REG_SP, -2 * XSP_SZ)));
# endif
/* now encode the instructions */
len = encode_with_patch_list(dcontext, &patch, &ilist, pc);
ASSERT(len != 0);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc + len;
}
#endif /* AARCHXX */
#ifdef WINDOWS
/* like fcache_enter but indirects the dcontext passed in through edi */
byte *
emit_fcache_enter_indirect(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc)
{
return emit_fcache_enter_common(dcontext, code, pc, false /*indirect*/,
false /*!shared*/);
}
/* This routine performs an int 2b, which maps to NtCallbackReturn, and then returns
* to dynamo via fcache_return (though it won't reach there)
*/
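/* Emitted shape (illustrative):
 *   int  $0x2b               ; NtCallbackReturn -- normally does not return
 *   save xax (tls or dcontext)
 *   mov  $0 -> eax           ; 0 as &linkstub (upper rax bits cleared on x64)
 *   jmp  fcache_return
 */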
byte *
emit_do_callback_return(dcontext_t *dcontext, byte *pc, byte *fcache_return_pc,
bool thread_shared)
{
instrlist_t ilist;
/* initialize the ilist */
instrlist_init(&ilist);
/* interrupt 2b */
APP(&ilist, INSTR_CREATE_int(dcontext, opnd_create_immed_int(0x2b, OPSZ_1)));
/* in case it returns: go to fcache return -- use 0 as &linkstub */
if (thread_shared)
APP(&ilist, instr_create_save_to_tls(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
else
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_EAX, SCRATCH_REG0_OFFS));
    /* for x64 we rely on the 32-bit mov zero-extending to fill out rax */
APP(&ilist,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EAX), OPND_CREATE_INT32(0)));
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(fcache_return_pc)));
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, false /* no instr targets */);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
#else /* !WINDOWS => UNIX */
byte *
emit_do_clone_syscall(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc, bool thread_shared,
uint *syscall_offs /*OUT*/)
{
return emit_do_syscall_common(dcontext, code, pc, fcache_return_pc, true,
thread_shared, false, NULL, syscall_offs);
}
# ifdef VMX86_SERVER
byte *
emit_do_vmkuw_syscall(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc, bool thread_shared,
uint *syscall_offs /*OUT*/)
{
instr_t *gateway = INSTR_CREATE_int(
dcontext, opnd_create_immed_int((char)VMKUW_SYSCALL_GATEWAY, OPSZ_1));
return emit_do_syscall_common(dcontext, code, pc, fcache_return_pc, false,
thread_shared, false, gateway, syscall_offs);
}
# endif
#endif /* UNIX */
byte *
emit_do_syscall(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc, bool thread_shared, int interrupt,
uint *syscall_offs /*OUT*/)
{
pc = emit_do_syscall_common(dcontext, code, pc, fcache_return_pc, false,
thread_shared, interrupt, NULL, syscall_offs);
return pc;
}
#ifndef WINDOWS
/* updates the first syscall instr it finds to use the new syscall method */
static void
update_syscall(dcontext_t *dcontext, byte *pc)
{
LOG_DECLARE(byte *start_pc = pc;)
byte *prev_pc;
IF_ARM(dr_isa_mode_t old_mode;)
instr_t instr;
instr_init(dcontext, &instr);
# ifdef ARM
/* We need to switch to the mode of our gencode */
dr_set_isa_mode(dcontext, DEFAULT_ISA_MODE, &old_mode);
# endif
do {
prev_pc = pc;
instr_reset(dcontext, &instr);
pc = decode_cti(dcontext, pc, &instr);
        ASSERT(pc != NULL); /* this is our own code we're decoding, should be valid */
if (instr_is_syscall(&instr)) {
instr_t *newinst = create_syscall_instr(dcontext);
byte *nxt_pc = instr_encode_to_copy(
dcontext, newinst, vmcode_get_writable_addr(prev_pc), prev_pc);
/* instruction must not change size! */
ASSERT(nxt_pc != NULL);
nxt_pc = vmcode_get_executable_addr(nxt_pc);
if (nxt_pc != pc) {
pc = nxt_pc;
byte *stop_pc = prev_pc + SYSCALL_METHOD_LONGEST_INSTR;
ASSERT(nxt_pc <= stop_pc);
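                /* The new syscall instr may be shorter than the room we
                 * reserved (SYSCALL_METHOD_LONGEST_INSTR): pad with nops so
                 * subsequent instrs keep their original offsets. */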
while (pc < stop_pc) {
/* we could add >3-byte nop support but I'm too lazy */
int noplen = MIN(stop_pc - pc, 3);
instr_t *nop = instr_create_nbyte_nop(dcontext, noplen, true);
pc = instr_encode_to_copy(dcontext, nop, vmcode_get_writable_addr(pc),
pc);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
instr_destroy(dcontext, nop);
}
}
instr_destroy(dcontext, newinst);
break;
}
ASSERT(pc - prev_pc < 128);
} while (1);
machine_cache_sync(prev_pc, pc, true);
instr_free(dcontext, &instr);
# ifdef ARM
dr_set_isa_mode(dcontext, old_mode, NULL);
# endif
DOLOG(3, LOG_EMIT, {
LOG(THREAD, LOG_EMIT, 3, "Just updated syscall routine:\n");
prev_pc = pc;
pc = start_pc;
do {
pc = disassemble_with_bytes(dcontext, pc, THREAD);
} while (pc < prev_pc + 1); /* +1 to get next instr */
LOG(THREAD, LOG_EMIT, 3, " ...\n");
});
}
void
update_syscalls(dcontext_t *dcontext)
{
byte *pc;
generated_code_t *code = THREAD_GENCODE(dcontext);
protect_generated_code(code, WRITABLE);
pc = get_do_syscall_entry(dcontext);
update_syscall(dcontext, pc);
# ifdef X64
/* PR 286922: for 32-bit, we do NOT update the clone syscall as it
* always uses int (since can't use call to vsyscall when swapping
* stacks!)
*/
pc = get_do_clone_syscall_entry(dcontext);
update_syscall(dcontext, pc);
# endif
protect_generated_code(code, READONLY);
}
#endif /* !WINDOWS */
/* Returns -1 on failure */
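/* Illustrative only: the common shape this walk handles is a Windows ntdll
 * wrapper along the lines of (exact bytes vary by OS version):
 *   mov  eax, <sysnum>       ; matched by the mov-imm-into-SCRATCH_REG0 check
 *   mov  edx, 0x7ffe0300     ; SharedUserData!SystemCallStub on XP/2003
 *   call dword ptr [edx]     ; a cti => stop, unless it's our own trampoline
 */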
int
decode_syscall_num(dcontext_t *dcontext, byte *entry)
{
byte *pc;
int syscall = -1;
instr_t instr;
ASSERT(entry != NULL);
instr_init(dcontext, &instr);
pc = entry;
LOG(GLOBAL, LOG_EMIT, 3, "decode_syscall_num " PFX "\n", entry);
while (true) {
DOLOG(3, LOG_EMIT, { disassemble_with_bytes(dcontext, pc, GLOBAL); });
instr_reset(dcontext, &instr);
pc = decode(dcontext, pc, &instr);
if (pc == NULL)
break; /* give up gracefully */
/* we do not handle control transfer instructions! */
if (instr_is_cti(&instr)) {
#ifdef WINDOWS /* since no interception code buffer to check on linux */
if (DYNAMO_OPTION(native_exec_syscalls) && instr_is_ubr(&instr)) {
/* probably our own trampoline, follow it
* ASSUMPTION: mov eax is the instr that jmp targets: i.e.,
* we don't handle deep hooks here.
*/
if (!is_syscall_trampoline(opnd_get_pc(instr_get_target(&instr)), &pc)) {
break; /* give up gracefully */
} /* else, carry on at pc */
} else
#endif
break; /* give up gracefully */
}
if (instr_num_dsts(&instr) > 0 && opnd_is_reg(instr_get_dst(&instr, 0)) &&
opnd_get_reg(instr_get_dst(&instr, 0)) == SCRATCH_REG0) {
#ifndef AARCH64 /* FIXME i#1569: recognise "move" on AArch64 */
if (instr_get_opcode(&instr) == IF_X86_ELSE(OP_mov_imm, OP_mov)) {
IF_X64(ASSERT_TRUNCATE(int, int,
opnd_get_immed_int(instr_get_src(&instr, 0))));
syscall = (int)opnd_get_immed_int(instr_get_src(&instr, 0));
LOG(GLOBAL, LOG_EMIT, 3, "\tfound syscall num: 0x%x\n", syscall);
break;
} else
#endif
break; /* give up gracefully */
}
}
instr_free(dcontext, &instr);
return syscall;
}
#ifdef UNIX
/* PR 212290: can't be static code in x86.asm since it can't be PIC */
/*
* new_thread_dynamo_start - for initializing a new thread created
* via the clone system call.
* assumptions:
* 1) The clone_record_t is on the base of the stack.
* 2) App's IF_X86_ELSE(xax, r0) is scratch (app expects 0 in it).
*/
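/* Emitted shape (illustrative):
 *   <push full priv_mcontext_t>        ; insert_push_all_registers
 *   <pre-push xsp -> mcontext.xsp>     ; x86/ARM path
 *   mov  xsp -> arg0                   ; &mcontext
 *   call new_thread_setup              ; does not return
 *   jmp  unexpected_return             ; paranoia: should be unreachable
 */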
byte *
emit_new_thread_dynamo_start(dcontext_t *dcontext, byte *pc)
{
instrlist_t ilist;
uint offset;
/* initialize the ilist */
instrlist_init(&ilist);
/* Since we don't have TLS available here (we could use CLONE_SETTLS
* for kernel 2.5.32+: PR 285898) we can't non-racily acquire
* initstack_mutex as we can't spill or spare a register
* (xref i#101/PR 207903).
*/
/* Grab exec state and pass as param in a priv_mcontext_t struct.
* new_thread_setup() will restore real app xsp.
* We emulate x86.asm's PUSH_DR_MCONTEXT(SCRATCH_REG0) (for priv_mcontext_t.pc).
*/
offset = insert_push_all_registers(dcontext, NULL, &ilist, NULL, IF_X64_ELSE(16, 4),
opnd_create_reg(SCRATCH_REG0),
/* we have to pass in scratch to prevent
* use of the stolen reg, which would be
* a race w/ the parent's use of it!
*/
SCRATCH_REG0 _IF_AARCH64(false));
# ifndef AARCH64
/* put pre-push xsp into priv_mcontext_t.xsp slot */
ASSERT(offset == get_clean_call_switch_stack_size());
APP(&ilist,
XINST_CREATE_add_2src(dcontext, opnd_create_reg(SCRATCH_REG0),
opnd_create_reg(REG_XSP), OPND_CREATE_INT32(offset)));
APP(&ilist,
XINST_CREATE_store(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offsetof(priv_mcontext_t, xsp)),
opnd_create_reg(SCRATCH_REG0)));
# ifdef X86
if (!INTERNAL_OPTION(safe_read_tls_init)) {
/* We avoid get_thread_id syscall in get_thread_private_dcontext()
* by clearing the segment register here (cheaper check than syscall)
* (xref PR 192231). If we crash prior to this point though, the
* signal handler will get the wrong dcontext, but that's a small window.
* See comments in get_thread_private_dcontext() for alternatives.
*/
APP(&ilist,
XINST_CREATE_load_int(dcontext, opnd_create_reg(REG_AX),
OPND_CREATE_INT16(0)));
APP(&ilist,
INSTR_CREATE_mov_seg(dcontext, opnd_create_reg(SEG_TLS),
opnd_create_reg(REG_AX)));
} /* Else, os_clone_pre() inherits a valid-except-.magic segment (i#2089). */
# endif
/* stack grew down, so priv_mcontext_t at tos */
APP(&ilist,
XINST_CREATE_move(dcontext, opnd_create_reg(SCRATCH_REG0),
opnd_create_reg(REG_XSP)));
# else
/* For AArch64, SP was already saved by insert_push_all_registers and
* pointing to priv_mcontext_t. Move sp to the first argument:
* mov x0, sp
*/
APP(&ilist,
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_X0),
opnd_create_reg(DR_REG_XSP)));
# endif
dr_insert_call_noreturn(dcontext, &ilist, NULL, (void *)new_thread_setup, 1,
opnd_create_reg(SCRATCH_REG0));
/* should not return */
insert_reachable_cti(dcontext, &ilist, NULL, vmcode_get_start(),
(byte *)unexpected_return, true /*jmp*/, false /*!returns*/,
false /*!precise*/, DR_REG_R11 /*scratch*/, NULL);
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, true /* instr targets */);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
#endif /* UNIX */
#ifdef TRACE_HEAD_CACHE_INCR
/* trace_t heads come here instead of back to dynamo to have their counters
* incremented.
*/
byte *
emit_trace_head_incr(dcontext_t *dcontext, byte *pc, byte *fcache_return_pc)
{
/* save ecx
save eax->xbx slot
mov target_fragment_offs(eax), eax
movzx counter_offs(eax), ecx
lea 1(ecx), ecx # increment counter
mov data16 cx, counter_offs(eax)
lea -hot_threshold(ecx), ecx # compare to hot_threshold
jecxz is_hot
mov start_pc_offs(eax), ecx
movzx prefix_size_offs(eax), eax
lea (ecx,eax,1), ecx
mov ecx, trace_head_pc_offs + dcontext # special slot to avoid target prefix
restore ecx
restore eax
jmp * trace_head_pc_offs + dcontext
is_hot:
restore ebx slot to eax # put &l into eax
restore ecx
jmp fcache_return
*/
instrlist_t ilist;
instr_t *is_hot =
instr_create_restore_from_dcontext(dcontext, REG_EAX, SCRATCH_REG1_OFFS);
instr_t *in;
/* PR 248210: unsupported feature on x64 */
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
instrlist_init(&ilist);
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_ECX, SCRATCH_REG2_OFFS));
if (DYNAMO_OPTION(shared_bbs)) {
/* HACK to get shared exit stub, which puts eax into fs:scratch1, to work
* w/ thread-private THCI: we pull eax out of the tls slot and into mcontext.
* This requires that all direct stubs for cti that can link to trace
* heads use the shared stub -- so if traces can link to trace heads, their
* exits must use the shared stubs, even if the traces are thread-private.
*/
APP(&ilist, RESTORE_FROM_TLS(dcontext, REG_ECX, EXIT_STUB_SPILL_SLOT));
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_ECX, SCRATCH_REG0_OFFS));
}
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_EAX, SCRATCH_REG1_OFFS));
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(REG_EAX),
OPND_CREATE_MEM32(REG_EAX, LINKSTUB_TARGET_FRAG_OFFS)));
    ASSERT_NOT_IMPLEMENTED(false &&
                           "must handle LINKSTUB_CBR_FALLTHROUGH case"
                           " by calculating target tag");
APP(&ilist,
INSTR_CREATE_movzx(
dcontext, opnd_create_reg(REG_ECX),
opnd_create_base_disp(REG_EAX, REG_NULL, 0, FRAGMENT_COUNTER_OFFS, OPSZ_2)));
APP(&ilist,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX),
opnd_create_base_disp(REG_ECX, REG_NULL, 0, 1, OPSZ_lea)));
/* data16 prefix is set auto-magically */
APP(&ilist,
XINST_CREATE_store(
dcontext,
opnd_create_base_disp(REG_EAX, REG_NULL, 0, FRAGMENT_COUNTER_OFFS, OPSZ_2),
opnd_create_reg(REG_CX)));
APP(&ilist,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX),
opnd_create_base_disp(REG_ECX, REG_NULL, 0,
-((int)INTERNAL_OPTION(trace_threshold)),
OPSZ_lea)));
APP(&ilist, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(is_hot)));
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_MEM32(REG_EAX, FRAGMENT_START_PC_OFFS)));
APP(&ilist,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EAX),
opnd_create_base_disp(REG_EAX, REG_NULL, 0,
FRAGMENT_PREFIX_SIZE_OFFS, OPSZ_1)));
APP(&ilist,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX),
opnd_create_base_disp(REG_ECX, REG_EAX, 1, 0, OPSZ_lea)));
APP(&ilist, instr_create_save_to_dcontext(dcontext, REG_ECX, TRACE_HEAD_PC_OFFSET));
APP(&ilist, instr_create_restore_from_dcontext(dcontext, REG_ECX, SCRATCH_REG2_OFFS));
APP(&ilist, instr_create_restore_from_dcontext(dcontext, REG_EAX, SCRATCH_REG0_OFFS));
APP(&ilist,
INSTR_CREATE_jmp_ind(dcontext,
opnd_create_dcontext_field(dcontext, TRACE_HEAD_PC_OFFSET)));
APP(&ilist, is_hot);
APP(&ilist, instr_create_restore_from_dcontext(dcontext, REG_ECX, SCRATCH_REG2_OFFS));
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(fcache_return_pc)));
/* now encode the instructions */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, true /* instr targets */);
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
byte *
emit_trace_head_incr_shared(dcontext_t *dcontext, byte *pc, byte *fcache_return_pc)
{
    ASSERT_NOT_IMPLEMENTED(false);
    return NULL; /* unreachable, but keeps the non-void signature well-formed */
}
#endif /* TRACE_HEAD_CACHE_INCR */
/***************************************************************************
* SPECIAL IBL XFER ROUTINES
*/
byte *
special_ibl_xfer_tgt(dcontext_t *dcontext, generated_code_t *code,
ibl_entry_point_type_t entry_type, ibl_branch_type_t ibl_type)
{
/* We use the trace ibl so that the target will be a trace head,
* avoiding a trace disruption.
* We request that bbs doing this xfer are marked DR_EMIT_MUST_END_TRACE.
* We use the ret ibt b/c we figure most uses will involve rets and there's
* no reason to fill up the jmp ibt.
* This feature is unavail for prog shep b/c of the cross-type pollution.
*/
return get_ibl_routine_ex(
dcontext, entry_type,
DYNAMO_OPTION(disable_traces)
? (code->thread_shared ? IBL_BB_SHARED : IBL_BB_PRIVATE)
: (code->thread_shared ? IBL_TRACE_SHARED : IBL_TRACE_PRIVATE),
ibl_type _IF_X86_64(code->gencode_mode));
}
/* We only need a thread-private version if our ibl target is thread-private */
bool
special_ibl_xfer_is_thread_private(void)
{
#ifdef X64
return false; /* all gencode is shared */
#else
return (DYNAMO_OPTION(disable_traces) ? !DYNAMO_OPTION(shared_bbs)
: !DYNAMO_OPTION(shared_traces));
#endif
}
#ifdef AARCHXX
size_t
get_ibl_entry_tls_offs(dcontext_t *dcontext, cache_pc ibl_entry)
{
spill_state_t state;
byte *local;
ibl_type_t ibl_type = { 0 };
/* FIXME i#1551: add Thumb support: ARM vs Thumb gencode */
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type_ex(dcontext, ibl_entry, &ibl_type);
ASSERT(is_ibl);
/* FIXME i#1575: coarse-grain NYI on ARM/AArch64 */
ASSERT(ibl_type.source_fragment_type != IBL_COARSE_SHARED);
if (IS_IBL_TRACE(ibl_type.source_fragment_type)) {
if (IS_IBL_LINKED(ibl_type.link_state))
local = (byte *)&state.trace_ibl[ibl_type.branch_type].ibl;
else
local = (byte *)&state.trace_ibl[ibl_type.branch_type].unlinked;
} else {
ASSERT(IS_IBL_BB(ibl_type.source_fragment_type));
if (IS_IBL_LINKED(ibl_type.link_state))
local = (byte *)&state.bb_ibl[ibl_type.branch_type].ibl;
else
local = (byte *)&state.bb_ibl[ibl_type.branch_type].unlinked;
}
return (local - (byte *)&state);
}
#endif
/* Emits the special_ibl trampoline code that transfers control flow to the
 * ibl lookup.
 * - index: the index into the special_ibl array to emit to
 * - ibl_type: the branch type (IBL_RETURN or IBL_INDCALL)
 * - custom_ilist: custom instructions added by the caller, appended at the
 *   end of the trampoline right before the jump to the ibl routine
 * - tgt: the opnd holding the target, which will be moved into XCX for the ibl.
 */
static byte *
emit_special_ibl_xfer(dcontext_t *dcontext, byte *pc, generated_code_t *code, uint index,
ibl_branch_type_t ibl_type, instrlist_t *custom_ilist, opnd_t tgt)
{
instrlist_t ilist;
patch_list_t patch;
instr_t *in;
/* For AArch64 the linkstub has to be in X0 and the app's X0 has to be
* spilled in TLS_REG0_SLOT before calling the ibl routine.
*/
reg_id_t stub_reg = IF_AARCH64_ELSE(SCRATCH_REG0, SCRATCH_REG1);
ushort stub_slot = IF_AARCH64_ELSE(TLS_REG0_SLOT, TLS_REG1_SLOT);
IF_X86(size_t len;)
byte *ibl_linked_tgt = special_ibl_xfer_tgt(dcontext, code, IBL_LINKED, ibl_type);
byte *ibl_unlinked_tgt = special_ibl_xfer_tgt(dcontext, code, IBL_UNLINKED, ibl_type);
bool absolute = !code->thread_shared;
ASSERT(ibl_linked_tgt != NULL);
ASSERT(ibl_unlinked_tgt != NULL);
instrlist_init(&ilist);
init_patch_list(&patch, absolute ? PATCH_TYPE_ABSOLUTE : PATCH_TYPE_INDIRECT_FS);
if (DYNAMO_OPTION(indirect_stubs)) {
        const linkstub_t *linkstub =
            get_special_ibl_linkstub(ibl_type, !DYNAMO_OPTION(disable_traces));
APP(&ilist, SAVE_TO_TLS(dcontext, stub_reg, stub_slot));
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)linkstub, opnd_create_reg(stub_reg),
&ilist, NULL, NULL, NULL);
}
if (code->thread_shared || DYNAMO_OPTION(private_ib_in_tls)) {
#if defined(X86) && defined(X64)
if (GENCODE_IS_X86_TO_X64(code->gencode_mode) &&
DYNAMO_OPTION(x86_to_x64_ibl_opt)) {
APP(&ilist, SAVE_TO_REG(dcontext, SCRATCH_REG2, REG_R9));
} else
#endif
APP(&ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG2, MANGLE_XCX_SPILL_SLOT));
} else {
APP(&ilist, SAVE_TO_DC(dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS));
}
APP(&ilist, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), tgt));
/* insert customized instructions right before xfer to ibl */
if (custom_ilist != NULL)
in = instrlist_first(custom_ilist);
else
in = NULL;
while (in != NULL) {
instrlist_remove(custom_ilist, in);
APP(&ilist, in);
in = instrlist_first(custom_ilist);
}
#ifdef UNIX
/* i#4670: Jump to the unlinked IBL target if there are pending signals. This is
* required to bound delivery time for signals received while executing fragments
* that use the special ibl xfer trampoline, which uses a different (un)linking
* mechanism.
* XXX i#4804: This special unlinking strategy incurs overhead in the fast path
* (when linked) too. It can be avoided using a cleaner solution that links/unlinks
* just like any other fragment.
*/
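    /* C equivalent (a sketch, not the exact codegen; the byte loaded is the
     * dcontext field behind SIGPENDING_OFFSET):
     *   if (signals_pending != 0)
     *       goto unlinked_ibl_target;
     */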
instr_t *skip_unlinked_tgt_jump = INSTR_CREATE_label(dcontext);
insert_shared_get_dcontext(dcontext, &ilist, NULL, true);
# ifdef X86
/* Reuse DR_REG_XDI which contains dcontext currently. */
APP(&ilist,
XINST_CREATE_load_1byte_zext4(
dcontext, opnd_create_reg(DR_REG_EDI),
OPND_DC_FIELD(false, dcontext, OPSZ_1, SIGPENDING_OFFSET)));
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(DR_REG_XDI),
opnd_create_reg(DR_REG_XCX)));
APP(&ilist, INSTR_CREATE_jecxz(dcontext, opnd_create_instr(skip_unlinked_tgt_jump)));
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(DR_REG_XDI),
opnd_create_reg(DR_REG_XCX)));
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ibl_unlinked_tgt)));
# elif defined(AARCHXX)
/* Reuse SCRATCH_REG5 which contains dcontext currently. */
APP(&ilist,
INSTR_CREATE_ldrsb(dcontext, opnd_create_reg(SCRATCH_REG5),
OPND_DC_FIELD(false, dcontext, OPSZ_1, SIGPENDING_OFFSET)));
APP(&ilist,
INSTR_CREATE_cbz(dcontext, opnd_create_instr(skip_unlinked_tgt_jump),
opnd_create_reg(SCRATCH_REG5)));
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
# if defined(AARCH64)
APP(&ilist,
INSTR_CREATE_ldr(
dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_TLS_FIELD(get_ibl_entry_tls_offs(dcontext, ibl_unlinked_tgt))));
APP(&ilist, XINST_CREATE_jump_reg(dcontext, opnd_create_reg(SCRATCH_REG1)));
# else /* ARM */
/* i#4670: The unlinking case is observed to hit very infrequently on x86.
* The fix has been tested on AArch64 but not on ARM yet.
*/
ASSERT_NOT_TESTED();
/* i#1906: loads to PC must use word-aligned addresses */
ASSERT(
ALIGNED(get_ibl_entry_tls_offs(dcontext, ibl_unlinked_tgt), PC_LOAD_ADDR_ALIGN));
APP(&ilist,
INSTR_CREATE_ldr(
dcontext, opnd_create_reg(DR_REG_PC),
OPND_TLS_FIELD(get_ibl_entry_tls_offs(dcontext, ibl_unlinked_tgt))));
# endif /* AARCH64/ARM */
# endif /* X86/AARCHXX */
APP(&ilist, skip_unlinked_tgt_jump);
# ifdef X86
APP(&ilist,
INSTR_CREATE_xchg(dcontext, opnd_create_reg(DR_REG_XDI),
opnd_create_reg(DR_REG_XCX)));
# endif /* X86 */
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
#endif /* UNIX */
#ifdef X86_64
if (GENCODE_IS_X86(code->gencode_mode))
instrlist_convert_to_x86(&ilist);
#endif
/* do not add new instrs that need conversion to x86 below here! */
#ifdef X86
/* to support patching the 4-byte pc-rel tgt we must ensure it doesn't
* cross a cache line
*/
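    /* Worked example, assuming a 64-byte PAD_JMPS_ALIGNMENT: if the 4-byte
     * operand would start at an address ending in 0x3e it spans 0x3e-0x41 and
     * straddles the 0x40 boundary, so a 2-byte nop is emitted and the operand
     * lands at 0x40-0x43 instead.
     */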
for (len = 0, in = instrlist_first(&ilist); in != NULL; in = instr_get_next(in)) {
len += instr_length(dcontext, in);
}
if (CROSSES_ALIGNMENT(pc + len + 1 /*opcode*/, 4, PAD_JMPS_ALIGNMENT)) {
instr_t *nop_inst;
len = ALIGN_FORWARD(pc + len + 1, 4) - (ptr_uint_t)(pc + len + 1);
nop_inst = INSTR_CREATE_nopNbyte(dcontext, (uint)len);
# ifdef X64
if (GENCODE_IS_X86(code->gencode_mode)) {
instr_set_x86_mode(nop_inst, true /*x86*/);
instr_shrink_to_32_bits(nop_inst);
}
# endif
/* XXX: better to put prior to entry point but then need to change model
* of who assigns entry point
*/
APP(&ilist, nop_inst);
}
APP(&ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(ibl_linked_tgt)));
#elif defined(AARCH64)
APP(&ilist,
INSTR_CREATE_ldr(
dcontext, opnd_create_reg(SCRATCH_REG1),
OPND_TLS_FIELD(get_ibl_entry_tls_offs(dcontext, ibl_linked_tgt))));
APP(&ilist, XINST_CREATE_jump_reg(dcontext, opnd_create_reg(SCRATCH_REG1)));
#elif defined(ARM)
/* i#1906: loads to PC must use word-aligned addresses */
ASSERT(ALIGNED(get_ibl_entry_tls_offs(dcontext, ibl_linked_tgt), PC_LOAD_ADDR_ALIGN));
APP(&ilist,
INSTR_CREATE_ldr(
dcontext, opnd_create_reg(DR_REG_PC),
OPND_TLS_FIELD(get_ibl_entry_tls_offs(dcontext, ibl_linked_tgt))));
#endif
add_patch_marker(&patch, instrlist_last(&ilist), PATCH_UINT_SIZED /* pc relative */,
0 /* point at opcode */,
(ptr_uint_t *)&code->special_ibl_unlink_offs[index]);
/* now encode the instructions */
pc += encode_with_patch_list(dcontext, &patch, &ilist, pc);
ASSERT(pc != NULL);
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
return pc;
}
void
link_special_ibl_xfer(dcontext_t *dcontext)
{
relink_special_ibl_xfer(dcontext, CLIENT_IBL_IDX, IBL_LINKED, IBL_RETURN);
#ifdef UNIX
if (DYNAMO_OPTION(native_exec_opt)) {
relink_special_ibl_xfer(dcontext, NATIVE_PLT_IBL_IDX, IBL_LINKED, IBL_INDCALL);
relink_special_ibl_xfer(dcontext, NATIVE_RET_IBL_IDX, IBL_LINKED, IBL_RETURN);
}
#endif
}
void
unlink_special_ibl_xfer(dcontext_t *dcontext)
{
relink_special_ibl_xfer(dcontext, CLIENT_IBL_IDX, IBL_UNLINKED, IBL_RETURN);
#ifdef UNIX
if (DYNAMO_OPTION(native_exec_opt)) {
relink_special_ibl_xfer(dcontext, NATIVE_PLT_IBL_IDX, IBL_UNLINKED, IBL_INDCALL);
relink_special_ibl_xfer(dcontext, NATIVE_RET_IBL_IDX, IBL_UNLINKED, IBL_RETURN);
}
#endif
}
/* i#849: low-overhead xfer for clients */
byte *
emit_client_ibl_xfer(dcontext_t *dcontext, byte *pc, generated_code_t *code)
{
/* The client puts the target in SPILL_SLOT_REDIRECT_NATIVE_TGT. */
return emit_special_ibl_xfer(
dcontext, pc, code, CLIENT_IBL_IDX, IBL_RETURN, NULL,
reg_spill_slot_opnd(dcontext, SPILL_SLOT_REDIRECT_NATIVE_TGT));
}
/* i#171: out-of-line clean call */
/* XXX: i#1149 the clean call context switch should be shared among all threads */
bool
client_clean_call_is_thread_private(void)
{
#ifdef X64
return false; /* all gencode is shared */
#else
return !USE_SHARED_GENCODE();
#endif
}
byte *
emit_clean_call_save(dcontext_t *dcontext, byte *pc, generated_code_t *code)
{
#ifdef ARM
/* FIXME i#1621: NYI on AArch32 */
return pc;
#endif
instrlist_t ilist;
instrlist_init(&ilist);
/* xref insert_out_of_line_context_switch @ x86/mangle.c,
* stack was adjusted beyond what we place there to get retaddr
* in right spot, adjust the stack back to save context
*/
/* XXX: this LEA can be optimized away by using the LEA
* in insert_push_all_registers
*/
#ifdef X86
APP(&ilist,
INSTR_CREATE_lea(dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
(int)(get_clean_call_switch_stack_size() +
get_clean_call_temp_stack_size() +
XSP_SZ /* return addr */),
OPSZ_lea)));
/* save all registers */
insert_push_all_registers(dcontext, NULL, &ilist, NULL, (uint)PAGE_SIZE,
OPND_CREATE_INT32(0), REG_NULL);
#elif defined(AARCH64)
/* save all registers */
insert_push_all_registers(dcontext, NULL, &ilist, NULL, (uint)PAGE_SIZE,
OPND_CREATE_INT32(0), REG_NULL, true);
#endif
#ifdef WINDOWS
/* i#249: isolate the PEB and TEB */
/* We pay the cost of this extra load of dcontext in order to get
* this code shared (when not shared we place this where we already
* have the dcontext in a register: see prepare_for_clean_call()).
*/
if (SCRATCH_ALWAYS_TLS())
insert_get_mcontext_base(dcontext, &ilist, NULL, SCRATCH_REG0);
preinsert_swap_peb(dcontext, &ilist, NULL, !SCRATCH_ALWAYS_TLS(), SCRATCH_REG0 /*dc*/,
SCRATCH_REG2 /*scratch*/, true /*to priv*/);
/* We also need 2 extra loads to restore the 2 regs, in case the
* clean call passes them as args.
*/
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG0),
OPND_CREATE_MEMPTR(REG_XSP, offsetof(priv_mcontext_t, xax))));
APP(&ilist,
XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2),
OPND_CREATE_MEMPTR(REG_XSP, offsetof(priv_mcontext_t, xcx))));
#endif
/* clear eflags */
insert_clear_eflags(dcontext, NULL, &ilist, NULL);
#ifdef X86
/* return back */
APP(&ilist,
INSTR_CREATE_lea(dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
-(get_clean_call_temp_stack_size() +
(int)XSP_SZ /* return stack */),
OPSZ_lea)));
APP(&ilist,
INSTR_CREATE_ret_imm(dcontext,
OPND_CREATE_INT16(get_clean_call_temp_stack_size())));
#elif defined(AARCH64)
APP(&ilist, INSTR_CREATE_br(dcontext, opnd_create_reg(DR_REG_X30)));
#else
/* FIXME i#1621: NYI on AArch32 */
ASSERT_NOT_IMPLEMENTED(false);
#endif
    /* emit code */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, IF_X86_ELSE(ZMM_ENABLED(), false));
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
instrlist_clear(dcontext, &ilist);
return pc;
}
byte *
emit_clean_call_restore(dcontext_t *dcontext, byte *pc, generated_code_t *code)
{
instrlist_t ilist;
#ifdef ARM
/* FIXME i#1551: NYI on AArch32
* (no assert here, it's in get_clean_call_restore())
*/
return pc;
#endif
instrlist_init(&ilist);
#ifdef WINDOWS
/* i#249: isolate the PEB and TEB */
/* We pay the cost of this extra load of dcontext in order to get
* this code shared (when not shared we place this where we already
* have the dcontext in a register: see cleanup_after_clean_call()).
* The 2 regs are dead as the popa will restore.
*/
if (SCRATCH_ALWAYS_TLS())
insert_get_mcontext_base(dcontext, &ilist, NULL, SCRATCH_REG0);
preinsert_swap_peb(dcontext, &ilist, NULL, !SCRATCH_ALWAYS_TLS(), SCRATCH_REG0 /*dc*/,
SCRATCH_REG2 /*scratch*/, false /*to app*/);
#endif
#ifdef X86
/* adjust the stack for the return target */
APP(&ilist,
INSTR_CREATE_lea(
dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0, (int)XSP_SZ, OPSZ_lea)));
/* restore all registers */
insert_pop_all_registers(dcontext, NULL, &ilist, NULL, (uint)PAGE_SIZE);
/* return back */
/* we adjust lea + ret_imm instead of ind jmp to take advantage of RSB */
APP(&ilist,
INSTR_CREATE_lea(dcontext, opnd_create_reg(DR_REG_XSP),
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
-(get_clean_call_switch_stack_size() +
(int)XSP_SZ /* return address */),
OPSZ_lea)));
APP(&ilist,
INSTR_CREATE_ret_imm(dcontext,
OPND_CREATE_INT16(get_clean_call_switch_stack_size())));
#elif defined(AARCH64)
insert_pop_all_registers(dcontext, NULL, &ilist, NULL, (uint)PAGE_SIZE, true);
APP(&ilist, INSTR_CREATE_br(dcontext, opnd_create_reg(DR_REG_X30)));
#else
/* FIXME i#1621: NYI on AArch32 */
ASSERT_NOT_IMPLEMENTED(false);
#endif
/* emit code */
pc = instrlist_encode_to_copy(dcontext, &ilist, vmcode_get_writable_addr(pc), pc,
NULL, IF_X86_ELSE(ZMM_ENABLED(), false));
ASSERT(pc != NULL);
pc = vmcode_get_executable_addr(pc);
instrlist_clear(dcontext, &ilist);
return pc;
}
/* mirrored inline implementation of set_last_exit() */
void
insert_set_last_exit(dcontext_t *dcontext, linkstub_t *l, instrlist_t *ilist,
instr_t *where, reg_id_t reg_dc)
{
ASSERT(l != NULL);
/* C equivalent:
* dcontext->last_exit = l
*/
insert_mov_immed_ptrsz(
dcontext, (ptr_int_t)l,
opnd_create_dcontext_field_via_reg(dcontext, reg_dc, LAST_EXIT_OFFSET), ilist,
where, NULL, NULL);
/* C equivalent:
* dcontext->last_fragment = linkstub_fragment()
*/
insert_mov_immed_ptrsz(
dcontext, (ptr_int_t)linkstub_fragment(dcontext, l),
opnd_create_dcontext_field_via_reg(dcontext, reg_dc, LAST_FRAG_OFFSET), ilist,
where, NULL, NULL);
/* C equivalent:
* dcontext->coarse_exit.dir_exit = NULL
*/
insert_mov_immed_ptrsz(
dcontext, (ptr_int_t)NULL,
opnd_create_dcontext_field_via_reg(dcontext, reg_dc, COARSE_DIR_EXIT_OFFSET),
ilist, where, NULL, NULL);
}
/* mirrored inline implementation of return_to_native() */
static void
insert_entering_native(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg_dc, reg_id_t reg_scratch)
{
/* FIXME i#2375: for UNIX we need to do what os_thread_not_under_dynamo() does:
* set the signal mask and clear the TLS.
*/
#ifdef WINDOWS
/* FIXME i#1238-c#1: we did not turn off asynch interception in windows */
/* skip C equivalent:
* set_asynch_interception(dcontext->owning_thread, false)
*/
ASSERT_BUG_NUM(1238, false && "set_asynch_interception is not inlined");
#endif
/* C equivalent:
* dcontext->thread_record->under_dynamo_control = false
*/
PRE(ilist, where,
instr_create_restore_from_dc_via_reg(dcontext, reg_dc, reg_scratch,
THREAD_RECORD_OFFSET));
PRE(ilist, where,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEM8(reg_scratch,
offsetof(thread_record_t, under_dynamo_control)),
OPND_CREATE_INT8(false)));
/* C equivalent:
* set_last_exit(dcontext, (linkstub_t *) get_native_exec_linkstub())
*/
insert_set_last_exit(dcontext, (linkstub_t *)get_native_exec_linkstub(), ilist, where,
reg_dc);
/* XXX i#1238-c#4 -native_exec_opt does not support -kstats
* skip C equivalent:
* KSTOP_NOT_MATCHING(dispatch_num_exits)
*/
/* skip C equivalent:
* SYSLOG_INTERNAL_WARNING_ONCE("entered at least one module natively")
*/
/* C equivalent:
* whereami = DR_WHERE_APP
*/
PRE(ilist, where,
instr_create_save_immed_to_dc_via_reg(dcontext, reg_dc, WHEREAMI_OFFSET,
(ptr_int_t)DR_WHERE_APP, OPSZ_4));
/* skip C equivalent:
* STATS_INC(num_native_module_enter)
*/
}
/* mirrored inline implementation of return_to_native()
* two registers are needed:
* - reg_dc holds the dcontext
* - reg_scratch is the scratch register.
*/
void
insert_return_to_native(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg_dc, reg_id_t reg_scratch)
{
/* skip C equivalent:
* ENTERING_DR()
*/
ASSERT(dcontext != NULL);
/* C equivalent:
* entering_native(dcontext)
*/
insert_entering_native(dcontext, ilist, where, reg_dc, reg_scratch);
/* skip C equivalent:
* EXITING_DR()
*/
}
#if defined(UNIX)
static void
insert_entering_non_native(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg_dc, reg_id_t reg_scratch)
{
/* FIXME i#2375: for UNIX we need to do what os_thread_re_take_over() and
* os_thread_under_dynamo() do: reinstate the TLS and restore the signal mask.
*/
/* C equivalent:
* dcontext->thread_record->under_dynamo_control = true
*/
PRE(ilist, where,
instr_create_restore_from_dc_via_reg(dcontext, reg_dc, reg_scratch,
THREAD_RECORD_OFFSET));
PRE(ilist, where,
XINST_CREATE_store(
dcontext,
OPND_CREATE_MEM8(reg_scratch,
offsetof(thread_record_t, under_dynamo_control)),
OPND_CREATE_INT8(true)));
/* C equivalent:
* set_last_exit(dcontext, (linkstub_t *) get_native_exec_linkstub())
*/
insert_set_last_exit(dcontext, (linkstub_t *)get_native_exec_linkstub(), ilist, where,
reg_dc);
/* C equivalent:
* whereami = DR_WHERE_FCACHE
*/
PRE(ilist, where,
instr_create_save_immed_to_dc_via_reg(dcontext, reg_dc, WHEREAMI_OFFSET,
(ptr_int_t)DR_WHERE_FCACHE, OPSZ_4));
}
/* Emit code to transfer execution from a native module to the code cache of a
 * non-native module via PLT calls.
 * The emitted code updates some dcontext fields, such as whereami and
 * last_exit, and jumps to the ibl to look up the target code fragment.
 * We assume %XAX holds the target and can be clobbered.
 */
byte *
emit_native_plt_ibl_xfer(dcontext_t *dcontext, byte *pc, generated_code_t *code)
{
instrlist_t ilist;
opnd_t tgt = opnd_create_reg(SCRATCH_REG0);
ASSERT(DYNAMO_OPTION(native_exec_opt));
instrlist_init(&ilist);
insert_shared_get_dcontext(dcontext, &ilist, NULL, true);
insert_entering_non_native(dcontext, &ilist, NULL, REG_NULL, SCRATCH_REG0);
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
return emit_special_ibl_xfer(dcontext, pc, code, NATIVE_PLT_IBL_IDX, IBL_INDCALL,
&ilist, tgt);
}
/* Emit code to transfer execution from a native module to the code cache of a
 * non-native module via return.
 * The emitted code updates some dcontext fields, such as whereami and
 * last_exit, and jumps to the ibl to look up the target code fragment.
 * We assume %XAX holds the target and must be restored from TLS_REG0_SLOT
 * before jumping to the ibl.
 */
byte *
emit_native_ret_ibl_xfer(dcontext_t *dcontext, byte *pc, generated_code_t *code)
{
instrlist_t ilist;
opnd_t tgt = opnd_create_reg(SCRATCH_REG0);
ASSERT(DYNAMO_OPTION(native_exec_opt));
instrlist_init(&ilist);
insert_shared_get_dcontext(dcontext, &ilist, NULL, true);
insert_entering_non_native(dcontext, &ilist, NULL, REG_NULL, SCRATCH_REG0);
insert_shared_restore_dcontext_reg(dcontext, &ilist, NULL);
/* restore xax */
APP(&ilist, instr_create_restore_from_tls(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
return emit_special_ibl_xfer(dcontext, pc, code, NATIVE_RET_IBL_IDX, IBL_RETURN,
&ilist, tgt);
}
#endif /* UNIX */
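For illustration only: a hedged sketch of keeping an unlink-offset change local to emit_special_ibl_xfer() by marking the second-to-last instruction instead of the last one. instrlist_last, instr_get_prev, and add_patch_marker are the real APIs used above; the placement itself is an assumption, not the project's actual fix.

    /* sketch: mark the instruction just before the final jump */
    instr_t *final_jmp = instrlist_last(&ilist);
    instr_t *prior = instr_get_prev(final_jmp); /* assumes >= 2 instrs present */
    add_patch_marker(&patch, prior, PATCH_UINT_SIZED /* pc relative */,
                     0 /* point at opcode */,
                     (ptr_uint_t *)&code->special_ibl_unlink_offs[index]);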
| 1 | 24,882 | Patch lists are used in many places so this is going to break other updates. This should just be a local change in emit_special_ibl_xfer() to pass the second-to-last instruction instead of the last instruction. | DynamoRIO-dynamorio | c |
@@ -38,7 +38,7 @@ char const * test_genesis_data = R"%%%({
"source": "B0311EA55708D6A53C75CDBF88300259C6D018522FE3D4D0A242E431F9E8B6D0",
"representative": "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpiij4txtdo",
"account": "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpiij4txtdo",
- "work": "9680625b39d3363d",
+ "work": "7b42a00ee91d5810",
"signature": "ECDA914373A2F0CA1296475BAEE40500A7F0A7AD72A5A80C81D7FAB7F6C802B2CC7DB50F5DD0FB25B2EF11761FA7344A158DD5A700B21BD47DE5BD0F63153A02"
})%%%";
| 1 | #include <nano/secure/common.hpp>
#include <nano/crypto_lib/random_pool.hpp>
#include <nano/lib/interface.h>
#include <nano/lib/numbers.hpp>
#include <nano/node/common.hpp>
#include <nano/secure/blockstore.hpp>
#include <nano/secure/versioning.hpp>
#include <boost/endian/conversion.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <queue>
#include <iostream>
#include <limits>
#include <nano/core_test/testutil.hpp>
#include <nano/lib/config.hpp>
#include <crypto/ed25519-donna/ed25519.h>
size_t constexpr nano::send_block::size;
size_t constexpr nano::receive_block::size;
size_t constexpr nano::open_block::size;
size_t constexpr nano::change_block::size;
size_t constexpr nano::state_block::size;
nano::nano_networks nano::network_constants::active_network = nano::nano_networks::ACTIVE_NETWORK;
namespace
{
char const * test_private_key_data = "34F0A37AAD20F4A260F0A5B3CB3D7FB50673212263E58A380BC10474BB039CE4";
char const * test_public_key_data = "B0311EA55708D6A53C75CDBF88300259C6D018522FE3D4D0A242E431F9E8B6D0"; // xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpiij4txtdo
char const * beta_public_key_data = "A59A47CC4F593E75AE9AD653FDA9358E2F7898D9ACC8C60E80D0495CE20FBA9F"; // xrb_3betaz86ypbygpqbookmzpnmd5jhh4efmd8arr9a3n4bdmj1zgnzad7xpmfp
char const * live_public_key_data = "E89208DD038FBB269987689621D52292AE9C35941A7484756ECCED92A65093BA"; // xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3
char const * test_genesis_data = R"%%%({
"type": "open",
"source": "B0311EA55708D6A53C75CDBF88300259C6D018522FE3D4D0A242E431F9E8B6D0",
"representative": "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpiij4txtdo",
"account": "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpiij4txtdo",
"work": "9680625b39d3363d",
"signature": "ECDA914373A2F0CA1296475BAEE40500A7F0A7AD72A5A80C81D7FAB7F6C802B2CC7DB50F5DD0FB25B2EF11761FA7344A158DD5A700B21BD47DE5BD0F63153A02"
})%%%";
char const * beta_genesis_data = R"%%%({
"type": "open",
"source": "A59A47CC4F593E75AE9AD653FDA9358E2F7898D9ACC8C60E80D0495CE20FBA9F",
"representative": "xrb_3betaz86ypbygpqbookmzpnmd5jhh4efmd8arr9a3n4bdmj1zgnzad7xpmfp",
"account": "xrb_3betaz86ypbygpqbookmzpnmd5jhh4efmd8arr9a3n4bdmj1zgnzad7xpmfp",
"work": "000000000f0aaeeb",
"signature": "A726490E3325E4FA59C1C900D5B6EEBB15FE13D99F49D475B93F0AACC5635929A0614CF3892764A04D1C6732A0D716FFEB254D4154C6F544D11E6630F201450B"
})%%%";
char const * live_genesis_data = R"%%%({
"type": "open",
"source": "E89208DD038FBB269987689621D52292AE9C35941A7484756ECCED92A65093BA",
"representative": "xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3",
"account": "xrb_3t6k35gi95xu6tergt6p69ck76ogmitsa8mnijtpxm9fkcm736xtoncuohr3",
"work": "62f05417dd3fb691",
"signature": "9F0C933C8ADE004D808EA1985FA746A7E95BA2A38F867640F53EC8F180BDFE9E2C1268DEAD7C2664F356E37ABA362BC58E46DBA03E523A7B5A19E4B6EB12BB02"
})%%%";
}
nano::network_params::network_params () :
network_params (network_constants::active_network)
{
}
nano::network_params::network_params (nano::nano_networks network_a) :
network (network_a), ledger (network), voting (network), node (network), portmapping (network), bootstrap (network)
{
unsigned constexpr kdf_full_work = 64 * 1024;
unsigned constexpr kdf_test_work = 8;
kdf_work = network.is_test_network () ? kdf_test_work : kdf_full_work;
header_magic_number = network.is_test_network () ? std::array<uint8_t, 2>{ { 'R', 'A' } } : network.is_beta_network () ? std::array<uint8_t, 2>{ { 'R', 'B' } } : std::array<uint8_t, 2>{ { 'R', 'C' } };
}
nano::ledger_constants::ledger_constants (nano::network_constants & network_constants) :
ledger_constants (network_constants.network ())
{
}
nano::ledger_constants::ledger_constants (nano::nano_networks network_a) :
zero_key ("0"),
test_genesis_key (test_private_key_data),
nano_test_account (test_public_key_data),
nano_beta_account (beta_public_key_data),
nano_live_account (live_public_key_data),
nano_test_genesis (test_genesis_data),
nano_beta_genesis (beta_genesis_data),
nano_live_genesis (live_genesis_data),
genesis_account (network_a == nano::nano_networks::nano_test_network ? nano_test_account : network_a == nano::nano_networks::nano_beta_network ? nano_beta_account : nano_live_account),
genesis_block (network_a == nano::nano_networks::nano_test_network ? nano_test_genesis : network_a == nano::nano_networks::nano_beta_network ? nano_beta_genesis : nano_live_genesis),
genesis_amount (std::numeric_limits<nano::uint128_t>::max ()),
burn_account (0)
{
}
nano::random_constants::random_constants ()
{
nano::random_pool::generate_block (not_an_account.bytes.data (), not_an_account.bytes.size ());
}
nano::node_constants::node_constants (nano::network_constants & network_constants)
{
period = network_constants.is_test_network () ? std::chrono::seconds (1) : std::chrono::seconds (60);
cutoff = period * 5;
syn_cookie_cutoff = std::chrono::seconds (5);
backup_interval = std::chrono::minutes (5);
search_pending_interval = network_constants.is_test_network () ? std::chrono::seconds (1) : std::chrono::seconds (5 * 60);
peer_interval = search_pending_interval;
unchecked_cleaning_interval = std::chrono::hours (2);
process_confirmed_interval = network_constants.is_test_network () ? std::chrono::milliseconds (50) : std::chrono::milliseconds (500);
max_weight_samples = network_constants.is_live_network () ? 4032 : 864;
weight_period = 5 * 60; // 5 minutes
}
nano::voting_constants::voting_constants (nano::network_constants & network_constants)
{
max_cache = network_constants.is_test_network () ? 2 : 1000;
}
nano::portmapping_constants::portmapping_constants (nano::network_constants & network_constants)
{
mapping_timeout = network_constants.is_test_network () ? 53 : 3593;
check_timeout = network_constants.is_test_network () ? 17 : 53;
}
nano::bootstrap_constants::bootstrap_constants (nano::network_constants & network_constants)
{
lazy_max_pull_blocks = network_constants.is_test_network () ? 2 : 512;
}
/* Convenience constants for core_test which is always on the test network */
namespace
{
nano::ledger_constants test_constants (nano::nano_networks::nano_test_network);
}
nano::keypair const & nano::zero_key (test_constants.zero_key);
nano::keypair const & nano::test_genesis_key (test_constants.test_genesis_key);
nano::account const & nano::nano_test_account (test_constants.nano_test_account);
std::string const & nano::nano_test_genesis (test_constants.nano_test_genesis);
nano::account const & nano::genesis_account (test_constants.genesis_account);
std::string const & nano::genesis_block (test_constants.genesis_block);
nano::uint128_t const & nano::genesis_amount (test_constants.genesis_amount);
nano::account const & nano::burn_account (test_constants.burn_account);
// Create a new random keypair
nano::keypair::keypair ()
{
random_pool::generate_block (prv.data.bytes.data (), prv.data.bytes.size ());
ed25519_publickey (prv.data.bytes.data (), pub.bytes.data ());
}
// Create a keypair given a private key
nano::keypair::keypair (nano::raw_key && prv_a) :
prv (std::move (prv_a))
{
ed25519_publickey (prv.data.bytes.data (), pub.bytes.data ());
}
// Create a keypair given a hex string of the private key
nano::keypair::keypair (std::string const & prv_a)
{
auto error (prv.data.decode_hex (prv_a));
assert (!error);
ed25519_publickey (prv.data.bytes.data (), pub.bytes.data ());
}
// Serialize a block prefixed with an 8-bit typecode
void nano::serialize_block (nano::stream & stream_a, nano::block const & block_a)
{
write (stream_a, block_a.type ());
block_a.serialize (stream_a);
}
nano::account_info::account_info (nano::block_hash const & head_a, nano::block_hash const & rep_block_a, nano::block_hash const & open_block_a, nano::amount const & balance_a, uint64_t modified_a, uint64_t block_count_a, uint64_t confirmation_height_a, nano::epoch epoch_a) :
head (head_a),
rep_block (rep_block_a),
open_block (open_block_a),
balance (balance_a),
modified (modified_a),
block_count (block_count_a),
confirmation_height (confirmation_height_a),
epoch (epoch_a)
{
}
bool nano::account_info::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
nano::read (stream_a, head.bytes);
nano::read (stream_a, rep_block.bytes);
nano::read (stream_a, open_block.bytes);
nano::read (stream_a, balance.bytes);
nano::read (stream_a, modified);
nano::read (stream_a, block_count);
nano::read (stream_a, confirmation_height);
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
bool nano::account_info::operator== (nano::account_info const & other_a) const
{
return head == other_a.head && rep_block == other_a.rep_block && open_block == other_a.open_block && balance == other_a.balance && modified == other_a.modified && block_count == other_a.block_count && confirmation_height == other_a.confirmation_height && epoch == other_a.epoch;
}
bool nano::account_info::operator!= (nano::account_info const & other_a) const
{
return !(*this == other_a);
}
size_t nano::account_info::db_size () const
{
assert (reinterpret_cast<const uint8_t *> (this) == reinterpret_cast<const uint8_t *> (&head));
assert (reinterpret_cast<const uint8_t *> (&head) + sizeof (head) == reinterpret_cast<const uint8_t *> (&rep_block));
assert (reinterpret_cast<const uint8_t *> (&rep_block) + sizeof (rep_block) == reinterpret_cast<const uint8_t *> (&open_block));
assert (reinterpret_cast<const uint8_t *> (&open_block) + sizeof (open_block) == reinterpret_cast<const uint8_t *> (&balance));
assert (reinterpret_cast<const uint8_t *> (&balance) + sizeof (balance) == reinterpret_cast<const uint8_t *> (&modified));
assert (reinterpret_cast<const uint8_t *> (&modified) + sizeof (modified) == reinterpret_cast<const uint8_t *> (&block_count));
assert (reinterpret_cast<const uint8_t *> (&block_count) + sizeof (block_count) == reinterpret_cast<const uint8_t *> (&confirmation_height));
return sizeof (head) + sizeof (rep_block) + sizeof (open_block) + sizeof (balance) + sizeof (modified) + sizeof (block_count) + sizeof (confirmation_height);
}
size_t nano::block_counts::sum () const
{
return send + receive + open + change + state_v0 + state_v1;
}
nano::pending_info::pending_info (nano::account const & source_a, nano::amount const & amount_a, nano::epoch epoch_a) :
source (source_a),
amount (amount_a),
epoch (epoch_a)
{
}
bool nano::pending_info::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
nano::read (stream_a, source.bytes);
nano::read (stream_a, amount.bytes);
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
bool nano::pending_info::operator== (nano::pending_info const & other_a) const
{
return source == other_a.source && amount == other_a.amount && epoch == other_a.epoch;
}
nano::pending_key::pending_key (nano::account const & account_a, nano::block_hash const & hash_a) :
account (account_a),
hash (hash_a)
{
}
bool nano::pending_key::deserialize (nano::stream & stream_a)
{
auto error (false);
try
{
nano::read (stream_a, account.bytes);
nano::read (stream_a, hash.bytes);
}
catch (std::runtime_error const &)
{
error = true;
}
return error;
}
bool nano::pending_key::operator== (nano::pending_key const & other_a) const
{
return account == other_a.account && hash == other_a.hash;
}
nano::block_hash nano::pending_key::key () const
{
return account;
}
nano::unchecked_info::unchecked_info (std::shared_ptr<nano::block> block_a, nano::account const & account_a, uint64_t modified_a, nano::signature_verification verified_a) :
block (block_a),
account (account_a),
modified (modified_a),
verified (verified_a)
{
}
void nano::unchecked_info::serialize (nano::stream & stream_a) const
{
assert (block != nullptr);
nano::serialize_block (stream_a, *block);
nano::write (stream_a, account.bytes);
nano::write (stream_a, modified);
nano::write (stream_a, verified);
}
bool nano::unchecked_info::deserialize (nano::stream & stream_a)
{
block = nano::deserialize_block (stream_a);
bool error (block == nullptr);
if (!error)
{
try
{
nano::read (stream_a, account.bytes);
nano::read (stream_a, modified);
nano::read (stream_a, verified);
}
catch (std::runtime_error const &)
{
error = true;
}
}
return error;
}
nano::endpoint_key::endpoint_key (const std::array<uint8_t, 16> & address_a, uint16_t port_a) :
address (address_a), network_port (boost::endian::native_to_big (port_a))
{
}
const std::array<uint8_t, 16> & nano::endpoint_key::address_bytes () const
{
return address;
}
uint16_t nano::endpoint_key::port () const
{
return boost::endian::big_to_native (network_port);
}
nano::block_info::block_info (nano::account const & account_a, nano::amount const & balance_a) :
account (account_a),
balance (balance_a)
{
}
bool nano::vote::operator== (nano::vote const & other_a) const
{
auto blocks_equal (true);
if (blocks.size () != other_a.blocks.size ())
{
blocks_equal = false;
}
else
{
for (auto i (0); blocks_equal && i < blocks.size (); ++i)
{
auto block (blocks[i]);
auto other_block (other_a.blocks[i]);
if (block.which () != other_block.which ())
{
blocks_equal = false;
}
else if (block.which ())
{
if (boost::get<nano::block_hash> (block) != boost::get<nano::block_hash> (other_block))
{
blocks_equal = false;
}
}
else
{
if (!(*boost::get<std::shared_ptr<nano::block>> (block) == *boost::get<std::shared_ptr<nano::block>> (other_block)))
{
blocks_equal = false;
}
}
}
}
return sequence == other_a.sequence && blocks_equal && account == other_a.account && signature == other_a.signature;
}
bool nano::vote::operator!= (nano::vote const & other_a) const
{
return !(*this == other_a);
}
void nano::vote::serialize_json (boost::property_tree::ptree & tree) const
{
tree.put ("account", account.to_account ());
tree.put ("signature", signature.number ());
tree.put ("sequence", std::to_string (sequence));
boost::property_tree::ptree blocks_tree;
for (auto block : blocks)
{
boost::property_tree::ptree entry;
if (block.which ())
{
entry.put ("", boost::get<nano::block_hash> (block).to_string ());
}
else
{
entry.put ("", boost::get<std::shared_ptr<nano::block>> (block)->hash ().to_string ());
}
blocks_tree.push_back (std::make_pair ("", entry));
}
tree.add_child ("blocks", blocks_tree);
}
std::string nano::vote::to_json () const
{
std::stringstream stream;
boost::property_tree::ptree tree;
serialize_json (tree);
boost::property_tree::write_json (stream, tree);
return stream.str ();
}
nano::vote::vote (nano::vote const & other_a) :
sequence (other_a.sequence),
blocks (other_a.blocks),
account (other_a.account),
signature (other_a.signature)
{
}
nano::vote::vote (bool & error_a, nano::stream & stream_a, nano::block_uniquer * uniquer_a)
{
error_a = deserialize (stream_a, uniquer_a);
}
nano::vote::vote (bool & error_a, nano::stream & stream_a, nano::block_type type_a, nano::block_uniquer * uniquer_a)
{
try
{
nano::read (stream_a, account.bytes);
nano::read (stream_a, signature.bytes);
nano::read (stream_a, sequence);
while (stream_a.in_avail () > 0)
{
if (type_a == nano::block_type::not_a_block)
{
nano::block_hash block_hash;
nano::read (stream_a, block_hash);
blocks.push_back (block_hash);
}
else
{
std::shared_ptr<nano::block> block (nano::deserialize_block (stream_a, type_a, uniquer_a));
if (block == nullptr)
{
throw std::runtime_error ("Block is null");
}
blocks.push_back (block);
}
}
}
catch (std::runtime_error const &)
{
error_a = true;
}
if (blocks.empty ())
{
error_a = true;
}
}
nano::vote::vote (nano::account const & account_a, nano::raw_key const & prv_a, uint64_t sequence_a, std::shared_ptr<nano::block> block_a) :
sequence (sequence_a),
blocks (1, block_a),
account (account_a),
signature (nano::sign_message (prv_a, account_a, hash ()))
{
}
nano::vote::vote (nano::account const & account_a, nano::raw_key const & prv_a, uint64_t sequence_a, std::vector<nano::block_hash> const & blocks_a) :
sequence (sequence_a),
account (account_a)
{
assert (!blocks_a.empty ());
assert (blocks_a.size () <= 12);
blocks.reserve (blocks_a.size ());
std::copy (blocks_a.cbegin (), blocks_a.cend (), std::back_inserter (blocks));
signature = nano::sign_message (prv_a, account_a, hash ());
}
std::string nano::vote::hashes_string () const
{
std::string result;
for (auto hash : *this)
{
result += hash.to_string ();
result += ", ";
}
return result;
}
const std::string nano::vote::hash_prefix = "vote ";
nano::uint256_union nano::vote::hash () const
{
nano::uint256_union result;
blake2b_state hash;
blake2b_init (&hash, sizeof (result.bytes));
if (blocks.size () > 1 || (!blocks.empty () && blocks.front ().which ()))
{
blake2b_update (&hash, hash_prefix.data (), hash_prefix.size ());
}
for (auto block_hash : *this)
{
blake2b_update (&hash, block_hash.bytes.data (), sizeof (block_hash.bytes));
}
union
{
uint64_t qword;
std::array<uint8_t, 8> bytes;
};
qword = sequence;
blake2b_update (&hash, bytes.data (), sizeof (bytes));
blake2b_final (&hash, result.bytes.data (), sizeof (result.bytes));
return result;
}
nano::uint256_union nano::vote::full_hash () const
{
nano::uint256_union result;
blake2b_state state;
blake2b_init (&state, sizeof (result.bytes));
blake2b_update (&state, hash ().bytes.data (), sizeof (hash ().bytes));
	// Hash the full byte arrays; sizeof (...data ()) would only measure the pointer.
	blake2b_update (&state, account.bytes.data (), sizeof (account.bytes));
	blake2b_update (&state, signature.bytes.data (), sizeof (signature.bytes));
blake2b_final (&state, result.bytes.data (), sizeof (result.bytes));
return result;
}
void nano::vote::serialize (nano::stream & stream_a, nano::block_type type) const
{
write (stream_a, account);
write (stream_a, signature);
write (stream_a, sequence);
for (auto const & block : blocks)
{
if (block.which ())
{
assert (type == nano::block_type::not_a_block);
write (stream_a, boost::get<nano::block_hash> (block));
}
else
{
if (type == nano::block_type::not_a_block)
{
write (stream_a, boost::get<std::shared_ptr<nano::block>> (block)->hash ());
}
else
{
boost::get<std::shared_ptr<nano::block>> (block)->serialize (stream_a);
}
}
}
}
void nano::vote::serialize (nano::stream & stream_a) const
{
write (stream_a, account);
write (stream_a, signature);
write (stream_a, sequence);
for (auto const & block : blocks)
{
if (block.which ())
{
write (stream_a, nano::block_type::not_a_block);
write (stream_a, boost::get<nano::block_hash> (block));
}
else
{
nano::serialize_block (stream_a, *boost::get<std::shared_ptr<nano::block>> (block));
}
}
}
bool nano::vote::deserialize (nano::stream & stream_a, nano::block_uniquer * uniquer_a)
{
auto error (false);
try
{
nano::read (stream_a, account);
nano::read (stream_a, signature);
nano::read (stream_a, sequence);
nano::block_type type;
while (true)
{
if (nano::try_read (stream_a, type))
{
// Reached the end of the stream
break;
}
if (type == nano::block_type::not_a_block)
{
nano::block_hash block_hash;
nano::read (stream_a, block_hash);
blocks.push_back (block_hash);
}
else
{
std::shared_ptr<nano::block> block (nano::deserialize_block (stream_a, type, uniquer_a));
if (block == nullptr)
{
throw std::runtime_error ("Block is empty");
}
blocks.push_back (block);
}
}
}
catch (std::runtime_error const &)
{
error = true;
}
if (blocks.empty ())
{
error = true;
}
return error;
}
bool nano::vote::validate () const
{
return nano::validate_message (account, hash (), signature);
}
nano::block_hash nano::iterate_vote_blocks_as_hash::operator() (boost::variant<std::shared_ptr<nano::block>, nano::block_hash> const & item) const
{
nano::block_hash result;
if (item.which ())
{
result = boost::get<nano::block_hash> (item);
}
else
{
result = boost::get<std::shared_ptr<nano::block>> (item)->hash ();
}
return result;
}
boost::transform_iterator<nano::iterate_vote_blocks_as_hash, nano::vote_blocks_vec_iter> nano::vote::begin () const
{
return boost::transform_iterator<nano::iterate_vote_blocks_as_hash, nano::vote_blocks_vec_iter> (blocks.begin (), nano::iterate_vote_blocks_as_hash ());
}
boost::transform_iterator<nano::iterate_vote_blocks_as_hash, nano::vote_blocks_vec_iter> nano::vote::end () const
{
return boost::transform_iterator<nano::iterate_vote_blocks_as_hash, nano::vote_blocks_vec_iter> (blocks.end (), nano::iterate_vote_blocks_as_hash ());
}
nano::vote_uniquer::vote_uniquer (nano::block_uniquer & uniquer_a) :
uniquer (uniquer_a)
{
}
std::shared_ptr<nano::vote> nano::vote_uniquer::unique (std::shared_ptr<nano::vote> vote_a)
{
auto result (vote_a);
if (result != nullptr && !result->blocks.empty ())
{
if (!result->blocks.front ().which ())
{
result->blocks.front () = uniquer.unique (boost::get<std::shared_ptr<nano::block>> (result->blocks.front ()));
}
nano::uint256_union key (vote_a->full_hash ());
std::lock_guard<std::mutex> lock (mutex);
auto & existing (votes[key]);
if (auto block_l = existing.lock ())
{
result = block_l;
}
else
{
existing = vote_a;
}
release_assert (std::numeric_limits<CryptoPP::word32>::max () > votes.size ());
for (auto i (0); i < cleanup_count && !votes.empty (); ++i)
{
auto random_offset = nano::random_pool::generate_word32 (0, static_cast<CryptoPP::word32> (votes.size () - 1));
auto existing (std::next (votes.begin (), random_offset));
if (existing == votes.end ())
{
existing = votes.begin ();
}
if (existing != votes.end ())
{
if (auto block_l = existing->second.lock ())
{
// Still live
}
else
{
votes.erase (existing);
}
}
}
}
return result;
}
size_t nano::vote_uniquer::size ()
{
std::lock_guard<std::mutex> lock (mutex);
return votes.size ();
}
namespace nano
{
std::unique_ptr<seq_con_info_component> collect_seq_con_info (vote_uniquer & vote_uniquer, const std::string & name)
{
auto count = vote_uniquer.size ();
auto sizeof_element = sizeof (vote_uniquer::value_type);
auto composite = std::make_unique<seq_con_info_composite> (name);
composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "votes", count, sizeof_element }));
return composite;
}
}
nano::genesis::genesis ()
{
static nano::network_params network_params;
boost::property_tree::ptree tree;
std::stringstream istream (network_params.ledger.genesis_block);
boost::property_tree::read_json (istream, tree);
open = nano::deserialize_block_json (tree);
assert (open != nullptr);
}
nano::block_hash nano::genesis::hash () const
{
return open->hash ();
}
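A hedged C++ sketch of the constraint described in the review note below: a test-network genesis work value should clear the easy test threshold while staying under the live one, so live-level difficulty cannot bleed into other blocks processed in tests. The two constants are assumptions modeled on nano's thresholds at the time, not authoritative values.

#include <cstdint>

uint64_t constexpr assumed_test_threshold = 0xff00000000000000ULL;
uint64_t constexpr assumed_live_threshold = 0xffffffc000000000ULL;

// True when the work value would also satisfy the live network, which is the
// situation the replacement "work" value avoids.
bool bleeds_into_live (uint64_t work_value)
{
	return work_value >= assumed_live_threshold;
}

bool suitable_test_genesis_work (uint64_t work_value)
{
	return work_value >= assumed_test_threshold && !bleeds_into_live (work_value);
}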
| 1 | 15,543 | @argakiig previous work was above live threshold, causing the high difficulty to bleed into other blocks when genesis open was processed in tests. | nanocurrency-nano-node | cpp |
@@ -1,4 +1,4 @@
-class RemoveDiscountPercentageAndDiscountTitleFromProducts < ActiveRecord::Migration
+class RemoveDiscountPercentageAndDiscountTitleFromProducts < ActiveRecord::Migration[4.2]
def change
remove_column :products, :discount_percentage, :integer
remove_column :products, :discount_title, :string | 1 | class RemoveDiscountPercentageAndDiscountTitleFromProducts < ActiveRecord::Migration
def change
remove_column :products, :discount_percentage, :integer
remove_column :products, :discount_title, :string
end
end
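A hedged sketch of one conventional way to address the Metrics/LineLength offense noted below without rewrapping the declaration; whether this project permits inline rubocop directives is an assumption.

class RemoveDiscountPercentageAndDiscountTitleFromProducts < ActiveRecord::Migration[4.2] # rubocop:disable Metrics/LineLength
  def change
    remove_column :products, :discount_percentage, :integer
    remove_column :products, :discount_title, :string
  end
end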
| 1 | 18,645 | Metrics/LineLength: Line is too long. [89/80] | thoughtbot-upcase | rb |
@@ -30,6 +30,7 @@ type SystemConfig struct {
Encryption RootEncryption `json:"Encryption"`
RemoveRpmDb bool `json:"RemoveRpmDb"`
ReadOnlyVerityRoot ReadOnlyVerityRoot `json:"ReadOnlyVerityRoot"`
+ HidepidDisable bool `json:"HidepidDisable"`
}
// GetRootPartitionSetting returns a pointer to the partition setting describing the disk which | 1 | // Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// Parser for the image builder's configuration schemas.
package configuration
import (
"encoding/json"
"fmt"
"strings"
"microsoft.com/pkggen/internal/logger"
)
// SystemConfig defines how each system present on the image is supposed to be configured.
type SystemConfig struct {
IsDefault bool `json:"IsDefault"`
BootType string `json:"BootType"`
Hostname string `json:"Hostname"`
Name string `json:"Name"`
PackageLists []string `json:"PackageLists"`
KernelOptions map[string]string `json:"KernelOptions"`
KernelCommandLine KernelCommandLine `json:"KernelCommandLine"`
AdditionalFiles map[string]string `json:"AdditionalFiles"`
PartitionSettings []PartitionSetting `json:"PartitionSettings"`
PostInstallScripts []PostInstallScript `json:"PostInstallScripts"`
Groups []Group `json:"Groups"`
Users []User `json:"Users"`
Encryption RootEncryption `json:"Encryption"`
RemoveRpmDb bool `json:"RemoveRpmDb"`
ReadOnlyVerityRoot ReadOnlyVerityRoot `json:"ReadOnlyVerityRoot"`
}
// GetRootPartitionSetting returns a pointer to the partition setting describing the disk which
// will be mounted at "/", or nil if no partition is found
func (s *SystemConfig) GetRootPartitionSetting() (rootPartitionSetting *PartitionSetting) {
for i, p := range s.PartitionSettings {
if p.MountPoint == "/" {
		// We want to reference the actual object in the slice
return &s.PartitionSettings[i]
}
}
return nil
}
// IsValid returns an error if the SystemConfig is not valid
func (s *SystemConfig) IsValid() (err error) {
// IsDefault must be validated by a parent struct
// Validate BootType
// Validate HostName
if strings.TrimSpace(s.Name) == "" {
return fmt.Errorf("missing [Name] field")
}
if len(s.PackageLists) == 0 {
return fmt.Errorf("system configuration must provide at least one package list inside the [PackageLists] field")
}
	// Additional package list validation must be done via the imageconfigvalidator tool since there is no guarantee that
	// the paths are valid at this point.
// Enforce that any non-rootfs configuration has a default kernel.
if len(s.PartitionSettings) != 0 {
// Ensure that default option is always present
if _, ok := s.KernelOptions["default"]; !ok {
return fmt.Errorf("system configuration must always provide default kernel inside the [KernelOptions] field; remember that kernels are FORBIDDEN from appearing in any of the [PackageLists]")
}
}
// A rootfs MAY include a kernel (ISO), so run the full checks even if this is a rootfs
if len(s.KernelOptions) != 0 {
// Ensure that non-comment options are not blank
for name, kernelName := range s.KernelOptions {
// Skip comments
if name[0] == '_' {
continue
}
if strings.TrimSpace(kernelName) == "" {
return fmt.Errorf("empty kernel entry found in the [KernelOptions] field (%s); remember that kernels are FORBIDDEN from appearing in any of the [PackageLists]", name)
}
}
}
// Validate the partitions this system config will be including
mountPointUsed := make(map[string]bool)
for _, partitionSetting := range s.PartitionSettings {
if err = partitionSetting.IsValid(); err != nil {
return fmt.Errorf("invalid [PartitionSettings]: %w", err)
}
if mountPointUsed[partitionSetting.MountPoint] {
return fmt.Errorf("invalid [PartitionSettings]: duplicate mount point found at '%s'", partitionSetting.MountPoint)
}
if partitionSetting.MountPoint != "" {
// Don't track unmounted partition duplication (They will all mount at "")
mountPointUsed[partitionSetting.MountPoint] = true
}
}
if s.ReadOnlyVerityRoot.Enable || s.Encryption.Enable {
if len(mountPointUsed) == 0 {
logger.Log.Warnf("[ReadOnlyVerityRoot] or [Encryption] is enabled, but no partitions are listed as part of System Config '%s'. This is only valid for ISO installers", s.Name)
} else {
if !mountPointUsed["/"] {
return fmt.Errorf("invalid [ReadOnlyVerityRoot] or [Encryption]: must have a partition mounted at '/'")
}
if s.ReadOnlyVerityRoot.Enable && s.Encryption.Enable {
return fmt.Errorf("invalid [ReadOnlyVerityRoot] and [Encryption]: verity root currently does not support root encryption")
}
if s.ReadOnlyVerityRoot.Enable && !mountPointUsed["/boot"] {
return fmt.Errorf("invalid [ReadOnlyVerityRoot]: must have a separate partition mounted at '/boot'")
}
}
}
if err = s.ReadOnlyVerityRoot.IsValid(); err != nil {
return fmt.Errorf("invalid [ReadOnlyVerityRoot]: %w", err)
}
if err = s.KernelCommandLine.IsValid(); err != nil {
return fmt.Errorf("invalid [KernelCommandLine]: %w", err)
}
//Validate PostInstallScripts
//Validate Groups
//Validate Users
for _, b := range s.Users {
if err = b.IsValid(); err != nil {
return fmt.Errorf("invalid [User]: %w", err)
}
}
//Validate Encryption
return
}
// UnmarshalJSON Unmarshals a Disk entry
func (s *SystemConfig) UnmarshalJSON(b []byte) (err error) {
// Use an intermediate type which will use the default JSON unmarshal implementation
type IntermediateTypeSystemConfig SystemConfig
err = json.Unmarshal(b, (*IntermediateTypeSystemConfig)(s))
if err != nil {
return fmt.Errorf("failed to parse [SystemConfig]: %w", err)
}
// Now validate the resulting unmarshaled object
err = s.IsValid()
if err != nil {
return fmt.Errorf("failed to parse [SystemConfig]: %w", err)
}
return
}
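A minimal sketch of the kind of config test the review note below asks for, covering the new HidepidDisable field. The validSystemConfig fixture and the testify-style assertions are assumptions about the package's test conventions (imports assumed: testing, encoding/json, github.com/stretchr/testify/assert); treat this as an illustration, not the project's actual test.

func TestShouldSucceedParsingHidepidDisable(t *testing.T) {
	var checkedSystemConfig SystemConfig
	conf := validSystemConfig // assumed existing valid fixture
	conf.HidepidDisable = true

	// Round-trip through JSON, which also exercises IsValid via UnmarshalJSON.
	data, err := json.Marshal(conf)
	assert.NoError(t, err)
	err = json.Unmarshal(data, &checkedSystemConfig)
	assert.NoError(t, err)
	assert.True(t, checkedSystemConfig.HidepidDisable)
}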
| 1 | 14,117 | Please also extend the config tests now to include the new field. | microsoft-CBL-Mariner | go |
@@ -53,6 +53,7 @@ namespace NLog.Layouts
public JsonLayout()
{
this.Attributes = new List<JsonAttribute>();
+ this.RenderEmptyLiteral = true;
}
/// <summary> | 1 | //
// Copyright (c) 2004-2016 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Layouts
{
using System;
using Config;
using LayoutRenderers.Wrappers;
using System.Collections.Generic;
using System.Text;
/// <summary>
/// A specialized layout that renders JSON-formatted events.
/// </summary>
[Layout("JsonLayout")]
[ThreadAgnostic]
[AppDomainFixedOutput]
public class JsonLayout : Layout
{
/// <summary>
/// Initializes a new instance of the <see cref="JsonLayout"/> class.
/// </summary>
public JsonLayout()
{
this.Attributes = new List<JsonAttribute>();
}
/// <summary>
/// Gets the array of attributes' configurations.
/// </summary>
        /// <docgen category='JSON Options' order='10' />
[ArrayParameter(typeof(JsonAttribute), "attribute")]
public IList<JsonAttribute> Attributes { get; private set; }
/// <summary>
        /// Gets or sets the option to suppress the extra spaces in the output JSON.
/// </summary>
public bool SuppressSpaces { get; set; }
/// <summary>
/// Formats the log event as a JSON document for writing.
/// </summary>
/// <param name="logEvent">The log event to be formatted.</param>
/// <returns>A JSON string representation of the log event.</returns>
protected override string GetFormattedMessage(LogEventInfo logEvent)
{
var jsonWrapper = new JsonEncodeLayoutRendererWrapper();
var sb = new StringBuilder();
sb.Append("{");
AppendIf(!this.SuppressSpaces, sb, " ");
bool first = true;
//Memory profiling pointed out that using a foreach-loop was allocating
//an Enumerator. Switching to a for-loop avoids the memory allocation.
for (int i = 0; i < this.Attributes.Count; i++)
{
var col = this.Attributes[i];
jsonWrapper.Inner = col.Layout;
jsonWrapper.JsonEncode = col.Encode;
string text = jsonWrapper.Render(logEvent);
if (!string.IsNullOrEmpty(text))
{
if (!first)
{
sb.Append(",");
AppendIf(!this.SuppressSpaces, sb, " ");
}
first = false;
string format;
if(col.Encode)
{
format = "\"{0}\":{1}\"{2}\"";
}
else
{
                    //If encoding is disabled for the current attribute, do not escape its value.
                    //This enables the user to write an arbitrary string value (including JSON).
format = "\"{0}\":{1}{2}";
}
sb.AppendFormat(format, col.Name, !this.SuppressSpaces ? " " : "", text);
}
}
AppendIf(!this.SuppressSpaces, sb, " ");
sb.Append("}");
return sb.ToString();
}
private static void AppendIf<T>(bool condition, StringBuilder stringBuilder, T objectToAppend)
{
if (condition)
{
stringBuilder.Append(objectToAppend);
}
}
}
} | 1 | 13,591 | I think it's wise to keep the behavior backwards-compatible (so `false` as default) | NLog-NLog | .cs |
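A hedged C# sketch of the backwards-compatible shape the reviewer suggests: an opt-in boolean left at its CLR default (false), so a plain new JsonLayout() behaves exactly as before. The name and polarity below are hypothetical, not NLog's final API.

        /// <summary>
        /// Hypothetical opt-in flag: false by default, so the constructor needs no
        /// assignment and existing configurations see unchanged output.
        /// </summary>
        public bool SuppressEmptyObject { get; set; }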
@@ -47,7 +47,7 @@ module Selenium
opts[:path] ||= '/'
opts[:secure] ||= false
-
+ opts[:sameSite]
obj = opts.delete(:expires)
opts[:expiry] = seconds_from(obj).to_i if obj
| 1 | # frozen_string_literal: true
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
class Manager
#
# @api private
#
def initialize(bridge)
@bridge = bridge
end
#
# Add a cookie to the browser
#
# @param [Hash] opts the options to create a cookie with.
# @option opts [String] :name A name
# @option opts [String] :value A value
# @option opts [String] :path ('/') A path
      # @option opts [Boolean] :secure (false) A boolean
# @option opts [Time,DateTime,Numeric,nil] :expires (nil) Expiry date, either as a Time, DateTime, or seconds since epoch.
#
# @raise [ArgumentError] if :name or :value is not specified
#
def add_cookie(opts = {})
raise ArgumentError, 'name is required' unless opts[:name]
raise ArgumentError, 'value is required' unless opts[:value]
opts[:path] ||= '/'
opts[:secure] ||= false
obj = opts.delete(:expires)
opts[:expiry] = seconds_from(obj).to_i if obj
@bridge.add_cookie opts
end
#
# Get the cookie with the given name
#
# @param [String] name the name of the cookie
# @return [Hash, nil] the cookie, or nil if it wasn't found.
#
def cookie_named(name)
convert_cookie(@bridge.cookie(name))
end
#
# Delete the cookie with the given name
#
# @param [String] name the name of the cookie to delete
#
def delete_cookie(name)
@bridge.delete_cookie name
end
#
# Delete all cookies
#
def delete_all_cookies
@bridge.delete_all_cookies
end
#
# Get all cookies
#
# @return [Array<Hash>] list of cookies
#
def all_cookies
@bridge.cookies.map { |cookie| convert_cookie(cookie) }
end
def timeouts
@timeouts ||= Timeouts.new(@bridge)
end
#
# @api beta This API may be changed or removed in a future release.
#
def logs
@logs ||= Logs.new(@bridge)
end
#
# Create a new top-level browsing context
# https://w3c.github.io/webdriver/#new-window
# @param type [Symbol] Supports two values: :tab and :window.
# Use :tab if you'd like the new window to share an OS-level window
# with the current browsing context.
# Use :window otherwise
# @return [String] The value of the window handle
#
def new_window(type = :tab)
case type
when :tab, :window
result = @bridge.new_window(type)
unless result.key?('handle')
raise UnknownError, "the driver did not return a handle. " \
"The returned result: #{result.inspect}"
end
result['handle']
else
raise ArgumentError, "invalid argument for type. Got: '#{type.inspect}'. " \
"Try :tab or :window"
end
end
#
# @api beta This API may be changed or removed in a future release.
#
def window
@window ||= Window.new(@bridge)
end
private
SECONDS_PER_DAY = 86_400.0
def datetime_at(int)
DateTime.civil(1970) + (int / SECONDS_PER_DAY)
end
def seconds_from(obj)
case obj
when Time
obj.to_f
when DateTime
(obj - DateTime.civil(1970)) * SECONDS_PER_DAY
when Numeric
obj
else
raise ArgumentError, "invalid value for expiration date: #{obj.inspect}"
end
end
def strip_port(str)
str.split(':', 2).first
end
def convert_cookie(cookie)
{
name: cookie['name'],
value: cookie['value'],
path: cookie['path'],
domain: cookie['domain'] && strip_port(cookie['domain']),
expires: cookie['expiry'] && datetime_at(cookie['expiry']),
secure: cookie['secure']
}
end
end # Options
end # WebDriver
end # Selenium
| 1 | 17,469 | We don't need this line, do we? | SeleniumHQ-selenium | rb |
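On the question above: a bare opts[:sameSite] is just a hash read whose result is discarded, so the added line is a no-op. A hedged Ruby sketch of what handling the option could look like if the intent was validation; the accepted values and the helper name are assumptions, not Selenium's implementation:

# Illustrative only: `opts[:sameSite]` on its own evaluates the stored
# value (or nil) and throws it away. Acting on the option requires an
# assignment or a check, for example:
def normalize_same_site!(opts)
  return opts unless opts.key?(:sameSite)

  unless %w[Strict Lax None].include?(opts[:sameSite])
    raise ArgumentError, "invalid sameSite value: #{opts[:sameSite].inspect}"
  end
  opts
end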
@@ -0,0 +1,4 @@
+# Proprietary German backlinks service.
+# They keep requesting invalid formats.
+User-agent: SEOkicks-Robot
+Disallow: / | 1 | 1 | 17,372 | I don't know if we've actually had this problem on Upcase. I'd lean towards having a blank file unless (like on robots) we see multiple errors per week from this crawler. | thoughtbot-upcase | rb |
|
@@ -179,7 +179,13 @@ static void render_decorations(struct roots_view *view,
float matrix[16];
wlr_matrix_project_box(&matrix, &project_box, WL_OUTPUT_TRANSFORM_NORMAL,
view->rotation, &output->transform_matrix);
- float color[4] = { 0.2, 0.2, 0.2, 1 };
+ float color[4] = { 0.2, 0.2, 0.2, 1 };
+ if (view_get_activated(view)) {
+ color[0] = 0.09804;
+ color[1] = 0.09804;
+ color[2] = 0.43921;
+ }
+
wlr_render_colored_quad(desktop->server->renderer, &color, &matrix);
}
| 1 | #define _POSIX_C_SOURCE 200809L
#include <time.h>
#include <stdlib.h>
#include <stdbool.h>
#include <GLES2/gl2.h>
#include <wlr/types/wlr_output_layout.h>
#include <wlr/types/wlr_compositor.h>
#include <wlr/types/wlr_wl_shell.h>
#include <wlr/types/wlr_xdg_shell_v6.h>
#include <wlr/render/matrix.h>
#include <wlr/util/log.h>
#include "rootston/server.h"
#include "rootston/desktop.h"
#include "rootston/config.h"
/**
* Rotate a child's position relative to a parent. The parent size is (pw, ph),
* the child position is (*sx, *sy) and its size is (sw, sh).
*/
static void rotate_child_position(double *sx, double *sy, double sw, double sh,
double pw, double ph, float rotation) {
if (rotation != 0.0) {
// Coordinates relative to the center of the subsurface
double ox = *sx - pw/2 + sw/2,
oy = *sy - ph/2 + sh/2;
// Rotated coordinates
double rx = cos(-rotation)*ox - sin(-rotation)*oy,
ry = cos(-rotation)*oy + sin(-rotation)*ox;
*sx = rx + pw/2 - sw/2;
*sy = ry + ph/2 - sh/2;
}
}
static void render_surface(struct wlr_surface *surface,
struct roots_desktop *desktop, struct wlr_output *wlr_output,
struct timespec *when, double lx, double ly, float rotation) {
if (!wlr_surface_has_buffer(surface)) {
return;
}
int width = surface->current->width;
int height = surface->current->height;
int render_width = width * wlr_output->scale;
int render_height = height * wlr_output->scale;
double ox = lx, oy = ly;
wlr_output_layout_output_coords(desktop->layout, wlr_output, &ox, &oy);
ox *= wlr_output->scale;
oy *= wlr_output->scale;
struct wlr_box render_box = {
.x = lx, .y = ly,
.width = render_width, .height = render_height,
};
if (wlr_output_layout_intersects(desktop->layout, wlr_output, &render_box)) {
struct wlr_box project_box = {
.x = ox,
.y = oy,
.width = render_width,
.height = render_height,
};
float matrix[16];
wlr_matrix_project_box(&matrix, &project_box, surface->current->transform,
rotation, &wlr_output->transform_matrix);
wlr_render_with_matrix(desktop->server->renderer, surface->texture,
&matrix);
wlr_surface_send_frame_done(surface, when);
}
struct wlr_subsurface *subsurface;
wl_list_for_each(subsurface, &surface->subsurface_list, parent_link) {
struct wlr_surface_state *state = subsurface->surface->current;
double sx = state->subsurface_position.x;
double sy = state->subsurface_position.y;
double sw = state->buffer_width / state->scale;
double sh = state->buffer_height / state->scale;
rotate_child_position(&sx, &sy, sw, sh, width, height, rotation);
render_surface(subsurface->surface, desktop, wlr_output, when,
lx + sx,
ly + sy,
rotation);
}
}
static void render_xdg_v6_popups(struct wlr_xdg_surface_v6 *surface,
struct roots_desktop *desktop, struct wlr_output *wlr_output,
struct timespec *when, double base_x, double base_y, float rotation) {
double width = surface->surface->current->width;
double height = surface->surface->current->height;
struct wlr_xdg_surface_v6 *popup;
wl_list_for_each(popup, &surface->popups, popup_link) {
if (!popup->configured) {
continue;
}
double popup_width = popup->surface->current->width;
double popup_height = popup->surface->current->height;
double popup_sx, popup_sy;
wlr_xdg_surface_v6_popup_get_position(popup, &popup_sx, &popup_sy);
rotate_child_position(&popup_sx, &popup_sy, popup_width, popup_height,
width, height, rotation);
render_surface(popup->surface, desktop, wlr_output, when,
base_x + popup_sx, base_y + popup_sy, rotation);
render_xdg_v6_popups(popup, desktop, wlr_output, when,
base_x + popup_sx, base_y + popup_sy, rotation);
}
}
static void render_wl_shell_surface(struct wlr_wl_shell_surface *surface,
struct roots_desktop *desktop, struct wlr_output *wlr_output,
struct timespec *when, double lx, double ly, float rotation,
bool is_child) {
if (is_child || surface->state != WLR_WL_SHELL_SURFACE_STATE_POPUP) {
render_surface(surface->surface, desktop, wlr_output, when,
lx, ly, rotation);
double width = surface->surface->current->width;
double height = surface->surface->current->height;
struct wlr_wl_shell_surface *popup;
wl_list_for_each(popup, &surface->popups, popup_link) {
double popup_width = popup->surface->current->width;
double popup_height = popup->surface->current->height;
double popup_x = popup->transient_state->x;
double popup_y = popup->transient_state->y;
rotate_child_position(&popup_x, &popup_y, popup_width, popup_height,
width, height, rotation);
render_wl_shell_surface(popup, desktop, wlr_output, when,
lx + popup_x, ly + popup_y, rotation, true);
}
}
}
static void render_xwayland_children(struct wlr_xwayland_surface *surface,
struct roots_desktop *desktop, struct wlr_output *wlr_output,
struct timespec *when) {
struct wlr_xwayland_surface *child;
wl_list_for_each(child, &surface->children, parent_link) {
if (child->surface != NULL && child->added) {
render_surface(child->surface, desktop, wlr_output, when,
child->x, child->y, 0);
}
render_xwayland_children(child, desktop, wlr_output, when);
}
}
static void render_decorations(struct roots_view *view,
struct roots_desktop *desktop, struct wlr_output *output) {
if (!view->decorated) {
return;
}
struct wlr_box deco_box;
view_get_deco_box(view, &deco_box);
double sx = deco_box.x - view->x;
double sy = deco_box.y - view->y;
rotate_child_position(&sx, &sy, deco_box.width, deco_box.height,
view->wlr_surface->current->width,
view->wlr_surface->current->height, view->rotation);
double ox = sx + view->x;
double oy = sy + view->y;
wlr_output_layout_output_coords(desktop->layout, output, &ox, &oy);
ox *= output->scale;
oy *= output->scale;
struct wlr_box project_box = {
.x = ox,
.y = oy,
.width = deco_box.width,
.height = deco_box.height,
};
float matrix[16];
wlr_matrix_project_box(&matrix, &project_box, WL_OUTPUT_TRANSFORM_NORMAL,
view->rotation, &output->transform_matrix);
float color[4] = { 0.2, 0.2, 0.2, 1 };
wlr_render_colored_quad(desktop->server->renderer, &color, &matrix);
}
static void render_view(struct roots_view *view, struct roots_desktop *desktop,
struct wlr_output *wlr_output, struct timespec *when) {
render_decorations(view, desktop, wlr_output);
switch (view->type) {
case ROOTS_XDG_SHELL_V6_VIEW:
render_surface(view->wlr_surface, desktop, wlr_output, when,
view->x, view->y, view->rotation);
render_xdg_v6_popups(view->xdg_surface_v6, desktop, wlr_output,
when, view->x, view->y, view->rotation);
break;
case ROOTS_WL_SHELL_VIEW:
render_wl_shell_surface(view->wl_shell_surface, desktop, wlr_output,
when, view->x, view->y, view->rotation, false);
break;
case ROOTS_XWAYLAND_VIEW:
render_surface(view->wlr_surface, desktop, wlr_output, when,
view->x, view->y, view->rotation);
break;
}
}
static bool has_standalone_surface(struct roots_view *view) {
if (!wl_list_empty(&view->wlr_surface->subsurface_list)) {
return false;
}
switch (view->type) {
case ROOTS_XDG_SHELL_V6_VIEW:
return wl_list_empty(&view->xdg_surface_v6->popups);
case ROOTS_WL_SHELL_VIEW:
return wl_list_empty(&view->wl_shell_surface->popups);
case ROOTS_XWAYLAND_VIEW:
return wl_list_empty(&view->xwayland_surface->children);
}
return true;
}
static void output_frame_notify(struct wl_listener *listener, void *data) {
struct wlr_output *wlr_output = data;
struct roots_output *output = wl_container_of(listener, output, frame);
struct roots_desktop *desktop = output->desktop;
struct roots_server *server = desktop->server;
if (!wlr_output->enabled) {
return;
}
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
wlr_output_make_current(wlr_output);
wlr_renderer_begin(server->renderer, wlr_output);
if (output->fullscreen_view != NULL) {
struct roots_view *view = output->fullscreen_view;
// Make sure the view is centered on screen
const struct wlr_box *output_box =
wlr_output_layout_get_box(desktop->layout, wlr_output);
struct wlr_box view_box;
view_get_box(view, &view_box);
double view_x = (double)(output_box->width - view_box.width) / 2 +
output_box->x;
double view_y = (double)(output_box->height - view_box.height) / 2 +
output_box->y;
view_move(view, view_x, view_y);
if (has_standalone_surface(view)) {
wlr_output_set_fullscreen_surface(wlr_output, view->wlr_surface);
} else {
wlr_output_set_fullscreen_surface(wlr_output, NULL);
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
render_view(view, desktop, wlr_output, &now);
// During normal rendering the xwayland window tree isn't traversed
// because all windows are rendered. Here we only want to render
// the fullscreen window's children so we have to traverse the tree.
if (view->type == ROOTS_XWAYLAND_VIEW) {
render_xwayland_children(view->xwayland_surface, desktop,
wlr_output, &now);
}
}
wlr_renderer_end(server->renderer);
wlr_output_swap_buffers(wlr_output);
output->last_frame = desktop->last_frame = now;
return;
} else {
wlr_output_set_fullscreen_surface(wlr_output, NULL);
}
struct roots_view *view;
wl_list_for_each_reverse(view, &desktop->views, link) {
render_view(view, desktop, wlr_output, &now);
}
struct wlr_drag_icon *drag_icon = NULL;
struct roots_seat *seat = NULL;
wl_list_for_each(seat, &server->input->seats, link) {
wl_list_for_each(drag_icon, &seat->seat->drag_icons, link) {
if (!drag_icon->mapped) {
continue;
}
struct wlr_surface *icon = drag_icon->surface;
struct wlr_cursor *cursor = seat->cursor->cursor;
double icon_x = 0, icon_y = 0;
if (drag_icon->is_pointer) {
icon_x = cursor->x + drag_icon->sx;
icon_y = cursor->y + drag_icon->sy;
render_surface(icon, desktop, wlr_output, &now, icon_x, icon_y, 0);
} else {
struct wlr_touch_point *point =
wlr_seat_touch_get_point(seat->seat, drag_icon->touch_id);
if (point) {
icon_x = seat->touch_x + drag_icon->sx;
icon_y = seat->touch_y + drag_icon->sy;
render_surface(icon, desktop, wlr_output, &now, icon_x, icon_y, 0);
}
}
}
}
wlr_renderer_end(server->renderer);
wlr_output_swap_buffers(wlr_output);
output->last_frame = desktop->last_frame = now;
}
static void set_mode(struct wlr_output *output,
struct roots_output_config *oc) {
int mhz = (int)(oc->mode.refresh_rate * 1000);
if (wl_list_empty(&output->modes)) {
// Output has no mode, try setting a custom one
wlr_output_set_custom_mode(output, oc->mode.width, oc->mode.height, mhz);
return;
}
struct wlr_output_mode *mode, *best = NULL;
wl_list_for_each(mode, &output->modes, link) {
if (mode->width == oc->mode.width && mode->height == oc->mode.height) {
if (mode->refresh == mhz) {
best = mode;
break;
}
best = mode;
}
}
if (!best) {
wlr_log(L_ERROR, "Configured mode for %s not available", output->name);
} else {
wlr_log(L_DEBUG, "Assigning configured mode to %s", output->name);
wlr_output_set_mode(output, best);
}
}
void output_add_notify(struct wl_listener *listener, void *data) {
struct roots_desktop *desktop = wl_container_of(listener, desktop,
output_add);
struct wlr_output *wlr_output = data;
struct roots_input *input = desktop->server->input;
struct roots_config *config = desktop->config;
wlr_log(L_DEBUG, "Output '%s' added", wlr_output->name);
wlr_log(L_DEBUG, "'%s %s %s' %"PRId32"mm x %"PRId32"mm", wlr_output->make,
wlr_output->model, wlr_output->serial, wlr_output->phys_width,
wlr_output->phys_height);
if (wl_list_length(&wlr_output->modes) > 0) {
struct wlr_output_mode *mode =
wl_container_of((&wlr_output->modes)->prev, mode, link);
wlr_output_set_mode(wlr_output, mode);
}
struct roots_output *output = calloc(1, sizeof(struct roots_output));
clock_gettime(CLOCK_MONOTONIC, &output->last_frame);
output->desktop = desktop;
output->wlr_output = wlr_output;
output->frame.notify = output_frame_notify;
wl_signal_add(&wlr_output->events.frame, &output->frame);
wl_list_insert(&desktop->outputs, &output->link);
struct roots_output_config *output_config =
roots_config_get_output(config, wlr_output);
if (output_config) {
if (output_config->enable) {
if (output_config->mode.width) {
set_mode(wlr_output, output_config);
}
wlr_output_set_scale(wlr_output, output_config->scale);
wlr_output_set_transform(wlr_output, output_config->transform);
wlr_output_layout_add(desktop->layout, wlr_output, output_config->x,
output_config->y);
} else {
wlr_output_enable(wlr_output, false);
}
} else {
wlr_output_layout_add_auto(desktop->layout, wlr_output);
}
struct roots_seat *seat;
wl_list_for_each(seat, &input->seats, link) {
roots_seat_configure_cursor(seat);
roots_seat_configure_xcursor(seat);
}
}
void output_remove_notify(struct wl_listener *listener, void *data) {
struct wlr_output *wlr_output = data;
struct roots_desktop *desktop =
wl_container_of(listener, desktop, output_remove);
struct roots_output *output = NULL, *_output;
wl_list_for_each(_output, &desktop->outputs, link) {
if (_output->wlr_output == wlr_output) {
output = _output;
break;
}
}
if (!output) {
return; // We are unfamiliar with this output
}
wlr_output_layout_remove(desktop->layout, output->wlr_output);
// TODO: cursor
//example_config_configure_cursor(sample->config, sample->cursor,
// sample->compositor);
wl_list_remove(&output->link);
wl_list_remove(&output->frame.link);
free(output);
}
 | 1 | 9,716 | I'm not good at picking colors, so if anyone else has a preference, let me know. | swaywm-wlroots | c
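For reference on the literals in that diff: wlroots takes colors as normalized floats, and 0.09804 and 0.43921 come from 25/255 and 112/255, so the activated-decoration color is RGB(25, 25, 112), midnight blue. A small self-contained sketch of the conversion:

#include <stdio.h>

/* Illustrative helper: convert an 8-bit sRGB component to the normalized
 * float used in a color[4] array. 25/255 and 112/255 give the values in
 * the diff, i.e. the color encodes RGB(25, 25, 112). */
static float srgb8_to_float(unsigned char c) {
	return (float)c / 255.0f;
}

int main(void) {
	/* Prints 0.098039 and 0.439216; the diff truncates these to
	 * 0.09804 and 0.43921. */
	printf("%f %f\n", srgb8_to_float(25), srgb8_to_float(112));
	return 0;
}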
@@ -50,6 +50,10 @@ class FoldersScreenUtils {
this.refreshFolders();
}, 1000);
}
+
+ static cancelTimers() {
+ if (this.scheduleRefreshFoldersIID_) clearTimeout(this.scheduleRefreshFoldersIID_);
+ }
}
module.exports = { FoldersScreenUtils }; | 1 | const Folder = require('lib/models/Folder.js');
const Setting = require('lib/models/Setting.js');
class FoldersScreenUtils {
static async allForDisplay(options = {}) {
const orderDir = Setting.value('folders.sortOrder.reverse') ? 'DESC' : 'ASC';
const folderOptions = Object.assign(
{},
{
caseInsensitive: true,
order: [
{
by: 'title',
dir: orderDir,
},
],
},
options
);
let folders = await Folder.all(folderOptions);
if (Setting.value('folders.sortOrder.field') === 'last_note_user_updated_time') {
folders = await Folder.orderByLastModified(folders, orderDir);
}
if (Setting.value('showNoteCounts')) {
await Folder.addNoteCounts(folders,
Setting.value('showCompletedTodos'));
}
return folders;
}
static async refreshFolders() {
const folders = await this.allForDisplay({ includeConflictFolder: true });
this.dispatch({
type: 'FOLDER_UPDATE_ALL',
items: folders,
});
}
static scheduleRefreshFolders() {
if (this.scheduleRefreshFoldersIID_) clearTimeout(this.scheduleRefreshFoldersIID_);
this.scheduleRefreshFoldersIID_ = setTimeout(() => {
this.scheduleRefreshFoldersIID_ = null;
this.refreshFolders();
}, 1000);
}
}
module.exports = { FoldersScreenUtils };
 | 1 | 11,762 | Note that even if you cancel the timer, the refreshFolders function might still be running since it's async. Could that be a problem for the unit tests? One big issue I had with tests is that they would sometimes pass and sometimes fail randomly, because there was still code running in the background. One example was the ItemChange class, which saves data asynchronously. So I've implemented a waitForAllSaved(), which can be used in unit tests to make sure nothing is still running once the test ends. So I'm just wondering if we might end up with the same issue here. | laurent22-joplin | js
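On the race described above: clearTimeout only stops callbacks that have not fired yet, so a refreshFolders that already started keeps running after cancelTimers. A minimal sketch (hypothetical names, not Joplin's implementation) of the waitForAllSaved-style pattern the comment mentions, tracking in-flight work so tests can await it:

// Hypothetical sketch, not Joplin's implementation: remember the
// in-flight promise so callers (e.g. unit tests) can await completion.
class PendingWork {
	constructor() {
		this.pending_ = null;
	}

	run(asyncFn) {
		// Chain onto any in-flight task and keep the newest promise.
		this.pending_ = (this.pending_ || Promise.resolve()).then(asyncFn);
		return this.pending_;
	}

	async waitForAllDone() {
		// Loop, because awaited work may schedule more work.
		while (this.pending_) {
			const current = this.pending_;
			await current;
			if (this.pending_ === current) this.pending_ = null;
		}
	}
}

module.exports = { PendingWork };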
@@ -0,0 +1,16 @@
+'use strict';
+
+const tsd = require('tsd').default;
+const { expect } = require('chai');
+
+describe('Exported Types', () => {
+ it('should be as expected', async () => {
+ const diagnostics = await tsd();
+ if (diagnostics.length !== 0) {
+ const messages = diagnostics
+ .map(d => `${d.fileName}:${d.line}:${d.column} - [${d.severity}]: ${d.message}`)
+ .join('\n');
+ expect.fail('\n' + messages);
+ }
+ });
+}); | 1 | 1 | 19,698 | can we make this something more descriptive? like... types should compile? I'm still not sure exactly what this is testing... this says "Exported types" - but what does tsd actually look at? | mongodb-node-mongodb-native | js |
|
@@ -48,17 +48,12 @@ public class BridgeWebChromeClient extends WebChromeClient {
callback.onCustomViewHidden();
super.onShowCustomView(view, callback);
}
-
- @Override
- public void onHideCustomView() {
- super.onHideCustomView();
- }
@Override
public void onPermissionRequest(final PermissionRequest request) {
boolean isRequestPermissionRequired = android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.M;
- List<String> permissionList = new ArrayList<String>();
+ List<String> permissionList = new ArrayList<>();
if (Arrays.asList(request.getResources()).contains("android.webkit.resource.VIDEO_CAPTURE")) {
permissionList.add(Manifest.permission.CAMERA);
} | 1 | package com.getcapacitor;
import android.Manifest;
import android.app.Activity;
import android.content.ActivityNotFoundException;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.net.Uri;
import android.provider.MediaStore;
import android.webkit.ConsoleMessage;
import android.webkit.GeolocationPermissions;
import android.webkit.JsPromptResult;
import android.webkit.JsResult;
import android.webkit.MimeTypeMap;
import android.webkit.PermissionRequest;
import android.webkit.ValueCallback;
import android.webkit.WebChromeClient;
import android.webkit.WebView;
import android.view.View;
import com.getcapacitor.plugin.camera.CameraUtils;
import org.apache.cordova.CordovaPlugin;
import org.json.JSONException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* Custom WebChromeClient handler, required for showing dialogs, confirms, etc. in our
* WebView instance.
*/
public class BridgeWebChromeClient extends WebChromeClient {
private Bridge bridge;
static final int FILE_CHOOSER = PluginRequestCodes.FILE_CHOOSER;
static final int FILE_CHOOSER_IMAGE_CAPTURE = PluginRequestCodes.FILE_CHOOSER_IMAGE_CAPTURE;
static final int FILE_CHOOSER_VIDEO_CAPTURE = PluginRequestCodes.FILE_CHOOSER_VIDEO_CAPTURE;
static final int FILE_CHOOSER_CAMERA_PERMISSION = PluginRequestCodes.FILE_CHOOSER_CAMERA_PERMISSION;
static final int GET_USER_MEDIA_PERMISSIONS = PluginRequestCodes.GET_USER_MEDIA_PERMISSIONS;
public BridgeWebChromeClient(Bridge bridge) {
this.bridge = bridge;
}
@Override
public void onShowCustomView(View view, CustomViewCallback callback) {
callback.onCustomViewHidden();
super.onShowCustomView(view, callback);
}
@Override
public void onHideCustomView() {
super.onHideCustomView();
}
@Override
public void onPermissionRequest(final PermissionRequest request) {
boolean isRequestPermissionRequired = android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.M;
List<String> permissionList = new ArrayList<String>();
if (Arrays.asList(request.getResources()).contains("android.webkit.resource.VIDEO_CAPTURE")) {
permissionList.add(Manifest.permission.CAMERA);
}
if (Arrays.asList(request.getResources()).contains("android.webkit.resource.AUDIO_CAPTURE")) {
permissionList.add(Manifest.permission.MODIFY_AUDIO_SETTINGS);
permissionList.add(Manifest.permission.RECORD_AUDIO);
}
if (!permissionList.isEmpty() && isRequestPermissionRequired) {
String[] permissions = permissionList.toArray(new String[0]);
bridge.cordovaInterface.requestPermissions(new CordovaPlugin(){
@Override
public void onRequestPermissionResult(int requestCode, String[] permissions, int[] grantResults) throws JSONException {
if (GET_USER_MEDIA_PERMISSIONS == requestCode) {
for (int r : grantResults) {
if (r == PackageManager.PERMISSION_DENIED) {
request.deny();
return;
}
}
request.grant(request.getResources());
}
}
}, GET_USER_MEDIA_PERMISSIONS, permissions);
} else {
request.grant(request.getResources());
}
}
/**
* Show the browser alert modal
* @param view
* @param url
* @param message
* @param result
* @return
*/
@Override
public boolean onJsAlert(WebView view, String url, String message, final JsResult result) {
if (bridge.getActivity().isFinishing()) {
return true;
}
Dialogs.alert(view.getContext(), message, new Dialogs.OnResultListener() {
@Override
public void onResult(boolean value, boolean didCancel, String inputValue) {
if(value) {
result.confirm();
} else {
result.cancel();
}
}
});
return true;
}
/**
* Show the browser confirm modal
* @param view
* @param url
* @param message
* @param result
* @return
*/
@Override
public boolean onJsConfirm(WebView view, String url, String message, final JsResult result) {
if (bridge.getActivity().isFinishing()) {
return true;
}
Dialogs.confirm(view.getContext(), message, new Dialogs.OnResultListener() {
@Override
public void onResult(boolean value, boolean didCancel, String inputValue) {
if(value) {
result.confirm();
} else {
result.cancel();
}
}
});
return true;
}
/**
* Show the browser prompt modal
* @param view
* @param url
* @param message
* @param defaultValue
* @param result
* @return
*/
@Override
public boolean onJsPrompt(WebView view, String url, String message, String defaultValue, final JsPromptResult result) {
if (bridge.getActivity().isFinishing()) {
return true;
}
Dialogs.prompt(view.getContext(), message, new Dialogs.OnResultListener() {
@Override
public void onResult(boolean value, boolean didCancel, String inputValue) {
if(value) {
result.confirm(inputValue);
} else {
result.cancel();
}
}
});
return true;
}
/**
* Handle the browser geolocation prompt
* @param origin
* @param callback
*/
@Override
public void onGeolocationPermissionsShowPrompt(String origin, GeolocationPermissions.Callback callback) {
super.onGeolocationPermissionsShowPrompt(origin, callback);
Logger.debug("onGeolocationPermissionsShowPrompt: DOING IT HERE FOR ORIGIN: " + origin);
// Set that we want geolocation perms for this origin
callback.invoke(origin, true, false);
Plugin geo = bridge.getPlugin("Geolocation").getInstance();
if (!geo.hasRequiredPermissions()) {
geo.pluginRequestAllPermissions();
} else {
Logger.debug("onGeolocationPermissionsShowPrompt: has required permis");
}
}
@Override
public boolean onShowFileChooser(WebView webView, final ValueCallback<Uri[]> filePathCallback, final FileChooserParams fileChooserParams) {
List<String> acceptTypes = Arrays.asList(fileChooserParams.getAcceptTypes());
boolean captureEnabled = fileChooserParams.isCaptureEnabled();
boolean capturePhoto = captureEnabled && acceptTypes.contains("image/*");
final boolean captureVideo = captureEnabled && acceptTypes.contains("video/*");
if ((capturePhoto || captureVideo)) {
if(isMediaCaptureSupported()) {
showMediaCaptureOrFilePicker(filePathCallback, fileChooserParams, captureVideo);
} else {
this.bridge.cordovaInterface.requestPermission(new CordovaPlugin(){
@Override
public void onRequestPermissionResult(int requestCode, String[] permissions, int[] grantResults) throws JSONException {
if (FILE_CHOOSER_CAMERA_PERMISSION == requestCode) {
if (grantResults[0] == PackageManager.PERMISSION_GRANTED) {
showMediaCaptureOrFilePicker(filePathCallback, fileChooserParams, captureVideo);
} else {
Logger.warn(Logger.tags("FileChooser"), "Camera permission not granted");
filePathCallback.onReceiveValue(null);
}
}
}
}, FILE_CHOOSER_CAMERA_PERMISSION, Manifest.permission.CAMERA);
}
} else {
showFilePicker(filePathCallback, fileChooserParams);
}
return true;
}
private boolean isMediaCaptureSupported() {
Plugin camera = bridge.getPlugin("Camera").getInstance();
boolean isSupported = camera.hasPermission(Manifest.permission.CAMERA) || !camera.hasDefinedPermission(Manifest.permission.CAMERA);
return isSupported;
}
private void showMediaCaptureOrFilePicker(ValueCallback<Uri[]> filePathCallback, FileChooserParams fileChooserParams, boolean isVideo) {
// TODO: add support for video capture on Android M and older
// On Android M and lower the VIDEO_CAPTURE_INTENT (e.g.: intent.getData())
// returns a file:// URI instead of the expected content:// URI.
// So we disable it for now because it requires a bit more work
boolean isVideoCaptureSupported = android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.N;
boolean shown = false;
if (isVideo && isVideoCaptureSupported) {
shown = showVideoCapturePicker(filePathCallback);
} else {
shown = showImageCapturePicker(filePathCallback);
}
if (!shown) {
Logger.warn(Logger.tags("FileChooser"), "Media capture intent could not be launched. Falling back to default file picker.");
showFilePicker(filePathCallback, fileChooserParams);
}
}
private boolean showImageCapturePicker(final ValueCallback<Uri[]> filePathCallback) {
Intent takePictureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
if (takePictureIntent.resolveActivity(bridge.getActivity().getPackageManager()) == null) {
return false;
}
final Uri imageFileUri;
try {
imageFileUri = CameraUtils.createImageFileUri(bridge.getActivity(), bridge.getContext().getPackageName());
} catch (Exception ex) {
Logger.error("Unable to create temporary media capture file: " + ex.getMessage());
return false;
}
takePictureIntent.putExtra(MediaStore.EXTRA_OUTPUT, imageFileUri);
bridge.cordovaInterface.startActivityForResult(new CordovaPlugin() {
@Override
public void onActivityResult(int requestCode, int resultCode, Intent intent) {
Uri[] result = null;
if (resultCode == Activity.RESULT_OK) {
result = new Uri[]{imageFileUri};
}
filePathCallback.onReceiveValue(result);
}
}, takePictureIntent, FILE_CHOOSER_IMAGE_CAPTURE);
return true;
}
private boolean showVideoCapturePicker(final ValueCallback<Uri[]> filePathCallback) {
Intent takeVideoIntent = new Intent(MediaStore.ACTION_VIDEO_CAPTURE);
if (takeVideoIntent.resolveActivity(bridge.getActivity().getPackageManager()) == null) {
return false;
}
bridge.cordovaInterface.startActivityForResult(new CordovaPlugin() {
@Override
public void onActivityResult(int requestCode, int resultCode, Intent intent) {
Uri[] result = null;
if (resultCode == Activity.RESULT_OK) {
result = new Uri[]{intent.getData()};
}
filePathCallback.onReceiveValue(result);
}
}, takeVideoIntent, FILE_CHOOSER_VIDEO_CAPTURE);
return true;
}
private void showFilePicker(final ValueCallback<Uri[]> filePathCallback, FileChooserParams fileChooserParams) {
Intent intent = fileChooserParams.createIntent();
if (fileChooserParams.getMode() == FileChooserParams.MODE_OPEN_MULTIPLE) {
intent.putExtra(Intent.EXTRA_ALLOW_MULTIPLE, true);
}
if (fileChooserParams.getAcceptTypes().length > 1) {
String[] validTypes = getValidTypes(fileChooserParams.getAcceptTypes());
intent.putExtra(Intent.EXTRA_MIME_TYPES, validTypes);
}
try {
bridge.cordovaInterface.startActivityForResult(new CordovaPlugin() {
@Override
public void onActivityResult(int requestCode, int resultCode, Intent intent) {
Uri[] result;
if (resultCode == Activity.RESULT_OK && intent.getClipData() != null && intent.getClipData().getItemCount() > 1) {
final int numFiles = intent.getClipData().getItemCount();
result = new Uri[numFiles];
for (int i = 0; i < numFiles; i++) {
result[i] = intent.getClipData().getItemAt(i).getUri();
}
} else {
result = WebChromeClient.FileChooserParams.parseResult(resultCode, intent);
}
filePathCallback.onReceiveValue(result);
}
}, intent, FILE_CHOOSER);
} catch (ActivityNotFoundException e) {
filePathCallback.onReceiveValue(null);
}
}
private String[] getValidTypes(String[] currentTypes) {
List<String> validTypes = new ArrayList<>();
MimeTypeMap mtm = MimeTypeMap.getSingleton();
for (String mime : currentTypes) {
if (mime.startsWith(".")) {
String extension = mime.substring(1);
String extensionMime = mtm.getMimeTypeFromExtension(extension);
if (extensionMime != null && !validTypes.contains(extensionMime)) {
validTypes.add(extensionMime);
}
} else if (!validTypes.contains(mime)) {
validTypes.add(mime);
}
}
Object[] validObj = validTypes.toArray();
return Arrays.copyOf(validObj, validObj.length, String[].class);
}
@Override
public boolean onConsoleMessage(ConsoleMessage consoleMessage) {
String tag = Logger.tags("Console");
if (consoleMessage.message() != null && isValidMsg(consoleMessage.message())) {
String msg = String.format("File: %s - Line %d - Msg: %s", consoleMessage.sourceId(), consoleMessage.lineNumber(), consoleMessage.message());
String level = consoleMessage.messageLevel().name();
if ("ERROR".equalsIgnoreCase(level)) {
Logger.error(tag, msg, null);
} else if ("WARNING".equalsIgnoreCase(level)) {
Logger.warn(tag, msg);
} else if ("TIP".equalsIgnoreCase(level)) {
Logger.debug(tag, msg);
} else {
Logger.info(tag, msg);
}
}
return true;
}
public boolean isValidMsg(String msg) {
return !(msg.contains("%cresult %c") || (msg.contains("%cnative %c")) || msg.equalsIgnoreCase("[object Object]") || msg.equalsIgnoreCase("console.groupEnd"));
}
}
 | 1 | 8,775 | this method is needed for the full screen video button to appear in players | ionic-team-capacitor | js
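Background for that comment: WebChromeClient signals HTML5 fullscreen (typically a <video> element) through onShowCustomView/onHideCustomView, and the implementation above calls onCustomViewHidden() right away, which suppresses fullscreen entirely. A hedged sketch of a working pair inside a WebChromeClient subclass; the activity field and layout handling are assumptions for illustration, not Capacitor's code:

// Illustrative sketch only; assumes android.view.View and
// android.view.ViewGroup imports plus an `activity` field on the
// enclosing WebChromeClient subclass.
private View customView;
private CustomViewCallback customViewCallback;

@Override
public void onShowCustomView(View view, CustomViewCallback callback) {
    customView = view;
    customViewCallback = callback;
    // Attach the fullscreen view on top of the activity's content.
    ViewGroup content = activity.findViewById(android.R.id.content);
    content.addView(view, new ViewGroup.LayoutParams(
        ViewGroup.LayoutParams.MATCH_PARENT,
        ViewGroup.LayoutParams.MATCH_PARENT));
}

@Override
public void onHideCustomView() {
    // Called when the page exits fullscreen; detach and release the view.
    ViewGroup content = activity.findViewById(android.R.id.content);
    content.removeView(customView);
    customView = null;
    customViewCallback.onCustomViewHidden();
    customViewCallback = null;
}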
@@ -26,6 +26,7 @@ type HeadersCfg struct {
penalize func(context.Context, []headerdownload.PenaltyItem)
wakeUpChan chan struct{}
batchSize datasize.ByteSize
+ increment *uint64
}
func StageHeadersCfg( | 1 | package stagedsync
import (
"context"
"fmt"
"runtime"
"time"
"github.com/c2h5oh/datasize"
"github.com/ledgerwatch/turbo-geth/common"
"github.com/ledgerwatch/turbo-geth/core/rawdb"
"github.com/ledgerwatch/turbo-geth/core/types"
"github.com/ledgerwatch/turbo-geth/eth/stagedsync/stages"
"github.com/ledgerwatch/turbo-geth/ethdb"
"github.com/ledgerwatch/turbo-geth/log"
"github.com/ledgerwatch/turbo-geth/params"
"github.com/ledgerwatch/turbo-geth/turbo/stages/headerdownload"
)
type HeadersCfg struct {
db ethdb.RwKV
hd *headerdownload.HeaderDownload
chainConfig params.ChainConfig
headerReqSend func(context.Context, *headerdownload.HeaderRequest) []byte
announceNewHashes func(context.Context, []headerdownload.Announce)
penalize func(context.Context, []headerdownload.PenaltyItem)
wakeUpChan chan struct{}
batchSize datasize.ByteSize
}
func StageHeadersCfg(
db ethdb.RwKV,
headerDownload *headerdownload.HeaderDownload,
chainConfig params.ChainConfig,
headerReqSend func(context.Context, *headerdownload.HeaderRequest) []byte,
announceNewHashes func(context.Context, []headerdownload.Announce),
penalize func(context.Context, []headerdownload.PenaltyItem),
wakeUpChan chan struct{},
batchSize datasize.ByteSize,
) HeadersCfg {
return HeadersCfg{
db: db,
hd: headerDownload,
chainConfig: chainConfig,
headerReqSend: headerReqSend,
announceNewHashes: announceNewHashes,
penalize: penalize,
wakeUpChan: wakeUpChan,
batchSize: batchSize,
}
}
// HeadersForward progresses Headers stage in the forward direction
func HeadersForward(
s *StageState,
u Unwinder,
ctx context.Context,
tx ethdb.RwTx,
cfg HeadersCfg,
initialCycle bool,
) error {
var headerProgress uint64
var err error
useExternalTx := tx != nil
if !useExternalTx {
tx, err = cfg.db.BeginRw(context.Background())
if err != nil {
return err
}
defer tx.Rollback()
}
headerProgress, err = stages.GetStageProgress(tx, stages.Headers)
if err != nil {
return err
}
logPrefix := s.LogPrefix()
// Check if this is called straight after the unwinds, which means we need to create new canonical markings
hash, err := rawdb.ReadCanonicalHash(tx, headerProgress)
if err != nil {
return err
}
if hash == (common.Hash{}) {
headHash := rawdb.ReadHeadHeaderHash(tx)
if err = fixCanonicalChain(logPrefix, headerProgress, headHash, tx); err != nil {
return err
}
if !useExternalTx {
if err = tx.Commit(); err != nil {
return err
}
}
s.Done()
return nil
}
log.Info(fmt.Sprintf("[%s] Processing headers...", logPrefix), "from", headerProgress)
batch := ethdb.NewBatch(tx)
defer batch.Rollback()
logEvery := time.NewTicker(logInterval)
defer logEvery.Stop()
localTd, err := rawdb.ReadTd(tx, hash, headerProgress)
if err != nil {
return err
}
headerInserter := headerdownload.NewHeaderInserter(logPrefix, localTd, headerProgress)
cfg.hd.SetHeaderReader(&chainReader{config: &cfg.chainConfig, batch: batch})
var peer []byte
stopped := false
timer := time.NewTimer(1 * time.Second) // Check periodically even in the absence of incoming messages
prevProgress := headerProgress
for !stopped {
currentTime := uint64(time.Now().Unix())
req, penalties := cfg.hd.RequestMoreHeaders(currentTime)
if req != nil {
peer = cfg.headerReqSend(ctx, req)
if peer != nil {
cfg.hd.SentRequest(req, currentTime, 5 /* timeout */)
log.Debug("Sent request", "height", req.Number)
}
}
cfg.penalize(ctx, penalties)
maxRequests := 64 // Limit number of requests sent per round to let some headers be inserted into the database
for req != nil && peer != nil && maxRequests > 0 {
req, penalties = cfg.hd.RequestMoreHeaders(currentTime)
if req != nil {
peer = cfg.headerReqSend(ctx, req)
if peer != nil {
cfg.hd.SentRequest(req, currentTime, 5 /*timeout */)
log.Debug("Sent request", "height", req.Number)
}
}
cfg.penalize(ctx, penalties)
maxRequests--
}
// Send skeleton request if required
req = cfg.hd.RequestSkeleton()
if req != nil {
peer = cfg.headerReqSend(ctx, req)
if peer != nil {
log.Debug("Sent skeleton request", "height", req.Number)
}
}
// Load headers into the database
if err = cfg.hd.InsertHeaders(headerInserter.FeedHeaderFunc(batch)); err != nil {
return err
}
if batch.BatchSize() >= int(cfg.batchSize) {
if err = batch.Commit(); err != nil {
return err
}
if !useExternalTx {
if err = s.Update(tx, headerInserter.GetHighest()); err != nil {
return err
}
if err = tx.Commit(); err != nil {
return err
}
tx, err = cfg.db.BeginRw(ctx)
if err != nil {
return err
}
}
batch = ethdb.NewBatch(tx)
cfg.hd.SetHeaderReader(&chainReader{config: &cfg.chainConfig, batch: batch})
}
timer.Stop()
announces := cfg.hd.GrabAnnounces()
if len(announces) > 0 {
cfg.announceNewHashes(ctx, announces)
}
if !initialCycle && headerInserter.AnythingDone() {
// if this is not an initial cycle, we need to react quickly when new headers are coming in
break
}
timer = time.NewTimer(1 * time.Second)
select {
case <-ctx.Done():
stopped = true
case <-logEvery.C:
progress := cfg.hd.Progress()
logProgressHeaders(logPrefix, prevProgress, progress, batch)
prevProgress = progress
case <-timer.C:
log.Debug("RequestQueueTime (header) ticked")
case <-cfg.wakeUpChan:
log.Debug("headerLoop woken up by the incoming request")
}
if initialCycle && cfg.hd.InSync() {
log.Debug("Top seen", "height", cfg.hd.TopSeenHeight())
break
}
}
if headerInserter.AnythingDone() {
if err := s.Update(batch, headerInserter.GetHighest()); err != nil {
return err
}
}
if headerInserter.UnwindPoint() < headerProgress {
if err := u.UnwindTo(headerInserter.UnwindPoint(), batch, batch); err != nil {
return fmt.Errorf("%s: failed to unwind to %d: %w", logPrefix, headerInserter.UnwindPoint(), err)
}
} else {
if err := fixCanonicalChain(logPrefix, headerInserter.GetHighest(), headerInserter.GetHighestHash(), batch); err != nil {
return fmt.Errorf("%s: failed to fix canonical chain: %w", logPrefix, err)
}
if !stopped {
s.Done()
}
}
if err := batch.Commit(); err != nil {
return fmt.Errorf("%s: failed to write batch commit: %v", logPrefix, err)
}
if !useExternalTx {
if err := tx.Commit(); err != nil {
return err
}
}
log.Info("Processed", "highest", headerInserter.GetHighest())
if stopped {
return fmt.Errorf("interrupted")
}
stageHeadersGauge.Update(int64(headerInserter.GetHighest()))
return nil
}
func fixCanonicalChain(logPrefix string, height uint64, hash common.Hash, tx ethdb.StatelessRwTx) error {
if height == 0 {
return nil
}
ancestorHash := hash
ancestorHeight := height
var ch common.Hash
var err error
for ch, err = rawdb.ReadCanonicalHash(tx, ancestorHeight); err == nil && ch != ancestorHash; ch, err = rawdb.ReadCanonicalHash(tx, ancestorHeight) {
if err = rawdb.WriteCanonicalHash(tx, ancestorHash, ancestorHeight); err != nil {
return fmt.Errorf("[%s] marking canonical header %d %x: %w", logPrefix, ancestorHeight, ancestorHash, err)
}
ancestor := rawdb.ReadHeader(tx, ancestorHash, ancestorHeight)
if ancestor == nil {
log.Error("ancestor nil", "height", ancestorHeight, "hash", ancestorHash)
}
ancestorHash = ancestor.ParentHash
ancestorHeight--
}
if err != nil {
return fmt.Errorf("[%s] reading canonical hash for %d: %w", logPrefix, ancestorHeight, err)
}
return nil
}
func HeadersUnwind(u *UnwindState, s *StageState, tx ethdb.RwTx, cfg HeadersCfg) error {
var err error
useExternalTx := tx != nil
if !useExternalTx {
tx, err = cfg.db.BeginRw(context.Background())
if err != nil {
return err
}
defer tx.Rollback()
}
// Delete canonical hashes that are being unwound
var headerProgress uint64
headerProgress, err = stages.GetStageProgress(tx, stages.Headers)
if err != nil {
return err
}
for blockHeight := headerProgress; blockHeight > u.UnwindPoint; blockHeight-- {
if err = rawdb.DeleteCanonicalHash(tx, blockHeight); err != nil {
return err
}
}
if err = u.Skip(tx); err != nil {
return err
}
if !useExternalTx {
if err := tx.Commit(); err != nil {
return err
}
}
return nil
}
func logProgressHeaders(logPrefix string, prev, now uint64, batch ethdb.DbWithPendingMutations) uint64 {
speed := float64(now-prev) / float64(logInterval/time.Second)
var m runtime.MemStats
runtime.ReadMemStats(&m)
log.Info(fmt.Sprintf("[%s] Wrote block headers", logPrefix),
"number", now,
"blk/second", speed,
"batch", common.StorageSize(batch.BatchSize()),
"alloc", common.StorageSize(m.Alloc),
"sys", common.StorageSize(m.Sys),
"numGC", int(m.NumGC))
return now
}
type chainReader struct {
config *params.ChainConfig
batch ethdb.DbWithPendingMutations
}
func (cr chainReader) Config() *params.ChainConfig { return cr.config }
func (cr chainReader) CurrentHeader() *types.Header { panic("") }
func (cr chainReader) GetHeader(hash common.Hash, number uint64) *types.Header {
return rawdb.ReadHeader(cr.batch, hash, number)
}
func (cr chainReader) GetHeaderByNumber(number uint64) *types.Header {
return rawdb.ReadHeaderByNumber(cr.batch, number)
}
func (cr chainReader) GetHeaderByHash(hash common.Hash) *types.Header { panic("") }
| 1 | 22,178 | Why is this a pointer? | ledgerwatch-erigon | go |
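On the pointer question: a *uint64 field typically means either that several holders share and mutate a single counter, or that nil is meaningful as "unset"; a plain uint64 stored in the struct gives each copy its own independent value. A small runnable sketch of the difference (types are illustrative, unrelated to the real HeadersCfg):

package main

import "fmt"

// Illustrative only: a pointer field shares one counter across copies
// of the struct, while a value field is copied along with the struct.
type cfgPtr struct{ increment *uint64 }
type cfgVal struct{ increment uint64 }

func main() {
	shared := uint64(0)
	a := cfgPtr{increment: &shared}
	b := a // copying the struct copies the pointer, not the counter
	*a.increment += 1
	fmt.Println(*b.increment) // 1: both copies observe the update

	v := cfgVal{increment: 0}
	w := v // copying the struct copies the counter itself
	w.increment++
	fmt.Println(v.increment) // 0: the copies diverge
}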
@@ -30,12 +30,16 @@ import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.UnboundPredicate;
import org.apache.iceberg.relocated.com.google.common.base.Objects;
+import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.util.UnicodeUtil;
abstract class Truncate<T> implements Transform<T, T> {
@SuppressWarnings("unchecked")
static <T> Truncate<T> get(Type type, int width) {
+ Preconditions.checkArgument(width > 0,
+ "The width of truncate must larger than zero,but is %s", width);
+
switch (type.typeId()) {
case INTEGER:
return (Truncate<T>) new TruncateInteger(width); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.transforms;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import org.apache.iceberg.expressions.BoundLiteralPredicate;
import org.apache.iceberg.expressions.BoundPredicate;
import org.apache.iceberg.expressions.BoundTransform;
import org.apache.iceberg.expressions.BoundUnaryPredicate;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.expressions.UnboundPredicate;
import org.apache.iceberg.relocated.com.google.common.base.Objects;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.util.UnicodeUtil;
abstract class Truncate<T> implements Transform<T, T> {
@SuppressWarnings("unchecked")
static <T> Truncate<T> get(Type type, int width) {
switch (type.typeId()) {
case INTEGER:
return (Truncate<T>) new TruncateInteger(width);
case LONG:
return (Truncate<T>) new TruncateLong(width);
case DECIMAL:
return (Truncate<T>) new TruncateDecimal(width);
case STRING:
return (Truncate<T>) new TruncateString(width);
case BINARY:
return (Truncate<T>) new TruncateByteBuffer(width);
default:
throw new UnsupportedOperationException(
"Cannot truncate type: " + type);
}
}
public abstract Integer width();
@Override
public abstract T apply(T value);
@Override
public Type getResultType(Type sourceType) {
return sourceType;
}
private static class TruncateInteger extends Truncate<Integer> {
private final int width;
private TruncateInteger(int width) {
this.width = width;
}
@Override
public Integer width() {
return width;
}
@Override
public Integer apply(Integer value) {
if (value == null) {
return null;
}
return value - (((value % width) + width) % width);
}
@Override
public boolean canTransform(Type type) {
return type.typeId() == Type.TypeID.INTEGER;
}
@Override
public UnboundPredicate<Integer> project(String name, BoundPredicate<Integer> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
if (pred.isUnaryPredicate()) {
return Expressions.predicate(pred.op(), name);
} else if (pred.isLiteralPredicate()) {
return ProjectionUtil.truncateInteger(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<Integer> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
// TODO: for integers, can this return the original predicate?
// No. the predicate needs to be in terms of the applied value. For all x, apply(x) <= x.
// Therefore, the lower bound can be transformed outside of a greater-than bound.
if (pred instanceof BoundUnaryPredicate) {
return Expressions.predicate(pred.op(), name);
} else if (pred instanceof BoundLiteralPredicate) {
return ProjectionUtil.truncateIntegerStrict(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.NOT_IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof TruncateInteger)) {
return false;
}
TruncateInteger that = (TruncateInteger) o;
return width == that.width;
}
@Override
public int hashCode() {
return Objects.hashCode(width);
}
@Override
public String toString() {
return "truncate[" + width + "]";
}
}
private static class TruncateLong extends Truncate<Long> {
private final int width;
private TruncateLong(int width) {
this.width = width;
}
@Override
public Integer width() {
return width;
}
@Override
public Long apply(Long value) {
if (value == null) {
return null;
}
return value - (((value % width) + width) % width);
}
@Override
public boolean canTransform(Type type) {
return type.typeId() == Type.TypeID.LONG;
}
@Override
public UnboundPredicate<Long> project(String name, BoundPredicate<Long> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
if (pred.isUnaryPredicate()) {
return Expressions.predicate(pred.op(), name);
} else if (pred.isLiteralPredicate()) {
return ProjectionUtil.truncateLong(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public UnboundPredicate<Long> projectStrict(String name, BoundPredicate<Long> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
if (pred.isUnaryPredicate()) {
return Expressions.predicate(pred.op(), name);
} else if (pred.isLiteralPredicate()) {
return ProjectionUtil.truncateLongStrict(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.NOT_IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof TruncateLong)) {
return false;
}
TruncateLong that = (TruncateLong) o;
return width == that.width;
}
@Override
public int hashCode() {
return Objects.hashCode(width);
}
@Override
public String toString() {
return "truncate[" + width + "]";
}
}
private static class TruncateString extends Truncate<CharSequence> {
private final int length;
private TruncateString(int length) {
this.length = length;
}
@Override
public Integer width() {
return length;
}
@Override
public CharSequence apply(CharSequence value) {
if (value == null) {
return null;
}
return UnicodeUtil.truncateString(value, length);
}
@Override
public boolean canTransform(Type type) {
return type.typeId() == Type.TypeID.STRING;
}
@Override
public UnboundPredicate<CharSequence> project(String name,
BoundPredicate<CharSequence> predicate) {
if (predicate.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, predicate);
}
if (predicate.isUnaryPredicate()) {
return Expressions.predicate(predicate.op(), name);
} else if (predicate.isLiteralPredicate()) {
return ProjectionUtil.truncateArray(name, predicate.asLiteralPredicate(), this);
} else if (predicate.isSetPredicate() && predicate.op() == Expression.Operation.IN) {
return ProjectionUtil.transformSet(name, predicate.asSetPredicate(), this);
}
return null;
}
@Override
public UnboundPredicate<CharSequence> projectStrict(String name,
BoundPredicate<CharSequence> predicate) {
if (predicate.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, predicate);
}
if (predicate instanceof BoundUnaryPredicate) {
return Expressions.predicate(predicate.op(), name);
} else if (predicate instanceof BoundLiteralPredicate) {
BoundLiteralPredicate<CharSequence> pred = predicate.asLiteralPredicate();
if (pred.op() == Expression.Operation.STARTS_WITH) {
if (pred.literal().value().length() < width()) {
return Expressions.predicate(pred.op(), name, pred.literal().value());
} else if (pred.literal().value().length() == width()) {
return Expressions.equal(name, pred.literal().value());
}
} else {
return ProjectionUtil.truncateArrayStrict(name, pred, this);
}
} else if (predicate.isSetPredicate() && predicate.op() == Expression.Operation.NOT_IN) {
return ProjectionUtil.transformSet(name, predicate.asSetPredicate(), this);
}
return null;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof TruncateString)) {
return false;
}
TruncateString that = (TruncateString) o;
return length == that.length;
}
@Override
public int hashCode() {
return Objects.hashCode(length);
}
@Override
public String toString() {
return "truncate[" + length + "]";
}
}
private static class TruncateByteBuffer extends Truncate<ByteBuffer> {
private final int length;
private TruncateByteBuffer(int length) {
this.length = length;
}
@Override
public Integer width() {
return length;
}
@Override
public ByteBuffer apply(ByteBuffer value) {
if (value == null) {
return null;
}
ByteBuffer ret = value.duplicate();
ret.limit(Math.min(value.limit(), value.position() + length));
return ret;
}
@Override
public boolean canTransform(Type type) {
return type.typeId() == Type.TypeID.BINARY;
}
@Override
public UnboundPredicate<ByteBuffer> project(String name,
BoundPredicate<ByteBuffer> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
if (pred.isUnaryPredicate()) {
return Expressions.predicate(pred.op(), name);
} else if (pred.isLiteralPredicate()) {
return ProjectionUtil.truncateArray(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public UnboundPredicate<ByteBuffer> projectStrict(String name,
BoundPredicate<ByteBuffer> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
if (pred.isUnaryPredicate()) {
return Expressions.predicate(pred.op(), name);
} else if (pred.isLiteralPredicate()) {
return ProjectionUtil.truncateArrayStrict(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.NOT_IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof TruncateByteBuffer)) {
return false;
}
TruncateByteBuffer that = (TruncateByteBuffer) o;
return length == that.length;
}
@Override
public int hashCode() {
return Objects.hashCode(length);
}
@Override
public String toHumanString(ByteBuffer value) {
return value == null ? "null" : TransformUtil.base64encode(value);
}
@Override
public String toString() {
return "truncate[" + length + "]";
}
}
private static class TruncateDecimal extends Truncate<BigDecimal> {
private final BigInteger unscaledWidth;
private TruncateDecimal(int unscaledWidth) {
this.unscaledWidth = BigInteger.valueOf(unscaledWidth);
}
@Override
public Integer width() {
return unscaledWidth.intValue();
}
@Override
public BigDecimal apply(BigDecimal value) {
if (value == null) {
return null;
}
BigDecimal remainder = new BigDecimal(
value.unscaledValue()
.remainder(unscaledWidth)
.add(unscaledWidth)
.remainder(unscaledWidth),
value.scale());
return value.subtract(remainder);
}
@Override
public boolean canTransform(Type type) {
return type.typeId() == Type.TypeID.DECIMAL;
}
@Override
public UnboundPredicate<BigDecimal> project(String name,
BoundPredicate<BigDecimal> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
if (pred.isUnaryPredicate()) {
return Expressions.predicate(pred.op(), name);
} else if (pred.isLiteralPredicate()) {
return ProjectionUtil.truncateDecimal(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public UnboundPredicate<BigDecimal> projectStrict(String name,
BoundPredicate<BigDecimal> pred) {
if (pred.term() instanceof BoundTransform) {
return ProjectionUtil.projectTransformPredicate(this, name, pred);
}
if (pred.isUnaryPredicate()) {
return Expressions.predicate(pred.op(), name);
} else if (pred.isLiteralPredicate()) {
return ProjectionUtil.truncateDecimalStrict(name, pred.asLiteralPredicate(), this);
} else if (pred.isSetPredicate() && pred.op() == Expression.Operation.NOT_IN) {
return ProjectionUtil.transformSet(name, pred.asSetPredicate(), this);
}
return null;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
} else if (!(o instanceof TruncateDecimal)) {
return false;
}
TruncateDecimal that = (TruncateDecimal) o;
return unscaledWidth.equals(that.unscaledWidth);
}
@Override
public int hashCode() {
return Objects.hashCode(unscaledWidth);
}
@Override
public String toString() {
return "truncate[" + unscaledWidth + "]";
}
}
}
| 1 | 26,025 | nit: Space after comma | apache-iceberg | java |
@@ -853,6 +853,11 @@ class XLongField(LongField):
return lhex(self.i2h(pkt, x))
+class XLELongField(LELongField, XLongField):
+ def i2repr(self, pkt, x):
+ return XLongField.i2repr(self, pkt, x)
+
+
class IEEEFloatField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "f") | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
Fields: basic data structures that make up parts of packets.
"""
from __future__ import absolute_import
import collections
import copy
import inspect
import socket
import struct
import time
from scapy.config import conf
from scapy.dadict import DADict
from scapy.volatile import RandBin, RandByte, RandEnumKeys, RandInt, RandIP, RandIP6, RandLong, RandMAC, RandNum, RandShort, RandSInt, RandTermString, VolatileValue # noqa: E501
from scapy.data import EPOCH
from scapy.error import log_runtime, Scapy_Exception
from scapy.compat import bytes_hex, chb, orb, plain_str, raw
from scapy.pton_ntop import inet_ntop, inet_pton
from scapy.utils import inet_aton, inet_ntoa, lhex, mac2str, str2mac
from scapy.utils6 import in6_6to4ExtractAddr, in6_isaddr6to4, \
in6_isaddrTeredo, in6_ptop, Net6, teredoAddrExtractInfo
from scapy.base_classes import BasePacket, Gen, Net, Field_metaclass
from scapy.error import warning
import scapy.modules.six as six
from scapy.modules.six.moves import range
"""
Helper class to specify a protocol that can be extended at runtime
"""
class ObservableDict(dict):
def __init__(self, *args, **kw):
self.observers = []
super(ObservableDict, self).__init__(*args, **kw)
def observe(self, observer):
self.observers.append(observer)
def __setitem__(self, key, value):
for o in self.observers:
o.notify_set(self, key, value)
super(ObservableDict, self).__setitem__(key, value)
def __delitem__(self, key):
for o in self.observers:
o.notify_del(self, key)
super(ObservableDict, self).__delitem__(key)
def update(self, anotherDict):
for k in anotherDict:
self[k] = anotherDict[k]
############
# Fields #
############
class Field(six.with_metaclass(Field_metaclass, object)):
"""For more information on how this work, please refer to
http://www.secdev.org/projects/scapy/files/scapydoc.pdf
chapter ``Adding a New Field''"""
__slots__ = ["name", "fmt", "default", "sz", "owners"]
islist = 0
ismutable = False
holds_packets = 0
def __init__(self, name, default, fmt="H"):
self.name = name
if fmt[0] in "@=<>!":
self.fmt = fmt
else:
self.fmt = "!" + fmt
self.default = self.any2i(None, default)
self.sz = struct.calcsize(self.fmt)
self.owners = []
def register_owner(self, cls):
self.owners.append(cls)
def i2len(self, pkt, x):
"""Convert internal value to a length usable by a FieldLenField"""
return self.sz
def i2count(self, pkt, x):
"""Convert internal value to a number of elements usable by a FieldLenField.
Always 1 except for list fields"""
return 1
def h2i(self, pkt, x):
"""Convert human value to internal value"""
return x
def i2h(self, pkt, x):
"""Convert internal value to human value"""
return x
def m2i(self, pkt, x):
"""Convert machine value to internal value"""
return x
def i2m(self, pkt, x):
"""Convert internal value to machine value"""
if x is None:
x = 0
elif isinstance(x, str):
return raw(x)
return x
def any2i(self, pkt, x):
"""Try to understand the most input values possible and make an internal value from them""" # noqa: E501
return self.h2i(pkt, x)
def i2repr(self, pkt, x):
"""Convert internal value to a nice representation"""
return repr(self.i2h(pkt, x))
def addfield(self, pkt, s, val):
"""Add an internal value to a string
Copy the network representation of field `val` (belonging to layer
`pkt`) to the raw string packet `s`, and return the new string packet.
"""
return s + struct.pack(self.fmt, self.i2m(pkt, val))
def getfield(self, pkt, s):
"""Extract an internal value from a string
Extract from the raw packet `s` the field value belonging to layer
`pkt`.
Returns a two-element list,
first the raw packet string after having removed the extracted field,
second the extracted field itself in internal representation.
"""
return s[self.sz:], self.m2i(pkt, struct.unpack(self.fmt, s[:self.sz])[0]) # noqa: E501
def do_copy(self, x):
if hasattr(x, "copy"):
return x.copy()
if isinstance(x, list):
x = x[:]
for i in range(len(x)):
if isinstance(x[i], BasePacket):
x[i] = x[i].copy()
return x
def __repr__(self):
return "<Field (%s).%s>" % (",".join(x.__name__ for x in self.owners), self.name) # noqa: E501
def copy(self):
return copy.deepcopy(self)
def randval(self):
"""Return a volatile object whose value is both random and suitable for this field""" # noqa: E501
fmtt = self.fmt[-1]
if fmtt in "BHIQ":
return {"B": RandByte, "H": RandShort, "I": RandInt, "Q": RandLong}[fmtt]() # noqa: E501
elif fmtt == "s":
if self.fmt[0] in "0123456789":
value = int(self.fmt[:-1])
else:
value = int(self.fmt[1:-1])
return RandBin(value)
else:
warning("no random class for [%s] (fmt=%s).", self.name, self.fmt)
class Emph(object):
__slots__ = ["fld"]
def __init__(self, fld):
self.fld = fld
def __getattr__(self, attr):
return getattr(self.fld, attr)
def __hash__(self):
return hash(self.fld)
def __eq__(self, other):
return self.fld == other
class ActionField(object):
__slots__ = ["_fld", "_action_method", "_privdata"]
def __init__(self, fld, action_method, **kargs):
self._fld = fld
self._action_method = action_method
self._privdata = kargs
def any2i(self, pkt, val):
getattr(pkt, self._action_method)(val, self._fld, **self._privdata)
return getattr(self._fld, "any2i")(pkt, val)
def __getattr__(self, attr):
return getattr(self._fld, attr)
class ConditionalField(object):
__slots__ = ["fld", "cond"]
def __init__(self, fld, cond):
self.fld = fld
self.cond = cond
def _evalcond(self, pkt):
return self.cond(pkt)
def getfield(self, pkt, s):
if self._evalcond(pkt):
return self.fld.getfield(pkt, s)
else:
return s, None
def addfield(self, pkt, s, val):
if self._evalcond(pkt):
return self.fld.addfield(pkt, s, val)
else:
return s
def __getattr__(self, attr):
return getattr(self.fld, attr)
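# Editor's illustrative sketch (not part of the upstream source): a
# ConditionalField only consumes or emits bytes when its predicate holds.
# The _CondDemo Packet subclass is a hypothetical name for this demo.
def _example_conditional_field():
    from scapy.packet import Packet  # deferred to avoid a circular import

    class _CondDemo(Packet):
        fields_desc = [
            ByteField("has_extra", 0),
            ConditionalField(ShortField("extra", 0),
                             lambda pkt: pkt.has_extra == 1),
        ]

    assert raw(_CondDemo(has_extra=0)) == b"\x00"
    assert raw(_CondDemo(has_extra=1, extra=7)) == b"\x01\x00\x07"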
class MultipleTypeField(object):
"""MultipleTypeField are used for fields that can be implemented by
various Field subclasses, depending on conditions on the packet.
It is initialized with `flds` and `dflt`.
`dflt` is the default field type, to be used when none of the
conditions matched the current packet.
`flds` is a list of tuples (`fld`, `cond`), where `fld` is a field
type, and `cond` a "condition" to determine if `fld` is the field type
that should be used.
`cond` is either:
- a callable `cond_pkt` that accepts one argument (the packet) and
returns True if `fld` should be used, False otherwise.
- a tuple (`cond_pkt`, `cond_pkt_val`), where `cond_pkt` is the same
as in the previous case and `cond_pkt_val` is a callable that
accepts two arguments (the packet, and the value to be set) and
returns True if `fld` should be used, False otherwise.
See scapy.layers.l2.ARP (type "help(ARP)" in Scapy) for an example of
use.
"""
__slots__ = ["flds", "dflt", "name"]
def __init__(self, flds, dflt):
self.flds = flds
self.dflt = dflt
self.name = self.dflt.name
def _find_fld_pkt(self, pkt):
"""Given a Packet instance `pkt`, returns the Field subclass to be
used. If you know the value to be set (e.g., in .addfield()), use
._find_fld_pkt_val() instead.
"""
for fld, cond in self.flds:
if isinstance(cond, tuple):
cond = cond[0]
if cond(pkt):
return fld
return self.dflt
def _find_fld_pkt_val(self, pkt, val):
"""Given a Packet instance `pkt` and the value `val` to be set,
returns the Field subclass to be used.
"""
for fld, cond in self.flds:
if isinstance(cond, tuple):
if cond[1](pkt, val):
return fld
elif cond(pkt):
return fld
return self.dflt
def _find_fld(self):
"""Returns the Field subclass to be used, depending on the Packet
instance, or the default subclass.
DEV: since the Packet instance is not provided, we have to use a hack
to guess it. It should only be used if you cannot provide the current
Packet instance (for example, because of the current Scapy API).
If you have the current Packet instance, use ._find_fld_pkt_val() (if
the value to set is also known) or ._find_fld_pkt() instead.
"""
# Hack to preserve current Scapy API
# See https://stackoverflow.com/a/7272464/3223422
frame = inspect.currentframe().f_back.f_back
while frame is not None:
try:
pkt = frame.f_locals['self']
except KeyError:
pass
else:
if isinstance(pkt, tuple(self.dflt.owners)):
return self._find_fld_pkt(pkt)
frame = frame.f_back
return self.dflt
def getfield(self, pkt, s):
return self._find_fld_pkt(pkt).getfield(pkt, s)
def addfield(self, pkt, s, val):
return self._find_fld_pkt_val(pkt, val).addfield(pkt, s, val)
def any2i(self, pkt, val):
return self._find_fld_pkt_val(pkt, val).any2i(pkt, val)
def h2i(self, pkt, val):
return self._find_fld_pkt_val(pkt, val).h2i(pkt, val)
def i2h(self, pkt, val):
return self._find_fld_pkt_val(pkt, val).i2h(pkt, val)
def i2m(self, pkt, val):
return self._find_fld_pkt_val(pkt, val).i2m(pkt, val)
def i2len(self, pkt, val):
return self._find_fld_pkt_val(pkt, val).i2len(pkt, val)
def i2repr(self, pkt, val):
return self._find_fld_pkt_val(pkt, val).i2repr(pkt, val)
def register_owner(self, cls):
for fld, _ in self.flds:
fld.owners.append(cls)
self.dflt.owners.append(cls)
def __getattr__(self, attr):
return getattr(self._find_fld(), attr)
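# Editor's illustrative sketch (not part of the upstream source): choose
# an IPv4 or IPv6 encoding for one logical field depending on another
# packet attribute. _Probe is a hypothetical stand-in; real code passes a
# Packet instance (see scapy.layers.l2.ARP for the canonical use).
def _example_multiple_type_field():
    fld = MultipleTypeField(
        [(IP6Field("addr", "::1"), lambda pkt: pkt.version == 6)],
        IPField("addr", "127.0.0.1"),   # used when no condition matches
    )

    class _Probe(object):
        def __init__(self, version):
            self.version = version

    assert len(fld.addfield(_Probe(6), b"", "::1")) == 16
    assert len(fld.addfield(_Probe(4), b"", "192.0.2.1")) == 4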
class PadField(object):
"""Add bytes after the proxified field so that it ends at the specified
alignment from its beginning"""
__slots__ = ["_fld", "_align", "_padwith"]
def __init__(self, fld, align, padwith=None):
self._fld = fld
self._align = align
self._padwith = padwith or b""
def padlen(self, flen):
return -flen % self._align
def getfield(self, pkt, s):
remain, val = self._fld.getfield(pkt, s)
padlen = self.padlen(len(s) - len(remain))
return remain[padlen:], val
def addfield(self, pkt, s, val):
sval = self._fld.addfield(pkt, b"", val)
return s + sval + struct.pack("%is" % (self.padlen(len(sval))), self._padwith) # noqa: E501
def __getattr__(self, attr):
return getattr(self._fld, attr)
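# Editor's illustrative sketch (not part of the upstream source): pad a
# three-byte string field out to a four-byte boundary with a NUL byte.
def _example_pad_field():
    fld = PadField(StrFixedLenField("data", b"", length=3), 4,
                   padwith=b"\x00")
    assert fld.padlen(3) == 1                        # -3 % 4 == 1
    assert fld.addfield(None, b"", b"abc") == b"abc\x00"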
class ReversePadField(PadField):
"""Add bytes BEFORE the proxified field so that it starts at the specified
alignment from its beginning"""
def getfield(self, pkt, s):
# We need to get the length that has already been dissected
padlen = self.padlen(pkt._tmp_dissect_pos)
remain, val = self._fld.getfield(pkt, s[padlen:])
return remain, val
def addfield(self, pkt, s, val):
sval = self._fld.addfield(pkt, b"", val)
return s + struct.pack("%is" % (self.padlen(len(s))), self._padwith) + sval # noqa: E501
class DestField(Field):
__slots__ = ["defaultdst"]
# Each subclass must have its own bindings attribute
# bindings = {}
def __init__(self, name, default):
self.defaultdst = default
def dst_from_pkt(self, pkt):
for addr, condition in self.bindings.get(pkt.payload.__class__, []):
try:
if all(pkt.payload.getfieldval(field) == value
for field, value in six.iteritems(condition)):
return addr
except AttributeError:
pass
return self.defaultdst
@classmethod
def bind_addr(cls, layer, addr, **condition):
cls.bindings.setdefault(layer, []).append((addr, condition))
class MACField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "6s")
def i2m(self, pkt, x):
if x is None:
return b"\0\0\0\0\0\0"
return mac2str(x)
def m2i(self, pkt, x):
return str2mac(x)
def any2i(self, pkt, x):
if isinstance(x, bytes) and len(x) == 6:
x = self.m2i(pkt, x)
return x
def i2repr(self, pkt, x):
x = self.i2h(pkt, x)
if self in conf.resolve:
x = conf.manufdb._resolve_MAC(x)
return x
def randval(self):
return RandMAC()
class IPField(Field):
__slots__ = []
def __init__(self, name, default):
Field.__init__(self, name, default, "4s")
def h2i(self, pkt, x):
if isinstance(x, bytes):
x = plain_str(x)
if isinstance(x, str):
try:
inet_aton(x)
except socket.error:
x = Net(x)
elif isinstance(x, list):
x = [self.h2i(pkt, n) for n in x]
return x
def resolve(self, x):
if self in conf.resolve:
try:
ret = socket.gethostbyaddr(x)[0]
except Exception:
pass
else:
if ret:
return ret
return x
def i2m(self, pkt, x):
if x is None:
return b'\x00\x00\x00\x00'
return inet_aton(plain_str(x))
def m2i(self, pkt, x):
return inet_ntoa(x)
def any2i(self, pkt, x):
return self.h2i(pkt, x)
def i2repr(self, pkt, x):
r = self.resolve(self.i2h(pkt, x))
return r if isinstance(r, str) else repr(r)
def randval(self):
return RandIP()
class SourceIPField(IPField):
__slots__ = ["dstname"]
def __init__(self, name, dstname):
IPField.__init__(self, name, None)
self.dstname = dstname
def __findaddr(self, pkt):
if conf.route is None:
# unused import, only to initialize conf.route
import scapy.route # noqa: F401
dst = ("0.0.0.0" if self.dstname is None
else getattr(pkt, self.dstname) or "0.0.0.0")
if isinstance(dst, (Gen, list)):
r = {conf.route.route(str(daddr)) for daddr in dst}
if len(r) > 1:
warning("More than one possible route for %r" % (dst,))
return min(r)[1]
return conf.route.route(dst)[1]
def i2m(self, pkt, x):
if x is None:
x = self.__findaddr(pkt)
return IPField.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
x = self.__findaddr(pkt)
return IPField.i2h(self, pkt, x)
class IP6Field(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "16s")
def h2i(self, pkt, x):
if isinstance(x, bytes):
x = plain_str(x)
if isinstance(x, str):
try:
x = in6_ptop(x)
except socket.error:
x = Net6(x)
elif isinstance(x, list):
x = [self.h2i(pkt, n) for n in x]
return x
def i2m(self, pkt, x):
if x is None:
x = "::"
return inet_pton(socket.AF_INET6, plain_str(x))
def m2i(self, pkt, x):
return inet_ntop(socket.AF_INET6, x)
def any2i(self, pkt, x):
return self.h2i(pkt, x)
def i2repr(self, pkt, x):
if x is None:
return self.i2h(pkt, x)
elif not isinstance(x, Net6) and not isinstance(x, list):
if in6_isaddrTeredo(x): # print Teredo info
server, _, maddr, mport = teredoAddrExtractInfo(x)
return "%s [Teredo srv: %s cli: %s:%s]" % (self.i2h(pkt, x), server, maddr, mport) # noqa: E501
elif in6_isaddr6to4(x): # print encapsulated address
vaddr = in6_6to4ExtractAddr(x)
return "%s [6to4 GW: %s]" % (self.i2h(pkt, x), vaddr)
r = self.i2h(pkt, x) # No specific information to return
return r if isinstance(r, str) else repr(r)
def randval(self):
return RandIP6()
class SourceIP6Field(IP6Field):
__slots__ = ["dstname"]
def __init__(self, name, dstname):
IP6Field.__init__(self, name, None)
self.dstname = dstname
def i2m(self, pkt, x):
if x is None:
dst = ("::" if self.dstname is None else
getattr(pkt, self.dstname) or "::")
iff, x, nh = conf.route6.route(dst)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
if conf.route6 is None:
# unused import, only to initialize conf.route6
import scapy.route6 # noqa: F401
dst = ("::" if self.dstname is None else getattr(pkt, self.dstname)) # noqa: E501
if isinstance(dst, (Gen, list)):
r = {conf.route6.route(str(daddr)) for daddr in dst}
if len(r) > 1:
warning("More than one possible route for %r" % (dst,))
x = min(r)[1]
else:
x = conf.route6.route(dst)[1]
return IP6Field.i2h(self, pkt, x)
class DestIP6Field(IP6Field, DestField):
bindings = {}
def __init__(self, name, default):
IP6Field.__init__(self, name, None)
DestField.__init__(self, name, default)
def i2m(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2m(self, pkt, x)
def i2h(self, pkt, x):
if x is None:
x = self.dst_from_pkt(pkt)
return IP6Field.i2h(self, pkt, x)
class ByteField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "B")
class XByteField(ByteField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class OByteField(ByteField):
def i2repr(self, pkt, x):
return "%03o" % self.i2h(pkt, x)
class ThreeBytesField(ByteField):
def __init__(self, name, default):
Field.__init__(self, name, default, "!I")
def addfield(self, pkt, s, val):
return s + struct.pack(self.fmt, self.i2m(pkt, val))[1:4]
def getfield(self, pkt, s):
return s[3:], self.m2i(pkt, struct.unpack(self.fmt, b"\x00" + s[:3])[0]) # noqa: E501
class X3BytesField(ThreeBytesField, XByteField):
def i2repr(self, pkt, x):
return XByteField.i2repr(self, pkt, x)
class LEThreeBytesField(ByteField):
def __init__(self, name, default):
Field.__init__(self, name, default, "<I")
def addfield(self, pkt, s, val):
return s + struct.pack(self.fmt, self.i2m(pkt, val))[:3]
def getfield(self, pkt, s):
return s[3:], self.m2i(pkt, struct.unpack(self.fmt, s[:3] + b"\x00")[0]) # noqa: E501
class LEX3BytesField(LEThreeBytesField, XByteField):
def i2repr(self, pkt, x):
return XByteField.i2repr(self, pkt, x)
class SignedByteField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "b")
class FieldValueRangeException(Scapy_Exception):
pass
class FieldAttributeException(Scapy_Exception):
pass
class YesNoByteField(ByteField):
"""
byte based flag field that shows representation of its number based on a given association # noqa: E501
in its default configuration the following representation is generated:
x == 0 : 'no'
x != 0 : 'yes'
in more sophisticated use-cases (e.g. yes/no/invalid) one can use the config attribute to configure # noqa: E501
key-value, key-range and key-value-set associations that will be used to generate the values representation. # noqa: E501
a range is given by a tuple (<first-val>, <last-value>) including the last value. a single-value tuple # noqa: E501
is treated as scalar.
a list defines a set of (probably non consecutive) values that should be associated to a given key. # noqa: E501
all values not associated with a key will be shown as number of type unsigned byte. # noqa: E501
config = {
'no' : 0,
'foo' : (1,22),
'yes' : 23,
'bar' : [24,25, 42, 48, 87, 253]
}
generates the following representations:
x == 0 : 'no'
x == 15: 'foo'
x == 23: 'yes'
x == 42: 'bar'
x == 43: 43
using the config attribute one could also revert the stock-yes-no-behavior:
config = {
'yes' : 0,
'no' : (1,255)
}
will generate the following value representation:
x == 0 : 'yes'
x != 0 : 'no'
"""
__slots__ = ['eval_fn']
def _build_config_representation(self, config):
assoc_table = dict()
for key in config:
value_spec = config[key]
value_spec_type = type(value_spec)
if value_spec_type is int:
if value_spec < 0 or value_spec > 255:
raise FieldValueRangeException('given field value {} invalid - ' # noqa: E501
'must be in range [0..255]'.format(value_spec)) # noqa: E501
assoc_table[value_spec] = key
elif value_spec_type is list:
for value in value_spec:
if value < 0 or value > 255:
raise FieldValueRangeException('given field value {} invalid - ' # noqa: E501
'must be in range [0..255]'.format(value)) # noqa: E501
assoc_table[value] = key
elif value_spec_type is tuple:
value_spec_len = len(value_spec)
if value_spec_len != 2:
raise FieldAttributeException('invalid length {} of given config item tuple {} - must be ' # noqa: E501
'(<start-range>, <end-range>).'.format(value_spec_len, value_spec)) # noqa: E501
value_range_start = value_spec[0]
if value_range_start < 0 or value_range_start > 255:
raise FieldValueRangeException('given field value {} invalid - ' # noqa: E501
'must be in range [0..255]'.format(value_range_start)) # noqa: E501
value_range_end = value_spec[1]
if value_range_end < 0 or value_range_end > 255:
raise FieldValueRangeException('given field value {} invalid - ' # noqa: E501
'must be in range [0..255]'.format(value_range_end)) # noqa: E501
for value in range(value_range_start, value_range_end + 1):
assoc_table[value] = key
self.eval_fn = lambda x: assoc_table[x] if x in assoc_table else x
def __init__(self, name, default, config=None, *args, **kargs):
if not config:
# this represents the common use case and therefore it is kept small # noqa: E501
self.eval_fn = lambda x: 'no' if x == 0 else 'yes'
else:
self._build_config_representation(config)
ByteField.__init__(self, name, default, *args, **kargs)
def i2repr(self, pkt, x):
return self.eval_fn(x)
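# Editor's illustrative sketch (not part of the upstream source): the
# config mapping documented above, exercised directly through i2repr().
def _example_yes_no_byte_field():
    fld = YesNoByteField("flag", 0,
                         config={'no': 0, 'foo': (1, 22), 'yes': 23})
    assert fld.i2repr(None, 0) == 'no'
    assert fld.i2repr(None, 15) == 'foo'     # 15 falls in the (1, 22) range
    assert fld.i2repr(None, 23) == 'yes'
    assert fld.i2repr(None, 42) == 42        # unmapped values stay numeric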
class ShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "H")
class SignedShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "h")
class LEShortField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<H")
class XShortField(ShortField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class IntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "I")
class SignedIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "i")
def randval(self):
return RandSInt()
class LEIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<I")
class LESignedIntField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "<i")
def randval(self):
return RandSInt()
class XIntField(IntField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class XLEIntField(LEIntField, XIntField):
def i2repr(self, pkt, x):
return XIntField.i2repr(self, pkt, x)
class XLEShortField(LEShortField, XShortField):
def i2repr(self, pkt, x):
return XShortField.i2repr(self, pkt, x)
class LongField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "Q")
class LELongField(LongField):
def __init__(self, name, default):
Field.__init__(self, name, default, "<Q")
class XLongField(LongField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class IEEEFloatField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "f")
class IEEEDoubleField(Field):
def __init__(self, name, default):
Field.__init__(self, name, default, "d")
class StrField(Field):
__slots__ = ["remain"]
def __init__(self, name, default, fmt="H", remain=0):
Field.__init__(self, name, default, fmt)
self.remain = remain
def i2len(self, pkt, i):
return len(i)
def any2i(self, pkt, x):
if isinstance(x, six.text_type):
x = raw(x)
return super(StrField, self).any2i(pkt, x)
def i2repr(self, pkt, x):
val = super(StrField, self).i2repr(pkt, x)
if val[:2] in ['b"', "b'"]:
return val[1:]
return val
def i2m(self, pkt, x):
if x is None:
return b""
if not isinstance(x, bytes):
return raw(x)
return x
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val)
def getfield(self, pkt, s):
if self.remain == 0:
return b"", self.m2i(pkt, s)
else:
return s[-self.remain:], self.m2i(pkt, s[:-self.remain])
def randval(self):
return RandBin(RandNum(0, 1200))
class PacketField(StrField):
__slots__ = ["cls"]
holds_packets = 1
def __init__(self, name, default, cls, remain=0):
StrField.__init__(self, name, default, remain=remain)
self.cls = cls
def i2m(self, pkt, i):
if i is None:
return b""
return raw(i)
def m2i(self, pkt, m):
return self.cls(m)
def getfield(self, pkt, s):
i = self.m2i(pkt, s)
remain = b""
if conf.padding_layer in i:
r = i[conf.padding_layer]
del(r.underlayer.payload)
remain = r.load
return remain, i
class PacketLenField(PacketField):
__slots__ = ["length_from"]
def __init__(self, name, default, cls, length_from=None):
PacketField.__init__(self, name, default, cls)
self.length_from = length_from
def getfield(self, pkt, s):
len_pkt = self.length_from(pkt)
try:
i = self.m2i(pkt, s[:len_pkt])
except Exception:
if conf.debug_dissector:
raise
i = conf.raw_layer(load=s[:len_pkt])
return s[len_pkt:], i
class PacketListField(PacketField):
""" PacketListField represents a series of Packet instances that might occur right in the middle of another Packet # noqa: E501
field list.
This field type may also be used to indicate that a series of Packet instances have a sibling semantic instead of # noqa: E501
a parent/child relationship (i.e. a stack of layers).
"""
__slots__ = ["count_from", "length_from", "next_cls_cb"]
islist = 1
def __init__(self, name, default, cls=None, count_from=None, length_from=None, next_cls_cb=None): # noqa: E501
""" The number of Packet instances that are dissected by this field can be parametrized using one of three # noqa: E501
different mechanisms/parameters:
* count_from: a callback that returns the number of Packet instances to dissect. The callback prototype is: # noqa: E501
count_from(pkt:Packet) -> int
* length_from: a callback that returns the number of bytes that must be dissected by this field. The # noqa: E501
callback prototype is:
length_from(pkt:Packet) -> int
* next_cls_cb: a callback that enables a Scapy developer to dynamically discover if another Packet instance # noqa: E501
should be dissected or not. See below for this callback prototype.
The bytes that are not consumed during the dissection of this field are passed to the next field of the current # noqa: E501
packet.
For the serialization of such a field, the list of Packets that are contained in a PacketListField can be # noqa: E501
heterogeneous and is unrestricted.
The type of the Packet instances that are dissected with this field is specified or discovered using one of the # noqa: E501
following mechanisms:
* the cls parameter may contain a callable that returns an instance of the dissected Packet. This # noqa: E501
may either be a reference to a Packet subclass (e.g. DNSRROPT in layers/dns.py) to generate a  # noqa: E501
homogeneous PacketListField or a function deciding the type of the Packet instance # noqa: E501
(e.g. _CDPGuessAddrRecord in contrib/cdp.py)
* the cls parameter may contain a class object with a defined "dispatch_hook" classmethod. That # noqa: E501
method must return a Packet instance. The dispatch_hook classmethod must implement the following prototype:  # noqa: E501
dispatch_hook(cls, _pkt:Optional[Packet], *args, **kargs) -> Packet_metaclass # noqa: E501
The _pkt parameter may contain a reference to the packet instance containing the PacketListField that is # noqa: E501
being dissected.
* the next_cls_cb parameter may contain a callable whose prototype is: # noqa: E501
cbk(pkt:Packet, lst:List[Packet], cur:Optional[Packet], remain:str) -> Optional[Packet_metaclass] # noqa: E501
The pkt argument contains a reference to the Packet instance containing the PacketListField that is # noqa: E501
being dissected. The lst argument is the list of all Packet instances that were previously parsed during # noqa: E501
the current PacketListField dissection, save for the very last Packet instance. The cur argument # noqa: E501
contains a reference to that very last parsed Packet instance. The remain argument contains the bytes # noqa: E501
that may still be consumed by the current PacketListField dissection operation. This callback returns # noqa: E501
either the type of the next Packet to dissect or None to indicate that no more Packets are to be  # noqa: E501
dissected.
These four arguments allow a variety of dynamic discoveries of the number of Packets to dissect and of the  # noqa: E501
type of each one of these Packets, including: type determination based on current Packet instances or # noqa: E501
its underlayers, continuation based on the previously parsed Packet instances within that # noqa: E501
PacketListField, continuation based on a look-ahead on the bytes to be dissected... # noqa: E501
The cls and next_cls_cb parameters are semantically exclusive, although one could specify both. If both are # noqa: E501
specified, cls is silently ignored. The same is true for count_from and next_cls_cb. # noqa: E501
length_from and next_cls_cb are compatible and the dissection will end, whichever of the two stop conditions # noqa: E501
comes first.
@param name: the name of the field
@param default: the default value of this field; generally an empty Python list # noqa: E501
@param cls: either a callable returning a Packet instance or a class object defining a dispatch_hook class # noqa: E501
method
@param count_from: a callback returning the number of Packet instances to dissect # noqa: E501
@param length_from: a callback returning the number of bytes to dissect
@param next_cls_cb: a callback returning either None or the type of the next Packet to dissect. # noqa: E501
"""
if default is None:
default = [] # Create a new list for each instance
PacketField.__init__(self, name, default, cls)
self.count_from = count_from
self.length_from = length_from
self.next_cls_cb = next_cls_cb
def any2i(self, pkt, x):
if not isinstance(x, list):
return [x]
else:
return x
def i2count(self, pkt, val):
if isinstance(val, list):
return len(val)
return 1
def i2len(self, pkt, val):
return sum(len(p) for p in val)
def do_copy(self, x):
if x is None:
return None
else:
return [p if isinstance(p, six.string_types) else p.copy() for p in x] # noqa: E501
def getfield(self, pkt, s):
c = len_pkt = cls = None
if self.length_from is not None:
len_pkt = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
if self.next_cls_cb is not None:
cls = self.next_cls_cb(pkt, [], None, s)
c = 1
lst = []
ret = b""
remain = s
if len_pkt is not None:
remain, ret = s[:len_pkt], s[len_pkt:]
while remain:
if c is not None:
if c <= 0:
break
c -= 1
try:
if cls is not None:
p = cls(remain)
else:
p = self.m2i(pkt, remain)
except Exception:
if conf.debug_dissector:
raise
p = conf.raw_layer(load=remain)
remain = b""
else:
if conf.padding_layer in p:
pad = p[conf.padding_layer]
remain = pad.load
del(pad.underlayer.payload)
if self.next_cls_cb is not None:
cls = self.next_cls_cb(pkt, lst, p, remain)
if cls is not None:
c += 1
else:
remain = b""
lst.append(p)
return remain + ret, lst
def addfield(self, pkt, s, val):
return s + b"".join(raw(v) for v in val)
class StrFixedLenField(StrField):
__slots__ = ["length_from"]
def __init__(self, name, default, length=None, length_from=None):
StrField.__init__(self, name, default)
self.length_from = length_from
if length is not None:
self.length_from = lambda pkt, length=length: length
def i2repr(self, pkt, v):
if isinstance(v, bytes):
v = v.rstrip(b"\0")
return super(StrFixedLenField, self).i2repr(pkt, v)
def getfield(self, pkt, s):
len_pkt = self.length_from(pkt)
return s[len_pkt:], self.m2i(pkt, s[:len_pkt])
def addfield(self, pkt, s, val):
len_pkt = self.length_from(pkt)
return s + struct.pack("%is" % len_pkt, self.i2m(pkt, val))
def randval(self):
try:
len_pkt = self.length_from(None)
except Exception:
len_pkt = RandNum(0, 200)
return RandBin(len_pkt)
class StrFixedLenEnumField(StrFixedLenField):
__slots__ = ["enum"]
def __init__(self, name, default, length=None, enum=None, length_from=None): # noqa: E501
StrFixedLenField.__init__(self, name, default, length=length, length_from=length_from) # noqa: E501
self.enum = enum
def i2repr(self, pkt, v):
r = v.rstrip("\0" if isinstance(v, str) else b"\0")
rr = repr(r)
if v in self.enum:
rr = "%s (%s)" % (rr, self.enum[v])
elif r in self.enum:
rr = "%s (%s)" % (rr, self.enum[r])
return rr
class NetBIOSNameField(StrFixedLenField):
def __init__(self, name, default, length=31):
StrFixedLenField.__init__(self, name, default, length)
def i2m(self, pkt, x):
len_pkt = self.length_from(pkt) // 2
x = raw(x)
if x is None:
x = b""
x += b" " * len_pkt
x = x[:len_pkt]
x = b"".join(chb(0x41 + (orb(b) >> 4)) + chb(0x41 + (orb(b) & 0xf)) for b in x) # noqa: E501
x = b" " + x
return x
def m2i(self, pkt, x):
x = x.strip(b"\x00").strip(b" ")
return b"".join(map(lambda x, y: chb((((orb(x) - 1) & 0xf) << 4) + ((orb(y) - 1) & 0xf)), x[::2], x[1::2])) # noqa: E501
class StrLenField(StrField):
__slots__ = ["length_from", "max_length"]
def __init__(self, name, default, fld=None, length_from=None, max_length=None): # noqa: E501
StrField.__init__(self, name, default)
self.length_from = length_from
self.max_length = max_length
def getfield(self, pkt, s):
len_pkt = self.length_from(pkt)
return s[len_pkt:], self.m2i(pkt, s[:len_pkt])
def randval(self):
return RandBin(RandNum(0, self.max_length or 1200))
class XStrField(StrField):
"""
StrField whose value is printed as hexadecimal.
"""
def i2repr(self, pkt, x):
if x is None:
return repr(x)
return bytes_hex(x).decode()
class XStrLenField(StrLenField):
"""
StrLenField whose value is printed as hexadecimal.
"""
def i2repr(self, pkt, x):
if not x:
return repr(x)
return bytes_hex(x[:self.length_from(pkt)]).decode()
class XStrFixedLenField(StrFixedLenField):
"""
StrFixedLenField whose value is printed as hexadecimal.
"""
def i2repr(self, pkt, x):
if not x:
return repr(x)
return bytes_hex(x[:self.length_from(pkt)]).decode()
class StrLenFieldUtf16(StrLenField):
def h2i(self, pkt, x):
return plain_str(x).encode('utf-16')[2:]
def i2h(self, pkt, x):
return x.decode('utf-16')
class BoundStrLenField(StrLenField):
__slots__ = ["minlen", "maxlen"]
def __init__(self, name, default, minlen=0, maxlen=255, fld=None, length_from=None): # noqa: E501
StrLenField.__init__(self, name, default, fld, length_from)
self.minlen = minlen
self.maxlen = maxlen
def randval(self):
return RandBin(RandNum(self.minlen, self.maxlen))
class FieldListField(Field):
__slots__ = ["field", "count_from", "length_from"]
islist = 1
def __init__(self, name, default, field, length_from=None, count_from=None): # noqa: E501
if default is None:
default = [] # Create a new list for each instance
self.field = field
Field.__init__(self, name, default)
self.count_from = count_from
self.length_from = length_from
def i2count(self, pkt, val):
if isinstance(val, list):
return len(val)
return 1
def i2len(self, pkt, val):
return int(sum(self.field.i2len(pkt, v) for v in val))
def i2m(self, pkt, val):
if val is None:
val = []
return val
def any2i(self, pkt, x):
if not isinstance(x, list):
return [self.field.any2i(pkt, x)]
else:
return [self.field.any2i(pkt, e) for e in x]
def i2repr(self, pkt, x):
return "[%s]" % ", ".join(self.field.i2repr(pkt, v) for v in x)
def addfield(self, pkt, s, val):
val = self.i2m(pkt, val)
for v in val:
s = self.field.addfield(pkt, s, v)
return s
def getfield(self, pkt, s):
c = len_pkt = None
if self.length_from is not None:
len_pkt = self.length_from(pkt)
elif self.count_from is not None:
c = self.count_from(pkt)
val = []
ret = b""
if len_pkt is not None:
s, ret = s[:len_pkt], s[len_pkt:]
while s:
if c is not None:
if c <= 0:
break
c -= 1
s, v = self.field.getfield(pkt, s)
val.append(v)
return s + ret, val
class FieldLenField(Field):
__slots__ = ["length_of", "count_of", "adjust"]
def __init__(self, name, default, length_of=None, fmt="H", count_of=None, adjust=lambda pkt, x: x, fld=None): # noqa: E501
Field.__init__(self, name, default, fmt)
self.length_of = length_of
self.count_of = count_of
self.adjust = adjust
if fld is not None:
# FIELD_LENGTH_MANAGEMENT_DEPRECATION(self.__class__.__name__)
self.length_of = fld
def i2m(self, pkt, x):
if x is None:
if self.length_of is not None:
fld, fval = pkt.getfield_and_val(self.length_of)
f = fld.i2len(pkt, fval)
else:
fld, fval = pkt.getfield_and_val(self.count_of)
f = fld.i2count(pkt, fval)
x = self.adjust(pkt, f)
return x
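# Editor's illustrative sketch (not part of the upstream source): the
# classic length-prefixed value, where the length byte is computed from
# the value on build and drives the slice on dissection. _LV is a
# hypothetical Packet subclass used only for this demo.
def _example_field_len_field():
    from scapy.packet import Packet  # deferred to avoid a circular import

    class _LV(Packet):
        fields_desc = [
            FieldLenField("len", None, length_of="value", fmt="B"),
            StrLenField("value", b"", length_from=lambda pkt: pkt.len),
        ]

    assert raw(_LV(value=b"hello")) == b"\x05hello"
    assert _LV(b"\x05hello").value == b"hello"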
class StrNullField(StrField):
def addfield(self, pkt, s, val):
return s + self.i2m(pkt, val) + b"\x00"
def getfield(self, pkt, s):
len_str = s.find(b"\x00")
if len_str < 0:
# XXX \x00 not found
return b"", s
return s[len_str + 1:], self.m2i(pkt, s[:len_str])
def randval(self):
return RandTermString(RandNum(0, 1200), b"\x00")
class StrStopField(StrField):
__slots__ = ["stop", "additional"]
def __init__(self, name, default, stop, additional=0):
Field.__init__(self, name, default)
self.stop = stop
self.additional = additional
def getfield(self, pkt, s):
len_str = s.find(self.stop)
if len_str < 0:
return b"", s
# raise Scapy_Exception,"StrStopField: stop value [%s] not found" %stop # noqa: E501
len_str += len(self.stop) + self.additional
return s[len_str:], s[:len_str]
def randval(self):
return RandTermString(RandNum(0, 1200), self.stop)
class LenField(Field):
__slots__ = ["adjust"]
def __init__(self, name, default, fmt="H", adjust=lambda x: x):
Field.__init__(self, name, default, fmt)
self.adjust = adjust
def i2m(self, pkt, x):
if x is None:
x = self.adjust(len(pkt.payload))
return x
class BCDFloatField(Field):
def i2m(self, pkt, x):
return int(256 * x)
def m2i(self, pkt, x):
return x / 256.0
class BitField(Field):
__slots__ = ["rev", "size"]
def __init__(self, name, default, size):
Field.__init__(self, name, default)
self.rev = size < 0
self.size = abs(size)
def reverse(self, val):
if self.size == 16:
# Replaces socket.ntohs (but works on both little/big endian)
val = struct.unpack('>H', struct.pack('<H', int(val)))[0]
elif self.size == 32:
# Same here but for socket.ntohl
val = struct.unpack('>I', struct.pack('<I', int(val)))[0]
return val
def addfield(self, pkt, s, val):
val = self.i2m(pkt, val)
if isinstance(s, tuple):
s, bitsdone, v = s
else:
bitsdone = 0
v = 0
if self.rev:
val = self.reverse(val)
v <<= self.size
v |= val & ((1 << self.size) - 1)
bitsdone += self.size
while bitsdone >= 8:
bitsdone -= 8
s = s + struct.pack("!B", v >> bitsdone)
v &= (1 << bitsdone) - 1
if bitsdone:
return s, bitsdone, v
else:
return s
def getfield(self, pkt, s):
if isinstance(s, tuple):
s, bn = s
else:
bn = 0
# we don't want to process the whole string
nb_bytes = (self.size + bn - 1) // 8 + 1
w = s[:nb_bytes]
# split the substring byte by byte
_bytes = struct.unpack('!%dB' % nb_bytes, w)
b = 0
for c in range(nb_bytes):
b |= int(_bytes[c]) << (nb_bytes - c - 1) * 8
# get rid of high order bits
b &= (1 << (nb_bytes * 8 - bn)) - 1
# remove low order bits
b = b >> (nb_bytes * 8 - self.size - bn)
if self.rev:
b = self.reverse(b)
bn += self.size
s = s[bn // 8:]
bn = bn % 8
b = self.m2i(pkt, b)
if bn:
return (s, bn), b
else:
return s, b
def randval(self):
return RandNum(0, 2**self.size - 1)
def i2len(self, pkt, x):
return float(self.size) / 8
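# Editor's illustrative sketch (not part of the upstream source): two
# 4-bit fields sharing one byte, showing the (s, bitsdone, v) hand-off
# implemented above. _Nibbles is a hypothetical Packet subclass.
def _example_bit_fields():
    from scapy.packet import Packet  # deferred to avoid a circular import

    class _Nibbles(Packet):
        fields_desc = [BitField("hi", 0, 4), BitField("lo", 0, 4)]

    assert raw(_Nibbles(hi=0x4, lo=0x5)) == b"\x45"
    p = _Nibbles(b"\xab")
    assert (p.hi, p.lo) == (0xA, 0xB)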
class BitFieldLenField(BitField):
__slots__ = ["length_of", "count_of", "adjust"]
def __init__(self, name, default, size, length_of=None, count_of=None, adjust=lambda pkt, x: x): # noqa: E501
BitField.__init__(self, name, default, size)
self.length_of = length_of
self.count_of = count_of
self.adjust = adjust
def i2m(self, pkt, x):
return (FieldLenField.i2m.__func__ if six.PY2 else FieldLenField.i2m)(self, pkt, x) # noqa: E501
class XBitField(BitField):
def i2repr(self, pkt, x):
return lhex(self.i2h(pkt, x))
class _EnumField(Field):
def __init__(self, name, default, enum, fmt="H"):
""" Initializes enum fields.
@param name: name of this field
@param default: default value of this field
@param enum: either a dict or a tuple of two callables. Dict keys are # noqa: E501
the internal values, while the dict values are the
user-friendly representations. If the tuple is provided, # noqa: E501
the first callable receives the internal value as
parameter and returns the user-friendly representation
and the second callable does the converse. The first
callable may return None to default to a literal string
(repr()) representation.
@param fmt: struct.pack format used to parse and serialize the
internal value from and to machine representation.
"""
if isinstance(enum, ObservableDict):
enum.observe(self)
if isinstance(enum, tuple):
self.i2s_cb = enum[0]
self.s2i_cb = enum[1]
self.i2s = None
self.s2i = None
else:
i2s = self.i2s = {}
s2i = self.s2i = {}
self.i2s_cb = None
self.s2i_cb = None
if isinstance(enum, list):
keys = list(range(len(enum)))
elif isinstance(enum, DADict):
keys = enum.keys()
else:
keys = list(enum)
if any(isinstance(x, str) for x in keys):
i2s, s2i = s2i, i2s
for k in keys:
i2s[k] = enum[k]
s2i[enum[k]] = k
Field.__init__(self, name, default, fmt)
def any2i_one(self, pkt, x):
if isinstance(x, str):
try:
x = self.s2i[x]
except TypeError:
x = self.s2i_cb(x)
return x
def i2repr_one(self, pkt, x):
if self not in conf.noenum and not isinstance(x, VolatileValue):
try:
return self.i2s[x]
except KeyError:
pass
except TypeError:
ret = self.i2s_cb(x)
if ret is not None:
return ret
return repr(x)
def any2i(self, pkt, x):
if isinstance(x, list):
return [self.any2i_one(pkt, z) for z in x]
else:
return self.any2i_one(pkt, x)
def i2repr(self, pkt, x):
if isinstance(x, list):
return [self.i2repr_one(pkt, z) for z in x]
else:
return self.i2repr_one(pkt, x)
def notify_set(self, enum, key, value):
log_runtime.debug("At %s: Change to %s at 0x%x" % (self, value, key))
self.i2s[key] = value
self.s2i[value] = key
def notify_del(self, enum, key):
log_runtime.debug("At %s: Delete value at 0x%x" % (self, key))
value = self.i2s[key]
del self.i2s[key]
del self.s2i[value]
class EnumField(_EnumField):
__slots__ = ["i2s", "s2i", "s2i_cb", "i2s_cb"]
class CharEnumField(EnumField):
def __init__(self, name, default, enum, fmt="1s"):
EnumField.__init__(self, name, default, enum, fmt)
if self.i2s is not None:
k = list(self.i2s)
if k and len(k[0]) != 1:
self.i2s, self.s2i = self.s2i, self.i2s
def any2i_one(self, pkt, x):
if len(x) != 1:
if self.s2i is None:
x = self.s2i_cb(x)
else:
x = self.s2i[x]
return x
class BitEnumField(BitField, _EnumField):
__slots__ = EnumField.__slots__
def __init__(self, name, default, size, enum):
_EnumField.__init__(self, name, default, enum)
self.rev = size < 0
self.size = abs(size)
def any2i(self, pkt, x):
return _EnumField.any2i(self, pkt, x)
def i2repr(self, pkt, x):
return _EnumField.i2repr(self, pkt, x)
class ShortEnumField(EnumField):
__slots__ = EnumField.__slots__
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "H")
class LEShortEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "<H")
class ByteEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "B")
class XByteEnumField(ByteEnumField):
def i2repr_one(self, pkt, x):
if self not in conf.noenum and not isinstance(x, VolatileValue):
try:
return self.i2s[x]
except KeyError:
pass
except TypeError:
ret = self.i2s_cb(x)
if ret is not None:
return ret
return lhex(x)
class IntEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "I")
class SignedIntEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "i")
def randval(self):
return RandSInt()
class LEIntEnumField(EnumField):
def __init__(self, name, default, enum):
EnumField.__init__(self, name, default, enum, "<I")
class XShortEnumField(ShortEnumField):
def i2repr_one(self, pkt, x):
if self not in conf.noenum and not isinstance(x, VolatileValue):
try:
return self.i2s[x]
except KeyError:
pass
except TypeError:
ret = self.i2s_cb(x)
if ret is not None:
return ret
return lhex(x)
class _MultiEnumField(_EnumField):
def __init__(self, name, default, enum, depends_on, fmt="H"):
self.depends_on = depends_on
self.i2s_multi = enum
self.s2i_multi = {}
self.s2i_all = {}
for m in enum:
self.s2i_multi[m] = s2i = {}
for k, v in six.iteritems(enum[m]):
s2i[v] = k
self.s2i_all[v] = k
Field.__init__(self, name, default, fmt)
def any2i_one(self, pkt, x):
if isinstance(x, str):
v = self.depends_on(pkt)
if v in self.s2i_multi:
s2i = self.s2i_multi[v]
if x in s2i:
return s2i[x]
return self.s2i_all[x]
return x
def i2repr_one(self, pkt, x):
v = self.depends_on(pkt)
if v in self.i2s_multi:
return self.i2s_multi[v].get(x, x)
return x
class MultiEnumField(_MultiEnumField, EnumField):
__slots__ = ["depends_on", "i2s_multi", "s2i_multi", "s2i_all"]
class BitMultiEnumField(BitField, _MultiEnumField):
__slots__ = EnumField.__slots__ + MultiEnumField.__slots__
def __init__(self, name, default, size, enum, depends_on):
_MultiEnumField.__init__(self, name, default, enum, depends_on)
self.rev = size < 0
self.size = abs(size)
def any2i(self, pkt, x):
return _MultiEnumField.any2i(self, pkt, x)
def i2repr(self, pkt, x):
return _MultiEnumField.i2repr(self, pkt, x)
class ByteEnumKeysField(ByteEnumField):
"""ByteEnumField that picks valid values when fuzzed. """
def randval(self):
return RandEnumKeys(self.i2s)
class ShortEnumKeysField(ShortEnumField):
"""ShortEnumField that picks valid values when fuzzed. """
def randval(self):
return RandEnumKeys(self.i2s)
class IntEnumKeysField(IntEnumField):
"""IntEnumField that picks valid values when fuzzed. """
def randval(self):
return RandEnumKeys(self.i2s)
# Little endian fixed length field
class LEFieldLenField(FieldLenField):
def __init__(self, name, default, length_of=None, fmt="<H", count_of=None, adjust=lambda pkt, x: x, fld=None): # noqa: E501
FieldLenField.__init__(self, name, default, length_of=length_of, fmt=fmt, count_of=count_of, fld=fld, adjust=adjust) # noqa: E501
class FlagValue(object):
__slots__ = ["value", "names", "multi"]
def _fixvalue(self, value):
if isinstance(value, six.string_types):
value = value.split('+') if self.multi else list(value)
if isinstance(value, list):
y = 0
for i in value:
y |= 1 << self.names.index(i)
value = y
return None if value is None else int(value)
def __init__(self, value, names):
self.multi = isinstance(names, list)
self.names = names
self.value = self._fixvalue(value)
def __hash__(self):
return hash(self.value)
def __int__(self):
return self.value
def __eq__(self, other):
return self.value == self._fixvalue(other)
def __lt__(self, other):
return self.value < self._fixvalue(other)
def __le__(self, other):
return self.value <= self._fixvalue(other)
def __gt__(self, other):
return self.value > self._fixvalue(other)
def __ge__(self, other):
return self.value >= self._fixvalue(other)
def __ne__(self, other):
return self.value != self._fixvalue(other)
def __and__(self, other):
return self.__class__(self.value & self._fixvalue(other), self.names)
__rand__ = __and__
def __or__(self, other):
return self.__class__(self.value | self._fixvalue(other), self.names)
__ror__ = __or__
def __lshift__(self, other):
return self.value << self._fixvalue(other)
def __rshift__(self, other):
return self.value >> self._fixvalue(other)
def __nonzero__(self):
return bool(self.value)
__bool__ = __nonzero__
def flagrepr(self):
warning("obj.flagrepr() is obsolete. Use str(obj) instead.")
return str(self)
def __str__(self):
i = 0
r = []
x = int(self)
while x:
if x & 1:
r.append(self.names[i])
i += 1
x >>= 1
return ("+" if self.multi else "").join(r)
def __repr__(self):
return "<Flag %d (%s)>" % (self, self)
def __deepcopy__(self, memo):
return self.__class__(int(self), self.names)
def __getattr__(self, attr):
if attr in self.__slots__:
return super(FlagValue, self).__getattr__(attr)
try:
if self.multi:
return bool((2 ** self.names.index(attr)) & int(self))
return all(bool((2 ** self.names.index(flag)) & int(self))
for flag in attr)
except ValueError:
if '_' in attr:
try:
return self.__getattr__(attr.replace('_', '-'))
except AttributeError:
pass
return super(FlagValue, self).__getattr__(attr)
def __setattr__(self, attr, value):
if attr == "value" and not isinstance(value, six.integer_types):
raise ValueError(value)
if attr in self.__slots__:
return super(FlagValue, self).__setattr__(attr, value)
if attr in self.names:
if value:
self.value |= (2 ** self.names.index(attr))
else:
self.value &= ~(2 ** self.names.index(attr))
else:
return super(FlagValue, self).__setattr__(attr, value)
def copy(self):
return self.__class__(self.value, self.names)
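# Editor's illustrative sketch (not part of the upstream source):
# FlagValue behaves like an int whose bits carry names, here using
# TCP-style single-letter flags.
def _example_flag_value():
    v = FlagValue("SA", "FSRPAUEC")   # 'S' is bit 1, 'A' is bit 4
    assert int(v) == 0b10010
    assert str(v) == "SA"
    assert v.S and v.A and not v.F    # per-flag attribute access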
class FlagsField(BitField):
""" Handle Flag type field
Make sure all your flags have a label
Example:
>>> from scapy.packet import Packet
>>> class FlagsTest(Packet):
fields_desc = [FlagsField("flags", 0, 8, ["f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7"])] # noqa: E501
>>> FlagsTest(flags=9).show2()
###[ FlagsTest ]###
flags = f0+f3
>>> FlagsTest(flags=0).show2().strip()
###[ FlagsTest ]###
flags =
:param name: field's name
:param default: default value for the field
:param size: number of bits in the field
:param names: (list or dict) the label for each flag; the Least Significant Bit's tag name is written first  # noqa: E501
"""
ismutable = True
__slots__ = ["multi", "names"]
def __init__(self, name, default, size, names):
self.multi = isinstance(names, list)
self.names = names
BitField.__init__(self, name, default, size)
def _fixup_val(self, x):
"""Returns a FlagValue instance when needed. Internal method, to be
used in *2i() and i2*() methods.
"""
if isinstance(x, (list, tuple)):
return type(x)(
v if v is None or isinstance(v, FlagValue)
else FlagValue(v, self.names)
for v in x
)
return x if x is None or isinstance(x, FlagValue) else FlagValue(x, self.names) # noqa: E501
def any2i(self, pkt, x):
return self._fixup_val(super(FlagsField, self).any2i(pkt, x))
def m2i(self, pkt, x):
return self._fixup_val(super(FlagsField, self).m2i(pkt, x))
def i2h(self, pkt, x):
if isinstance(x, VolatileValue):
return super(FlagsField, self).i2h(pkt, x)
return self._fixup_val(super(FlagsField, self).i2h(pkt, x))
def i2repr(self, pkt, x):
if isinstance(x, (list, tuple)):
return repr(type(x)(
None if v is None else str(self._fixup_val(v)) for v in x
))
return None if x is None else str(self._fixup_val(x))
MultiFlagsEntry = collections.namedtuple('MultiFlagEntry', ['short', 'long'])
class MultiFlagsField(BitField):
__slots__ = FlagsField.__slots__ + ["depends_on"]
def __init__(self, name, default, size, names, depends_on):
self.names = names
self.depends_on = depends_on
super(MultiFlagsField, self).__init__(name, default, size)
def any2i(self, pkt, x):
assert isinstance(x, six.integer_types + (set,)), 'set expected'
if pkt is not None:
if isinstance(x, six.integer_types):
x = self.m2i(pkt, x)
else:
v = self.depends_on(pkt)
if v is not None:
assert v in self.names, 'invalid dependency'
these_names = self.names[v]
s = set()
for i in x:
for val in six.itervalues(these_names):
if val.short == i:
s.add(i)
break
else:
assert False, 'Unknown flag "{}" with this dependency'.format(i) # noqa: E501
continue
x = s
return x
def i2m(self, pkt, x):
v = self.depends_on(pkt)
these_names = self.names.get(v, {})
r = 0
for flag_set in x:
for i, val in six.iteritems(these_names):
if val.short == flag_set:
r |= 1 << i
break
else:
r |= 1 << int(flag_set[len('bit '):])
return r
def m2i(self, pkt, x):
v = self.depends_on(pkt)
these_names = self.names.get(v, {})
r = set()
i = 0
while x:
if x & 1:
if i in these_names:
r.add(these_names[i].short)
else:
r.add('bit {}'.format(i))
x >>= 1
i += 1
return r
def i2repr(self, pkt, x):
v = self.depends_on(pkt)
these_names = self.names.get(v, {})
r = set()
for flag_set in x:
for i in six.itervalues(these_names):
if i.short == flag_set:
r.add("{} ({})".format(i.long, i.short))
break
else:
r.add(flag_set)
return repr(r)
class FixedPointField(BitField):
__slots__ = ['frac_bits']
def __init__(self, name, default, size, frac_bits=16):
self.frac_bits = frac_bits
BitField.__init__(self, name, default, size)
def any2i(self, pkt, val):
if val is None:
return val
ival = int(val)
fract = int((val - ival) * 2**self.frac_bits)
return (ival << self.frac_bits) | fract
def i2h(self, pkt, val):
int_part = val >> self.frac_bits
frac_part = val & (1 << self.frac_bits) - 1
frac_part /= 2.0**self.frac_bits
return int_part + frac_part
def i2repr(self, pkt, val):
return self.i2h(pkt, val)
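# Editor's illustrative sketch (not part of the upstream source): a
# Q16.16 fixed-point value. 1.5 splits into integer part 1 and fraction
# 0.5 * 2**16 == 0x8000, i.e. internal value 0x00018000.
def _example_fixed_point_field():
    fld = FixedPointField("val", 0, size=32, frac_bits=16)
    internal = fld.any2i(None, 1.5)
    assert internal == 0x00018000
    assert fld.i2h(None, internal) == 1.5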
# Base class for IPv4 and IPv6 Prefixes inspired by IPField and IP6Field.
# Machine values are encoded in a multiple of wordbytes bytes.
class _IPPrefixFieldBase(Field):
__slots__ = ["wordbytes", "maxbytes", "aton", "ntoa", "length_from"]
def __init__(self, name, default, wordbytes, maxbytes, aton, ntoa, length_from): # noqa: E501
self.wordbytes = wordbytes
self.maxbytes = maxbytes
self.aton = aton
self.ntoa = ntoa
Field.__init__(self, name, default, "%is" % self.maxbytes)
self.length_from = length_from
def _numbytes(self, pfxlen):
wbits = self.wordbytes * 8
return ((pfxlen + (wbits - 1)) // wbits) * self.wordbytes
def h2i(self, pkt, x):
# "fc00:1::1/64" -> ("fc00:1::1", 64)
[pfx, pfxlen] = x.split('/')
self.aton(pfx) # check for validity
return (pfx, int(pfxlen))
def i2h(self, pkt, x):
# ("fc00:1::1", 64) -> "fc00:1::1/64"
(pfx, pfxlen) = x
return "%s/%i" % (pfx, pfxlen)
def i2m(self, pkt, x):
# ("fc00:1::1", 64) -> (b"\xfc\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 64) # noqa: E501
(pfx, pfxlen) = x
s = self.aton(pfx)
return (s[:self._numbytes(pfxlen)], pfxlen)
def m2i(self, pkt, x):
# (b"\xfc\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", 64) -> ("fc00:1::1", 64) # noqa: E501
(s, pfxlen) = x
if len(s) < self.maxbytes:
s = s + (b"\0" * (self.maxbytes - len(s)))
return (self.ntoa(s), pfxlen)
def any2i(self, pkt, x):
if x is None:
return (self.ntoa(b"\0" * self.maxbytes), 1)
return self.h2i(pkt, x)
def i2len(self, pkt, x):
(_, pfxlen) = x
return pfxlen
def addfield(self, pkt, s, val):
(rawpfx, pfxlen) = self.i2m(pkt, val)
fmt = "!%is" % self._numbytes(pfxlen)
return s + struct.pack(fmt, rawpfx)
def getfield(self, pkt, s):
pfxlen = self.length_from(pkt)
numbytes = self._numbytes(pfxlen)
fmt = "!%is" % numbytes
return s[numbytes:], self.m2i(pkt, (struct.unpack(fmt, s[:numbytes])[0], pfxlen)) # noqa: E501
class IPPrefixField(_IPPrefixFieldBase):
def __init__(self, name, default, wordbytes=1, length_from=None):
_IPPrefixFieldBase.__init__(self, name, default, wordbytes, 4, inet_aton, inet_ntoa, length_from) # noqa: E501
class IP6PrefixField(_IPPrefixFieldBase):
def __init__(self, name, default, wordbytes=1, length_from=None):
_IPPrefixFieldBase.__init__(self, name, default, wordbytes, 16, lambda a: inet_pton(socket.AF_INET6, a), lambda n: inet_ntop(socket.AF_INET6, n), length_from) # noqa: E501
class UTCTimeField(IntField):
__slots__ = ["epoch", "delta", "strf", "use_nano"]
def __init__(self, name, default, epoch=None, use_nano=False, strf="%a, %d %b %Y %H:%M:%S +0000"): # noqa: E501
IntField.__init__(self, name, default)
mk_epoch = EPOCH if epoch is None else time.mktime(epoch)
self.epoch = mk_epoch
self.delta = mk_epoch - EPOCH
self.strf = strf
self.use_nano = use_nano
def i2repr(self, pkt, x):
if x is None:
x = 0
elif self.use_nano:
x = x / 1e9
x = int(x) + self.delta
t = time.strftime(self.strf, time.gmtime(x))
return "%s (%d)" % (t, x)
def i2m(self, pkt, x):
return int(x) if x is not None else 0
class SecondsIntField(IntField):
__slots__ = ["use_msec"]
def __init__(self, name, default, use_msec=False):
IntField.__init__(self, name, default)
self.use_msec = use_msec
def i2repr(self, pkt, x):
if x is None:
x = 0
elif self.use_msec:
x = x / 1e3
return "%s sec" % x
| 1 | 13,861 | A similar field is already defined in `scapy/layers/bluetooth.py`. Can you merge both definitions? | secdev-scapy | py
@@ -204,6 +204,19 @@ class CppGenerator : public BaseGenerator {
     if (num_includes) code_ += "";
   }
 
+  void GenExtraIncludes() {
+    if(parser_.opts.cpp_includes.empty()) {
+      return;
+    }
+    // non-const copy needed for std::strtok
+    std::string data = parser_.opts.cpp_includes;
+    for(char* pch = std::strtok(&data[0], ",");
+        pch != nullptr; pch = std::strtok(nullptr, ",")) {
+      code_ += "#include <" + std::string(pch) + ">";
+    }
+    code_ += "";
+  }
+
   std::string EscapeKeyword(const std::string &name) const {
     return keywords_.find(name) == keywords_.end() ? name : name + "_";
   }
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent from idl_parser, since this code is not needed for most clients
#include "flatbuffers/code_generators.h"
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include <unordered_set>
namespace flatbuffers {
// Pedantic warning free version of toupper().
inline char ToUpper(char c) { return static_cast<char>(::toupper(c)); }
// Make numerical literal with type-suffix.
// This function is only needed for C++! Other languages do not need it.
static inline std::string NumToStringCpp(std::string val, BaseType type) {
// Avoid issues with -2147483648, -9223372036854775808.
switch (type) {
case BASE_TYPE_INT:
return (val != "-2147483648") ? val : ("(-2147483647 - 1)");
case BASE_TYPE_ULONG: return (val == "0") ? val : (val + "ULL");
case BASE_TYPE_LONG:
if (val == "-9223372036854775808")
return "(-9223372036854775807LL - 1LL)";
else
return (val == "0") ? val : (val + "LL");
default: return val;
}
}
static std::string GeneratedFileName(const std::string &path,
const std::string &file_name) {
return path + file_name + "_generated.h";
}
namespace cpp {
class CppGenerator : public BaseGenerator {
public:
CppGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "::"),
cur_name_space_(nullptr),
float_const_gen_("std::numeric_limits<double>::",
"std::numeric_limits<float>::", "quiet_NaN()",
"infinity()") {
static const char *const keywords[] = {
"alignas",
"alignof",
"and",
"and_eq",
"asm",
"atomic_cancel",
"atomic_commit",
"atomic_noexcept",
"auto",
"bitand",
"bitor",
"bool",
"break",
"case",
"catch",
"char",
"char16_t",
"char32_t",
"class",
"compl",
"concept",
"const",
"constexpr",
"const_cast",
"continue",
"co_await",
"co_return",
"co_yield",
"decltype",
"default",
"delete",
"do",
"double",
"dynamic_cast",
"else",
"enum",
"explicit",
"export",
"extern",
"false",
"float",
"for",
"friend",
"goto",
"if",
"import",
"inline",
"int",
"long",
"module",
"mutable",
"namespace",
"new",
"noexcept",
"not",
"not_eq",
"nullptr",
"operator",
"or",
"or_eq",
"private",
"protected",
"public",
"register",
"reinterpret_cast",
"requires",
"return",
"short",
"signed",
"sizeof",
"static",
"static_assert",
"static_cast",
"struct",
"switch",
"synchronized",
"template",
"this",
"thread_local",
"throw",
"true",
"try",
"typedef",
"typeid",
"typename",
"union",
"unsigned",
"using",
"virtual",
"void",
"volatile",
"wchar_t",
"while",
"xor",
"xor_eq",
nullptr,
};
for (auto kw = keywords; *kw; kw++) keywords_.insert(*kw);
}
std::string GenIncludeGuard() const {
// Generate include guard.
std::string guard = file_name_;
// Remove any non-alpha-numeric characters that may appear in a filename.
struct IsAlnum {
bool operator()(char c) const { return !is_alnum(c); }
};
guard.erase(std::remove_if(guard.begin(), guard.end(), IsAlnum()),
guard.end());
guard = "FLATBUFFERS_GENERATED_" + guard;
guard += "_";
// For further uniqueness, also add the namespace.
auto name_space = parser_.current_namespace_;
for (auto it = name_space->components.begin();
it != name_space->components.end(); ++it) {
guard += *it + "_";
}
guard += "H_";
std::transform(guard.begin(), guard.end(), guard.begin(), ToUpper);
return guard;
}
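  // Editor's note (illustrative, not from the upstream source): for
  // file_name_ "monster_test" in namespace MyGame::Example, the guard
  // above comes out as FLATBUFFERS_GENERATED_MONSTERTEST_MYGAME_EXAMPLE_H_.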
void GenIncludeDependencies() {
int num_includes = 0;
for (auto it = parser_.native_included_files_.begin();
it != parser_.native_included_files_.end(); ++it) {
code_ += "#include \"" + *it + "\"";
num_includes++;
}
for (auto it = parser_.included_files_.begin();
it != parser_.included_files_.end(); ++it) {
if (it->second.empty()) continue;
auto noext = flatbuffers::StripExtension(it->second);
auto basename = flatbuffers::StripPath(noext);
code_ += "#include \"" + parser_.opts.include_prefix +
(parser_.opts.keep_include_path ? noext : basename) +
"_generated.h\"";
num_includes++;
}
if (num_includes) code_ += "";
}
std::string EscapeKeyword(const std::string &name) const {
return keywords_.find(name) == keywords_.end() ? name : name + "_";
}
std::string Name(const Definition &def) const {
return EscapeKeyword(def.name);
}
std::string Name(const EnumVal &ev) const { return EscapeKeyword(ev.name); }
// Iterate through all definitions we haven't generate code for (enums,
// structs, and tables) and output them to a single file.
bool generate() {
code_.Clear();
code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n";
const auto include_guard = GenIncludeGuard();
code_ += "#ifndef " + include_guard;
code_ += "#define " + include_guard;
code_ += "";
if (parser_.opts.gen_nullable) {
code_ += "#pragma clang system_header\n\n";
}
code_ += "#include \"flatbuffers/flatbuffers.h\"";
if (parser_.uses_flexbuffers_) {
code_ += "#include \"flatbuffers/flexbuffers.h\"";
}
code_ += "";
if (parser_.opts.include_dependence_headers) { GenIncludeDependencies(); }
FLATBUFFERS_ASSERT(!cur_name_space_);
// Generate forward declarations for all structs/tables, since they may
// have circular references.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
code_ += "struct " + Name(struct_def) + ";";
if (parser_.opts.generate_object_based_api) {
auto nativeName =
NativeName(Name(struct_def), &struct_def, parser_.opts);
if (!struct_def.fixed) { code_ += "struct " + nativeName + ";"; }
}
code_ += "";
}
}
    // Generate forward declarations for all equality operators.
if (parser_.opts.generate_object_based_api && parser_.opts.gen_compare) {
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
auto nativeName =
NativeName(Name(struct_def), &struct_def, parser_.opts);
code_ += "bool operator==(const " + nativeName + " &lhs, const " +
nativeName + " &rhs);";
code_ += "bool operator!=(const " + nativeName + " &lhs, const " +
nativeName + " &rhs);";
}
}
code_ += "";
}
    // Generate preamble code for mini reflection.
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
// To break cyclic dependencies, first pre-declare all tables/structs.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenMiniReflectPre(&struct_def);
}
}
}
// Generate code for all the enum declarations.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenEnum(enum_def);
}
}
// Generate code for all structs, then all tables.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenStruct(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTable(struct_def);
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.fixed && !struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenTablePost(struct_def);
}
}
// Generate code for union verifiers.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (enum_def.is_union && !enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenUnionPost(enum_def);
}
}
// Generate code for mini reflection.
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
// Then the unions/enums that may refer to them.
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
const auto &enum_def = **it;
if (!enum_def.generated) {
SetNameSpace(enum_def.defined_namespace);
GenMiniReflect(nullptr, &enum_def);
}
}
// Then the full tables/structs.
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
const auto &struct_def = **it;
if (!struct_def.generated) {
SetNameSpace(struct_def.defined_namespace);
GenMiniReflect(&struct_def, nullptr);
}
}
}
// Generate convenient global helper functions:
if (parser_.root_struct_def_) {
auto &struct_def = *parser_.root_struct_def_;
SetNameSpace(struct_def.defined_namespace);
auto name = Name(struct_def);
auto qualified_name = cur_name_space_->GetFullyQualifiedName(name);
auto cpp_name = TranslateNameSpace(qualified_name);
code_.SetValue("STRUCT_NAME", name);
code_.SetValue("CPP_NAME", cpp_name);
code_.SetValue("NULLABLE_EXT", NullableExtension());
// The root datatype accessor:
code_ += "inline \\";
code_ +=
"const {{CPP_NAME}} *{{NULLABLE_EXT}}Get{{STRUCT_NAME}}(const void "
"*buf) {";
code_ += " return flatbuffers::GetRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
code_ += "inline \\";
code_ +=
"const {{CPP_NAME}} "
"*{{NULLABLE_EXT}}GetSizePrefixed{{STRUCT_NAME}}(const void "
"*buf) {";
code_ += " return flatbuffers::GetSizePrefixedRoot<{{CPP_NAME}}>(buf);";
code_ += "}";
code_ += "";
if (parser_.opts.mutable_buffer) {
code_ += "inline \\";
code_ += "{{STRUCT_NAME}} *GetMutable{{STRUCT_NAME}}(void *buf) {";
code_ += " return flatbuffers::GetMutableRoot<{{STRUCT_NAME}}>(buf);";
code_ += "}";
code_ += "";
}
if (parser_.file_identifier_.length()) {
// Return the identifier
code_ += "inline const char *{{STRUCT_NAME}}Identifier() {";
code_ += " return \"" + parser_.file_identifier_ + "\";";
code_ += "}";
code_ += "";
// Check if a buffer has the identifier.
code_ += "inline \\";
code_ += "bool {{STRUCT_NAME}}BufferHasIdentifier(const void *buf) {";
code_ += " return flatbuffers::BufferHasIdentifier(";
code_ += " buf, {{STRUCT_NAME}}Identifier());";
code_ += "}";
code_ += "";
}
// The root verifier.
if (parser_.file_identifier_.length()) {
code_.SetValue("ID", name + "Identifier()");
} else {
code_.SetValue("ID", "nullptr");
}
code_ += "inline bool Verify{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ += " return verifier.VerifyBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
code_ += "inline bool VerifySizePrefixed{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::Verifier &verifier) {";
code_ +=
" return verifier.VerifySizePrefixedBuffer<{{CPP_NAME}}>({{ID}});";
code_ += "}";
code_ += "";
if (parser_.file_extension_.length()) {
// Return the extension
code_ += "inline const char *{{STRUCT_NAME}}Extension() {";
code_ += " return \"" + parser_.file_extension_ + "\";";
code_ += "}";
code_ += "";
}
// Finish a buffer with a given root object:
code_ += "inline void Finish{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.Finish(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.Finish(root);";
code_ += "}";
code_ += "";
code_ += "inline void FinishSizePrefixed{{STRUCT_NAME}}Buffer(";
code_ += " flatbuffers::FlatBufferBuilder &fbb,";
code_ += " flatbuffers::Offset<{{CPP_NAME}}> root) {";
if (parser_.file_identifier_.length())
code_ += " fbb.FinishSizePrefixed(root, {{STRUCT_NAME}}Identifier());";
else
code_ += " fbb.FinishSizePrefixed(root);";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// A convenient root unpack function.
auto native_name =
NativeName(WrapInNameSpace(struct_def), &struct_def, parser_.opts);
code_.SetValue("UNPACK_RETURN",
GenTypeNativePtr(native_name, nullptr, false));
code_.SetValue("UNPACK_TYPE",
GenTypeNativePtr(native_name, nullptr, true));
code_ += "inline {{UNPACK_RETURN}} UnPack{{STRUCT_NAME}}(";
code_ += " const void *buf,";
code_ += " const flatbuffers::resolver_function_t *res = nullptr) {";
code_ += " return {{UNPACK_TYPE}}\\";
code_ += "(Get{{STRUCT_NAME}}(buf)->UnPack(res));";
code_ += "}";
code_ += "";
}
}
if (cur_name_space_) SetNameSpace(nullptr);
// Close the include guard.
code_ += "#endif // " + include_guard;
const auto file_path = GeneratedFileName(path_, file_name_);
const auto final_code = code_.ToString();
return SaveFile(file_path.c_str(), final_code, false);
}
private:
CodeWriter code_;
std::unordered_set<std::string> keywords_;
// This tracks the current namespace so we can insert namespace declarations.
const Namespace *cur_name_space_;
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
// Translates a qualified name in flatbuffer text format to the same name in
// the equivalent C++ namespace.
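  // e.g. "MyGame.Example.Monster" becomes "MyGame::Example::Monster".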
static std::string TranslateNameSpace(const std::string &qualified_name) {
std::string cpp_qualified_name = qualified_name;
size_t start_pos = 0;
while ((start_pos = cpp_qualified_name.find(".", start_pos)) !=
std::string::npos) {
cpp_qualified_name.replace(start_pos, 1, "::");
}
return cpp_qualified_name;
}
void GenComment(const std::vector<std::string> &dc, const char *prefix = "") {
std::string text;
::flatbuffers::GenComment(dc, &text, nullptr, prefix);
code_ += text + "\\";
}
// Return a C++ type from the table in idl.h
std::string GenTypeBasic(const Type &type, bool user_facing_type) const {
// clang-format off
static const char *const ctypename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \
RTYPE) \
#CTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
// clang-format on
if (user_facing_type) {
if (type.enum_def) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_BOOL) return "bool";
}
return ctypename[type.base_type];
}
// Return a C++ pointer type, specialized to the actual struct/table types,
// and vector element types.
std::string GenTypePointer(const Type &type) const {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return "flatbuffers::String";
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeWire(type.VectorType(), "", false);
return "flatbuffers::Vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
return WrapInNameSpace(*type.struct_def);
}
case BASE_TYPE_UNION:
// fall through
default: { return "void"; }
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// building a flatbuffer.
std::string GenTypeWire(const Type &type, const char *postfix,
bool user_facing_type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + postfix;
} else if (IsStruct(type)) {
return "const " + GenTypePointer(type) + " *";
} else {
return "flatbuffers::Offset<" + GenTypePointer(type) + ">" + postfix;
}
}
// Return a C++ type for any type (scalar/pointer) that reflects its
// serialized size.
std::string GenTypeSize(const Type &type) const {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, false);
} else if (IsStruct(type)) {
return GenTypePointer(type);
} else {
return "flatbuffers::uoffset_t";
}
}
std::string NullableExtension() {
return parser_.opts.gen_nullable ? " _Nullable " : "";
}
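  // Maps a table name to its object-API ("native") counterpart, e.g. with the
  // default object-API suffix, "Monster" becomes "MonsterT".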
static std::string NativeName(const std::string &name, const StructDef *sd,
const IDLOptions &opts) {
return sd && !sd->fixed ? opts.object_prefix + name + opts.object_suffix
: name;
}
const std::string &PtrType(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_ptr_type") : nullptr;
return attr ? attr->constant : parser_.opts.cpp_object_api_pointer_type;
}
const std::string NativeString(const FieldDef *field) {
auto attr = field ? field->attributes.Lookup("cpp_str_type") : nullptr;
auto &ret = attr ? attr->constant : parser_.opts.cpp_object_api_string_type;
if (ret.empty()) { return "std::string"; }
return ret;
}
bool FlexibleStringConstructor(const FieldDef *field) {
auto attr = field
? (field->attributes.Lookup("cpp_str_flex_ctor") != nullptr)
: false;
auto ret =
attr ? attr : parser_.opts.cpp_object_api_string_flexible_constructor;
return ret && NativeString(field) !=
"std::string"; // Only for custom string types.
}
std::string GenTypeNativePtr(const std::string &type, const FieldDef *field,
bool is_constructor) {
auto &ptr_type = PtrType(field);
if (ptr_type != "naked") {
return (ptr_type != "default_ptr_type"
? ptr_type
: parser_.opts.cpp_object_api_pointer_type) +
"<" + type + ">";
} else if (is_constructor) {
return "";
} else {
return type + " *";
}
}
std::string GenPtrGet(const FieldDef &field) {
auto cpp_ptr_type_get = field.attributes.Lookup("cpp_ptr_type_get");
if (cpp_ptr_type_get) return cpp_ptr_type_get->constant;
auto &ptr_type = PtrType(&field);
return ptr_type == "naked" ? "" : ".get()";
}
std::string GenTypeNative(const Type &type, bool invector,
const FieldDef &field) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
return NativeString(&field);
}
case BASE_TYPE_VECTOR: {
const auto type_name = GenTypeNative(type.VectorType(), true, field);
if (type.struct_def &&
type.struct_def->attributes.Lookup("native_custom_alloc")) {
auto native_custom_alloc =
type.struct_def->attributes.Lookup("native_custom_alloc");
return "std::vector<" + type_name + "," +
native_custom_alloc->constant + "<" + type_name + ">>";
} else
return "std::vector<" + type_name + ">";
}
case BASE_TYPE_STRUCT: {
auto type_name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) { type_name = native_type->constant; }
if (invector || field.native_inline) {
return type_name;
} else {
return GenTypeNativePtr(type_name, &field, false);
}
} else {
return GenTypeNativePtr(
NativeName(type_name, type.struct_def, parser_.opts), &field,
false);
}
}
case BASE_TYPE_UNION: {
return type.enum_def->name + "Union";
}
default: { return GenTypeBasic(type, true); }
}
}
// Return a C++ type for any type (scalar/pointer) specifically for
// using a flatbuffer.
std::string GenTypeGet(const Type &type, const char *afterbasic,
const char *beforeptr, const char *afterptr,
bool user_facing_type) {
if (IsScalar(type.base_type)) {
return GenTypeBasic(type, user_facing_type) + afterbasic;
} else {
return beforeptr + GenTypePointer(type) + afterptr;
}
}
std::string GenEnumDecl(const EnumDef &enum_def) const {
const IDLOptions &opts = parser_.opts;
return (opts.scoped_enums ? "enum class " : "enum ") + Name(enum_def);
}
std::string GenEnumValDecl(const EnumDef &enum_def,
const std::string &enum_val) const {
const IDLOptions &opts = parser_.opts;
return opts.prefixed_enums ? Name(enum_def) + "_" + enum_val : enum_val;
}
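  // Yields the spelling of an enum value at its use site, e.g. "Color::Red"
  // with scoped enums, "Color_Red" with prefixed enums, plain "Red" otherwise.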
std::string GetEnumValUse(const EnumDef &enum_def,
const EnumVal &enum_val) const {
const IDLOptions &opts = parser_.opts;
if (opts.scoped_enums) {
return Name(enum_def) + "::" + Name(enum_val);
} else if (opts.prefixed_enums) {
return Name(enum_def) + "_" + Name(enum_val);
} else {
return Name(enum_val);
}
}
std::string StripUnionType(const std::string &name) {
return name.substr(0, name.size() - strlen(UnionTypeFieldSuffix()));
}
std::string GetUnionElement(const EnumVal &ev, bool wrap, bool actual_type,
bool native_type = false) {
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
auto name = actual_type ? ev.union_type.struct_def->name : Name(ev);
return wrap ? WrapInNameSpace(ev.union_type.struct_def->defined_namespace,
name)
: name;
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
return actual_type ? (native_type ? "std::string" : "flatbuffers::String")
: Name(ev);
} else {
FLATBUFFERS_ASSERT(false);
return Name(ev);
}
}
std::string UnionVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + Name(enum_def) +
"(flatbuffers::Verifier &verifier, const void *obj, " +
Name(enum_def) + " type)";
}
std::string UnionVectorVerifySignature(const EnumDef &enum_def) {
return "bool Verify" + Name(enum_def) + "Vector" +
"(flatbuffers::Verifier &verifier, " +
"const flatbuffers::Vector<flatbuffers::Offset<void>> *values, " +
"const flatbuffers::Vector<uint8_t> *types)";
}
std::string UnionUnPackSignature(const EnumDef &enum_def, bool inclass) {
return (inclass ? "static " : "") + std::string("void *") +
(inclass ? "" : Name(enum_def) + "Union::") +
"UnPack(const void *obj, " + Name(enum_def) +
" type, const flatbuffers::resolver_function_t *resolver)";
}
std::string UnionPackSignature(const EnumDef &enum_def, bool inclass) {
return "flatbuffers::Offset<void> " +
(inclass ? "" : Name(enum_def) + "Union::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ") const";
}
std::string TableCreateSignature(const StructDef &struct_def, bool predecl,
const IDLOptions &opts) {
return "flatbuffers::Offset<" + Name(struct_def) + "> Create" +
Name(struct_def) + "(flatbuffers::FlatBufferBuilder &_fbb, const " +
NativeName(Name(struct_def), &struct_def, opts) +
" *_o, const flatbuffers::rehasher_function_t *_rehasher" +
(predecl ? " = nullptr" : "") + ")";
}
std::string TablePackSignature(const StructDef &struct_def, bool inclass,
const IDLOptions &opts) {
return std::string(inclass ? "static " : "") + "flatbuffers::Offset<" +
Name(struct_def) + "> " + (inclass ? "" : Name(struct_def) + "::") +
"Pack(flatbuffers::FlatBufferBuilder &_fbb, " + "const " +
NativeName(Name(struct_def), &struct_def, opts) + "* _o, " +
"const flatbuffers::rehasher_function_t *_rehasher" +
(inclass ? " = nullptr" : "") + ")";
}
std::string TableUnPackSignature(const StructDef &struct_def, bool inclass,
const IDLOptions &opts) {
return NativeName(Name(struct_def), &struct_def, opts) + " *" +
(inclass ? "" : Name(struct_def) + "::") +
"UnPack(const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
std::string TableUnPackToSignature(const StructDef &struct_def, bool inclass,
const IDLOptions &opts) {
return "void " + (inclass ? "" : Name(struct_def) + "::") + "UnPackTo(" +
NativeName(Name(struct_def), &struct_def, opts) + " *" +
"_o, const flatbuffers::resolver_function_t *_resolver" +
(inclass ? " = nullptr" : "") + ") const";
}
void GenMiniReflectPre(const StructDef *struct_def) {
code_.SetValue("NAME", struct_def->name);
code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable();";
code_ += "";
}
void GenMiniReflect(const StructDef *struct_def, const EnumDef *enum_def) {
code_.SetValue("NAME", struct_def ? struct_def->name : enum_def->name);
code_.SetValue("SEQ_TYPE",
struct_def ? (struct_def->fixed ? "ST_STRUCT" : "ST_TABLE")
: (enum_def->is_union ? "ST_UNION" : "ST_ENUM"));
auto num_fields =
struct_def ? struct_def->fields.vec.size() : enum_def->size();
code_.SetValue("NUM_FIELDS", NumToString(num_fields));
std::vector<std::string> names;
std::vector<Type> types;
if (struct_def) {
for (auto it = struct_def->fields.vec.begin();
it != struct_def->fields.vec.end(); ++it) {
const auto &field = **it;
names.push_back(Name(field));
types.push_back(field.value.type);
}
} else {
for (auto it = enum_def->Vals().begin(); it != enum_def->Vals().end();
++it) {
const auto &ev = **it;
names.push_back(Name(ev));
types.push_back(enum_def->is_union ? ev.union_type
: Type(enum_def->underlying_type));
}
}
std::string ts;
std::vector<std::string> type_refs;
for (auto it = types.begin(); it != types.end(); ++it) {
auto &type = *it;
if (!ts.empty()) ts += ",\n ";
auto is_vector = type.base_type == BASE_TYPE_VECTOR;
auto bt = is_vector ? type.element : type.base_type;
auto et = IsScalar(bt) || bt == BASE_TYPE_STRING
? bt - BASE_TYPE_UTYPE + ET_UTYPE
: ET_SEQUENCE;
int ref_idx = -1;
std::string ref_name =
type.struct_def
? WrapInNameSpace(*type.struct_def)
: type.enum_def ? WrapInNameSpace(*type.enum_def) : "";
if (!ref_name.empty()) {
auto rit = type_refs.begin();
for (; rit != type_refs.end(); ++rit) {
if (*rit == ref_name) {
ref_idx = static_cast<int>(rit - type_refs.begin());
break;
}
}
if (rit == type_refs.end()) {
ref_idx = static_cast<int>(type_refs.size());
type_refs.push_back(ref_name);
}
}
ts += "{ flatbuffers::" + std::string(ElementaryTypeNames()[et]) + ", " +
NumToString(is_vector) + ", " + NumToString(ref_idx) + " }";
}
std::string rs;
for (auto it = type_refs.begin(); it != type_refs.end(); ++it) {
if (!rs.empty()) rs += ",\n ";
rs += *it + "TypeTable";
}
std::string ns;
for (auto it = names.begin(); it != names.end(); ++it) {
if (!ns.empty()) ns += ",\n ";
ns += "\"" + *it + "\"";
}
std::string vs;
const auto consecutive_enum_from_zero =
enum_def && enum_def->MinValue()->IsZero() &&
((enum_def->size() - 1) == enum_def->Distance());
if (enum_def && !consecutive_enum_from_zero) {
for (auto it = enum_def->Vals().begin(); it != enum_def->Vals().end();
++it) {
const auto &ev = **it;
if (!vs.empty()) vs += ", ";
vs += NumToStringCpp(enum_def->ToString(ev),
enum_def->underlying_type.base_type);
}
} else if (struct_def && struct_def->fixed) {
for (auto it = struct_def->fields.vec.begin();
it != struct_def->fields.vec.end(); ++it) {
const auto &field = **it;
vs += NumToString(field.value.offset);
vs += ", ";
}
vs += NumToString(struct_def->bytesize);
}
code_.SetValue("TYPES", ts);
code_.SetValue("REFS", rs);
code_.SetValue("NAMES", ns);
code_.SetValue("VALUES", vs);
code_ += "inline const flatbuffers::TypeTable *{{NAME}}TypeTable() {";
if (num_fields) {
code_ += " static const flatbuffers::TypeCode type_codes[] = {";
code_ += " {{TYPES}}";
code_ += " };";
}
if (!type_refs.empty()) {
code_ += " static const flatbuffers::TypeFunction type_refs[] = {";
code_ += " {{REFS}}";
code_ += " };";
}
if (!vs.empty()) {
      // Note: values are stored as int64_t, so uint64_t enum values greater
      // than 9223372036854775807ULL (INT64_MAX) cannot be represented here.
code_ += " static const int64_t values[] = { {{VALUES}} };";
}
auto has_names =
num_fields && parser_.opts.mini_reflect == IDLOptions::kTypesAndNames;
if (has_names) {
code_ += " static const char * const names[] = {";
code_ += " {{NAMES}}";
code_ += " };";
}
code_ += " static const flatbuffers::TypeTable tt = {";
code_ += std::string(" flatbuffers::{{SEQ_TYPE}}, {{NUM_FIELDS}}, ") +
(num_fields ? "type_codes, " : "nullptr, ") +
(!type_refs.empty() ? "type_refs, " : "nullptr, ") +
(!vs.empty() ? "values, " : "nullptr, ") +
(has_names ? "names" : "nullptr");
code_ += " };";
code_ += " return &tt;";
code_ += "}";
code_ += "";
}
// Generate an enum declaration,
// an enum string lookup table,
  // and an enum array of values.
void GenEnum(const EnumDef &enum_def) {
code_.SetValue("ENUM_NAME", Name(enum_def));
code_.SetValue("BASE_TYPE", GenTypeBasic(enum_def.underlying_type, false));
code_.SetValue("SEP", "");
GenComment(enum_def.doc_comment);
code_ += GenEnumDecl(enum_def) + "\\";
// MSVC doesn't support int64/uint64 enum without explicitly declared enum
// type. The value 4611686018427387904ULL is truncated to zero with warning:
// "warning C4309: 'initializing': truncation of constant value".
auto add_type = parser_.opts.scoped_enums;
add_type |= (enum_def.underlying_type.base_type == BASE_TYPE_LONG);
add_type |= (enum_def.underlying_type.base_type == BASE_TYPE_ULONG);
if (add_type) code_ += " : {{BASE_TYPE}}\\";
code_ += " {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) {
const auto &ev = **it;
if (!ev.doc_comment.empty()) {
auto prefix = code_.GetValue("SEP") + " ";
GenComment(ev.doc_comment, prefix.c_str());
code_.SetValue("SEP", "");
}
code_.SetValue("KEY", GenEnumValDecl(enum_def, Name(ev)));
code_.SetValue("VALUE",
NumToStringCpp(enum_def.ToString(ev),
enum_def.underlying_type.base_type));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("SEP", ",\n");
}
const EnumVal *minv = enum_def.MinValue();
const EnumVal *maxv = enum_def.MaxValue();
if (parser_.opts.scoped_enums || parser_.opts.prefixed_enums) {
FLATBUFFERS_ASSERT(minv && maxv);
code_.SetValue("SEP", ",\n");
if (enum_def.attributes.Lookup("bit_flags")) {
code_.SetValue("KEY", GenEnumValDecl(enum_def, "NONE"));
code_.SetValue("VALUE", "0");
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "ANY"));
code_.SetValue("VALUE",
NumToStringCpp(enum_def.AllFlags(),
enum_def.underlying_type.base_type));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
} else { // MIN & MAX are useless for bit_flags
code_.SetValue("KEY", GenEnumValDecl(enum_def, "MIN"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, minv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
code_.SetValue("KEY", GenEnumValDecl(enum_def, "MAX"));
code_.SetValue("VALUE", GenEnumValDecl(enum_def, maxv->name));
code_ += "{{SEP}} {{KEY}} = {{VALUE}}\\";
}
}
code_ += "";
code_ += "};";
if (parser_.opts.scoped_enums && enum_def.attributes.Lookup("bit_flags")) {
code_ +=
"FLATBUFFERS_DEFINE_BITMASK_OPERATORS({{ENUM_NAME}}, {{BASE_TYPE}})";
}
code_ += "";
// Generate an array of all enumeration values
auto num_fields = NumToString(enum_def.size());
code_ += "inline const {{ENUM_NAME}} (&EnumValues{{ENUM_NAME}}())[" +
num_fields + "] {";
code_ += " static const {{ENUM_NAME}} values[] = {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) {
const auto &ev = **it;
auto value = GetEnumValUse(enum_def, ev);
auto suffix = *it != enum_def.Vals().back() ? "," : "";
code_ += " " + value + suffix;
}
code_ += " };";
code_ += " return values;";
code_ += "}";
code_ += "";
    // Generate a string table for enum values.
    // Problem is, if values are very sparse, that could generate really big
    // tables. Ideally in that case we would generate a map lookup instead, but
    // for the moment we simply don't output a table at all.
auto range = enum_def.Distance();
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const uint64_t kMaxSparseness = 5;
if (range / static_cast<uint64_t>(enum_def.size()) < kMaxSparseness) {
code_ += "inline const char * const *EnumNames{{ENUM_NAME}}() {";
code_ += " static const char * const names[" +
NumToString(range + 1 + 1) + "] = {";
auto val = enum_def.Vals().front();
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
auto ev = *it;
for (auto k = enum_def.Distance(val, ev); k > 1; --k) {
code_ += " \"\",";
}
val = ev;
code_ += " \"" + Name(*ev) + "\",";
}
code_ += " nullptr";
code_ += " };";
code_ += " return names;";
code_ += "}";
code_ += "";
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " if (e < " + GetEnumValUse(enum_def, *enum_def.MinValue()) +
" || e > " + GetEnumValUse(enum_def, *enum_def.MaxValue()) +
") return \"\";";
code_ += " const size_t index = static_cast<size_t>(e)\\";
if (enum_def.MinValue()->IsNonZero()) {
auto vals = GetEnumValUse(enum_def, *enum_def.MinValue());
code_ += " - static_cast<size_t>(" + vals + ")\\";
}
code_ += ";";
code_ += " return EnumNames{{ENUM_NAME}}()[index];";
code_ += "}";
code_ += "";
} else {
code_ += "inline const char *EnumName{{ENUM_NAME}}({{ENUM_NAME}} e) {";
code_ += " switch (e) {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
const auto &ev = **it;
code_ += " case " + GetEnumValUse(enum_def, ev) + ": return \"" +
Name(ev) + "\";";
}
code_ += " default: return \"\";";
code_ += " }";
code_ += "}";
code_ += "";
}
// Generate type traits for unions to map from a type to union enum value.
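    // e.g. for a union "Any" with member "Monster" this emits (modulo enum
    // naming options):
    //   template<> struct AnyTraits<Monster> {
    //     static const Any enum_value = Any_Monster;
    //   };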
if (enum_def.is_union && !enum_def.uses_multiple_type_instances) {
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
const auto &ev = **it;
if (it == enum_def.Vals().begin()) {
code_ += "template<typename T> struct {{ENUM_NAME}}Traits {";
} else {
auto name = GetUnionElement(ev, true, true);
code_ += "template<> struct {{ENUM_NAME}}Traits<" + name + "> {";
}
auto value = GetEnumValUse(enum_def, ev);
code_ += " static const {{ENUM_NAME}} enum_value = " + value + ";";
code_ += "};";
code_ += "";
}
}
if (parser_.opts.generate_object_based_api && enum_def.is_union) {
// Generate a union type
code_.SetValue("NAME", Name(enum_def));
FLATBUFFERS_ASSERT(enum_def.Lookup("NONE"));
code_.SetValue("NONE", GetEnumValUse(enum_def, *enum_def.Lookup("NONE")));
code_ += "struct {{NAME}}Union {";
code_ += " {{NAME}} type;";
code_ += " void *value;";
code_ += "";
code_ += " {{NAME}}Union() : type({{NONE}}), value(nullptr) {}";
code_ += " {{NAME}}Union({{NAME}}Union&& u) FLATBUFFERS_NOEXCEPT :";
code_ += " type({{NONE}}), value(nullptr)";
code_ += " { std::swap(type, u.type); std::swap(value, u.value); }";
code_ += " {{NAME}}Union(const {{NAME}}Union &) FLATBUFFERS_NOEXCEPT;";
code_ +=
" {{NAME}}Union &operator=(const {{NAME}}Union &u) "
"FLATBUFFERS_NOEXCEPT";
code_ +=
" { {{NAME}}Union t(u); std::swap(type, t.type); std::swap(value, "
"t.value); return *this; }";
code_ +=
" {{NAME}}Union &operator=({{NAME}}Union &&u) FLATBUFFERS_NOEXCEPT";
code_ +=
" { std::swap(type, u.type); std::swap(value, u.value); return "
"*this; }";
code_ += " ~{{NAME}}Union() { Reset(); }";
code_ += "";
code_ += " void Reset();";
code_ += "";
if (!enum_def.uses_multiple_type_instances) {
code_ += "#ifndef FLATBUFFERS_CPP98_STL";
code_ += " template <typename T>";
code_ += " void Set(T&& val) {";
code_ += " using RT = typename std::remove_reference<T>::type;";
code_ += " Reset();";
code_ += " type = {{NAME}}Traits<typename RT::TableType>::enum_value;";
code_ += " if (type != {{NONE}}) {";
code_ += " value = new RT(std::forward<T>(val));";
code_ += " }";
code_ += " }";
code_ += "#endif // FLATBUFFERS_CPP98_STL";
code_ += "";
}
code_ += " " + UnionUnPackSignature(enum_def, true) + ";";
code_ += " " + UnionPackSignature(enum_def, true) + ";";
code_ += "";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
const auto &ev = **it;
if (ev.IsZero()) { continue; }
const auto native_type =
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts);
code_.SetValue("NATIVE_TYPE", native_type);
code_.SetValue("NATIVE_NAME", Name(ev));
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
code_ += " {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ += " reinterpret_cast<{{NATIVE_TYPE}} *>(value) : nullptr;";
code_ += " }";
code_ += " const {{NATIVE_TYPE}} *As{{NATIVE_NAME}}() const {";
code_ += " return type == {{NATIVE_ID}} ?";
code_ +=
" reinterpret_cast<const {{NATIVE_TYPE}} *>(value) : nullptr;";
code_ += " }";
}
code_ += "};";
code_ += "";
if (parser_.opts.gen_compare) {
code_ += "";
code_ +=
"inline bool operator==(const {{NAME}}Union &lhs, const "
"{{NAME}}Union &rhs) {";
code_ += " if (lhs.type != rhs.type) return false;";
code_ += " switch (lhs.type) {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
const auto &ev = **it;
code_.SetValue("NATIVE_ID", GetEnumValUse(enum_def, ev));
if (ev.IsNonZero()) {
const auto native_type =
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts);
code_.SetValue("NATIVE_TYPE", native_type);
code_ += " case {{NATIVE_ID}}: {";
code_ +=
" return *(reinterpret_cast<const {{NATIVE_TYPE}} "
"*>(lhs.value)) ==";
code_ +=
" *(reinterpret_cast<const {{NATIVE_TYPE}} "
"*>(rhs.value));";
code_ += " }";
} else {
code_ += " case {{NATIVE_ID}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += "}";
code_ += "";
code_ +=
"inline bool operator!=(const {{NAME}}Union &lhs, const "
"{{NAME}}Union &rhs) {";
code_ += " return !(lhs == rhs);";
code_ += "}";
code_ += "";
}
}
if (enum_def.is_union) {
code_ += UnionVerifySignature(enum_def) + ";";
code_ += UnionVectorVerifySignature(enum_def) + ";";
code_ += "";
}
}
void GenUnionPost(const EnumDef &enum_def) {
    // Generate a verifier function for this union that can be called by the
    // table verifier functions. It uses a switch statement to select a
    // specific verifier function to call; this should be safe even if the
    // union type has been corrupted, since the verifiers will simply fail
    // when called on the wrong type.
code_.SetValue("ENUM_NAME", Name(enum_def));
code_ += "inline " + UnionVerifySignature(enum_def) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end(); ++it) {
const auto &ev = **it;
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
if (ev.IsNonZero()) {
code_.SetValue("TYPE", GetUnionElement(ev, true, true));
code_ += " case {{LABEL}}: {";
auto getptr =
" auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return verifier.Verify<{{TYPE}}>(static_cast<const "
"uint8_t *>(obj), 0);";
} else {
code_ += getptr;
code_ += " return verifier.VerifyTable(ptr);";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += getptr;
code_ += " return verifier.VerifyString(ptr);";
} else {
FLATBUFFERS_ASSERT(false);
}
code_ += " }";
} else {
code_ += " case {{LABEL}}: {";
code_ += " return true;"; // "NONE" enum value.
code_ += " }";
}
}
code_ += " default: return false;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionVectorVerifySignature(enum_def) + " {";
code_ += " if (!values || !types) return !values && !types;";
code_ += " if (values->size() != types->size()) return false;";
code_ += " for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {";
code_ += " if (!Verify" + Name(enum_def) + "(";
code_ += " verifier, values->Get(i), types->GetEnum<" +
Name(enum_def) + ">(i))) {";
code_ += " return false;";
code_ += " }";
code_ += " }";
code_ += " return true;";
code_ += "}";
code_ += "";
if (parser_.opts.generate_object_based_api) {
// Generate union Unpack() and Pack() functions.
code_ += "inline " + UnionUnPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
const auto &ev = **it;
if (ev.IsZero()) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE", GetUnionElement(ev, true, true));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(obj);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return new " +
WrapInNameSpace(*ev.union_type.struct_def) + "(*ptr);";
} else {
code_ += " return ptr->UnPack(resolver);";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return new std::string(ptr->c_str(), ptr->size());";
} else {
FLATBUFFERS_ASSERT(false);
}
code_ += " }";
}
code_ += " default: return nullptr;";
code_ += " }";
code_ += "}";
code_ += "";
code_ += "inline " + UnionPackSignature(enum_def, false) + " {";
code_ += " switch (type) {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
auto &ev = **it;
if (ev.IsZero()) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE",
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts));
code_.SetValue("NAME", GetUnionElement(ev, false, true));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<const {{TYPE}} *>(value);";
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
if (ev.union_type.struct_def->fixed) {
code_ += " return _fbb.CreateStruct(*ptr).Union();";
} else {
code_ +=
" return Create{{NAME}}(_fbb, ptr, _rehasher).Union();";
}
} else if (ev.union_type.base_type == BASE_TYPE_STRING) {
code_ += " return _fbb.CreateString(*ptr).Union();";
} else {
FLATBUFFERS_ASSERT(false);
}
code_ += " }";
}
code_ += " default: return 0;";
code_ += " }";
code_ += "}";
code_ += "";
// Union copy constructor
code_ +=
"inline {{ENUM_NAME}}Union::{{ENUM_NAME}}Union(const "
"{{ENUM_NAME}}Union &u) FLATBUFFERS_NOEXCEPT : type(u.type), "
"value(nullptr) {";
code_ += " switch (type) {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
const auto &ev = **it;
if (ev.IsZero()) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE",
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts));
code_ += " case {{LABEL}}: {";
bool copyable = true;
if (ev.union_type.base_type == BASE_TYPE_STRUCT) {
// Don't generate code to copy if table is not copyable.
// TODO(wvo): make tables copyable instead.
for (auto fit = ev.union_type.struct_def->fields.vec.begin();
fit != ev.union_type.struct_def->fields.vec.end(); ++fit) {
const auto &field = **fit;
if (!field.deprecated && field.value.type.struct_def &&
!field.native_inline) {
copyable = false;
break;
}
}
}
if (copyable) {
code_ +=
" value = new {{TYPE}}(*reinterpret_cast<{{TYPE}} *>"
"(u.value));";
} else {
code_ +=
" FLATBUFFERS_ASSERT(false); // {{TYPE}} not copyable.";
}
code_ += " break;";
code_ += " }";
}
code_ += " default:";
code_ += " break;";
code_ += " }";
code_ += "}";
code_ += "";
// Union Reset() function.
FLATBUFFERS_ASSERT(enum_def.Lookup("NONE"));
code_.SetValue("NONE", GetEnumValUse(enum_def, *enum_def.Lookup("NONE")));
code_ += "inline void {{ENUM_NAME}}Union::Reset() {";
code_ += " switch (type) {";
for (auto it = enum_def.Vals().begin(); it != enum_def.Vals().end();
++it) {
const auto &ev = **it;
if (ev.IsZero()) { continue; }
code_.SetValue("LABEL", GetEnumValUse(enum_def, ev));
code_.SetValue("TYPE",
NativeName(GetUnionElement(ev, true, true, true),
ev.union_type.struct_def, parser_.opts));
code_ += " case {{LABEL}}: {";
code_ += " auto ptr = reinterpret_cast<{{TYPE}} *>(value);";
code_ += " delete ptr;";
code_ += " break;";
code_ += " }";
}
code_ += " default: break;";
code_ += " }";
code_ += " value = nullptr;";
code_ += " type = {{NONE}};";
code_ += "}";
code_ += "";
}
}
  // Generates a value, optionally with a cast applied, if the field has a
  // different underlying type from its interface type (currently only the
  // case for enums). "from" specifies the direction: true means from the
  // underlying type to the interface type.
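  // e.g. with from == true, an enum field read might be wrapped as
  // static_cast<Color>(...) around the raw underlying value.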
std::string GenUnderlyingCast(const FieldDef &field, bool from,
const std::string &val) {
if (from && field.value.type.base_type == BASE_TYPE_BOOL) {
return val + " != 0";
} else if ((field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) ||
field.value.type.base_type == BASE_TYPE_BOOL) {
return "static_cast<" + GenTypeBasic(field.value.type, from) + ">(" +
val + ")";
} else {
return val;
}
}
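  // Builds the vtable offset constant name for a field, e.g. a field named
  // "hp" yields "VT_HP".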
std::string GenFieldOffsetName(const FieldDef &field) {
std::string uname = Name(field);
std::transform(uname.begin(), uname.end(), uname.begin(), ToUpper);
return "VT_" + uname;
}
void GenFullyQualifiedNameGetter(const StructDef &struct_def,
const std::string &name) {
if (!parser_.opts.generate_name_strings) { return; }
auto fullname = struct_def.defined_namespace->GetFullyQualifiedName(name);
code_.SetValue("NAME", fullname);
code_.SetValue("CONSTEXPR", "FLATBUFFERS_CONSTEXPR");
code_ += " static {{CONSTEXPR}} const char *GetFullyQualifiedName() {";
code_ += " return \"{{NAME}}\";";
code_ += " }";
}
std::string GenDefaultConstant(const FieldDef &field) {
if (IsFloat(field.value.type.base_type))
return float_const_gen_.GenFloatConstant(field);
else
return NumToStringCpp(field.value.constant, field.value.type.base_type);
}
std::string GetDefaultScalarValue(const FieldDef &field, bool is_ctor) {
if (field.value.type.enum_def && IsScalar(field.value.type.base_type)) {
auto ev = field.value.type.enum_def->FindByValue(field.value.constant);
if (ev) {
return WrapInNameSpace(field.value.type.enum_def->defined_namespace,
GetEnumValUse(*field.value.type.enum_def, *ev));
} else {
return GenUnderlyingCast(
field, true,
NumToStringCpp(field.value.constant, field.value.type.base_type));
}
} else if (field.value.type.base_type == BASE_TYPE_BOOL) {
return field.value.constant == "0" ? "false" : "true";
} else if (field.attributes.Lookup("cpp_type")) {
if (is_ctor) {
if (PtrType(&field) == "naked") {
return "nullptr";
} else {
return "";
}
} else {
return "0";
}
} else {
return GenDefaultConstant(field);
}
}
void GenParam(const FieldDef &field, bool direct, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("PARAM_NAME", Name(field));
if (direct && field.value.type.base_type == BASE_TYPE_STRING) {
code_.SetValue("PARAM_TYPE", "const char *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else if (direct && field.value.type.base_type == BASE_TYPE_VECTOR) {
const auto vtype = field.value.type.VectorType();
std::string type;
if (IsStruct(vtype)) {
type = WrapInNameSpace(*vtype.struct_def);
} else {
type = GenTypeWire(vtype, "", false);
}
code_.SetValue("PARAM_TYPE", "const std::vector<" + type + "> *");
code_.SetValue("PARAM_VALUE", "nullptr");
} else {
code_.SetValue("PARAM_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("PARAM_VALUE", GetDefaultScalarValue(field, false));
}
code_ += "{{PRE}}{{PARAM_TYPE}}{{PARAM_NAME}} = {{PARAM_VALUE}}\\";
}
// Generate a member, including a default value for scalars and raw pointers.
void GenMember(const FieldDef &field) {
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE &&
(field.value.type.base_type != BASE_TYPE_VECTOR ||
field.value.type.element != BASE_TYPE_UTYPE)) {
auto type = GenTypeNative(field.value.type, false, field);
auto cpp_type = field.attributes.Lookup("cpp_type");
auto full_type =
(cpp_type
? (field.value.type.base_type == BASE_TYPE_VECTOR
? "std::vector<" +
GenTypeNativePtr(cpp_type->constant, &field,
false) +
"> "
: GenTypeNativePtr(cpp_type->constant, &field, false))
: type + " ");
code_.SetValue("FIELD_TYPE", full_type);
code_.SetValue("FIELD_NAME", Name(field));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}};";
}
}
// Generate the default constructor for this struct. Properly initialize all
// scalar members with default values.
void GenDefaultConstructor(const StructDef &struct_def) {
std::string initializer_list;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE) {
auto cpp_type = field.attributes.Lookup("cpp_type");
auto native_default = field.attributes.Lookup("native_default");
// Scalar types get parsed defaults, raw pointers get nullptrs.
if (IsScalar(field.value.type.base_type)) {
if (!initializer_list.empty()) { initializer_list += ",\n "; }
initializer_list += Name(field);
initializer_list +=
"(" +
(native_default ? std::string(native_default->constant)
: GetDefaultScalarValue(field, true)) +
")";
} else if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
if (native_default) {
if (!initializer_list.empty()) {
initializer_list += ",\n ";
}
initializer_list +=
Name(field) + "(" + native_default->constant + ")";
}
}
} else if (cpp_type && field.value.type.base_type != BASE_TYPE_VECTOR) {
if (!initializer_list.empty()) { initializer_list += ",\n "; }
initializer_list += Name(field) + "(0)";
}
}
}
if (!initializer_list.empty()) {
initializer_list = "\n : " + initializer_list;
}
code_.SetValue("NATIVE_NAME",
NativeName(Name(struct_def), &struct_def, parser_.opts));
code_.SetValue("INIT_LIST", initializer_list);
code_ += " {{NATIVE_NAME}}(){{INIT_LIST}} {";
code_ += " }";
}
void GenCompareOperator(const StructDef &struct_def,
std::string accessSuffix = "") {
std::string compare_op;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && // Deprecated fields won't be accessible.
field.value.type.base_type != BASE_TYPE_UTYPE &&
(field.value.type.base_type != BASE_TYPE_VECTOR ||
field.value.type.element != BASE_TYPE_UTYPE)) {
if (!compare_op.empty()) { compare_op += " &&\n "; }
auto accessor = Name(field) + accessSuffix;
compare_op += "(lhs." + accessor + " == rhs." + accessor + ")";
}
}
std::string cmp_lhs;
std::string cmp_rhs;
if (compare_op.empty()) {
cmp_lhs = "";
cmp_rhs = "";
compare_op = " return true;";
} else {
cmp_lhs = "lhs";
cmp_rhs = "rhs";
compare_op = " return\n " + compare_op + ";";
}
code_.SetValue("CMP_OP", compare_op);
code_.SetValue("CMP_LHS", cmp_lhs);
code_.SetValue("CMP_RHS", cmp_rhs);
code_ += "";
code_ +=
"inline bool operator==(const {{NATIVE_NAME}} &{{CMP_LHS}}, const "
"{{NATIVE_NAME}} &{{CMP_RHS}}) {";
code_ += "{{CMP_OP}}";
code_ += "}";
code_ += "";
code_ +=
"inline bool operator!=(const {{NATIVE_NAME}} &lhs, const "
"{{NATIVE_NAME}} &rhs) {";
code_ += " return !(lhs == rhs);";
code_ += "}";
code_ += "";
}
void GenOperatorNewDelete(const StructDef &struct_def) {
if (auto native_custom_alloc =
struct_def.attributes.Lookup("native_custom_alloc")) {
code_ += " inline void *operator new (std::size_t count) {";
code_ += " return " + native_custom_alloc->constant +
"<{{NATIVE_NAME}}>().allocate(count / sizeof({{NATIVE_NAME}}));";
code_ += " }";
code_ += " inline void operator delete (void *ptr) {";
code_ += " return " + native_custom_alloc->constant +
"<{{NATIVE_NAME}}>().deallocate(static_cast<{{NATIVE_NAME}}*>("
"ptr),1);";
code_ += " }";
}
}
void GenNativeTable(const StructDef &struct_def) {
const auto native_name =
NativeName(Name(struct_def), &struct_def, parser_.opts);
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_.SetValue("NATIVE_NAME", native_name);
// Generate a C++ object that can hold an unpacked version of this table.
code_ += "struct {{NATIVE_NAME}} : public flatbuffers::NativeTable {";
code_ += " typedef {{STRUCT_NAME}} TableType;";
GenFullyQualifiedNameGetter(struct_def, native_name);
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
GenMember(**it);
}
GenOperatorNewDelete(struct_def);
GenDefaultConstructor(struct_def);
code_ += "};";
if (parser_.opts.gen_compare) GenCompareOperator(struct_def);
code_ += "";
}
// Generate the code to call the appropriate Verify function(s) for a field.
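  // e.g. a required string field "name" contributes
  //   VerifyOffsetRequired(verifier, VT_NAME) && verifier.VerifyString(name())
  // to the verifier's && chain.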
void GenVerifyCall(const FieldDef &field, const char *prefix) {
code_.SetValue("PRE", prefix);
code_.SetValue("NAME", Name(field));
code_.SetValue("REQUIRED", field.required ? "Required" : "");
code_.SetValue("SIZE", GenTypeSize(field.value.type));
code_.SetValue("OFFSET", GenFieldOffsetName(field));
if (IsScalar(field.value.type.base_type) || IsStruct(field.value.type)) {
code_ +=
"{{PRE}}VerifyField{{REQUIRED}}<{{SIZE}}>(verifier, {{OFFSET}})\\";
} else {
code_ += "{{PRE}}VerifyOffset{{REQUIRED}}(verifier, {{OFFSET}})\\";
}
switch (field.value.type.base_type) {
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_.SetValue("SUFFIX", UnionTypeFieldSuffix());
code_ +=
"{{PRE}}Verify{{ENUM_NAME}}(verifier, {{NAME}}(), "
"{{NAME}}{{SUFFIX}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyTable({{NAME}}())\\";
}
break;
}
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyString({{NAME}}())\\";
break;
}
case BASE_TYPE_VECTOR: {
code_ += "{{PRE}}verifier.VerifyVector({{NAME}}())\\";
switch (field.value.type.element) {
case BASE_TYPE_STRING: {
code_ += "{{PRE}}verifier.VerifyVectorOfStrings({{NAME}}())\\";
break;
}
case BASE_TYPE_STRUCT: {
if (!field.value.type.struct_def->fixed) {
code_ += "{{PRE}}verifier.VerifyVectorOfTables({{NAME}}())\\";
}
break;
}
case BASE_TYPE_UNION: {
code_.SetValue("ENUM_NAME", field.value.type.enum_def->name);
code_ +=
"{{PRE}}Verify{{ENUM_NAME}}Vector(verifier, {{NAME}}(), "
"{{NAME}}_type())\\";
break;
}
default: break;
}
break;
}
default: { break; }
}
}
// Generate CompareWithValue method for a key field.
void GenKeyFieldMethods(const FieldDef &field) {
FLATBUFFERS_ASSERT(field.key);
const bool is_string = (field.value.type.base_type == BASE_TYPE_STRING);
code_ += " bool KeyCompareLessThan(const {{STRUCT_NAME}} *o) const {";
if (is_string) {
// use operator< of flatbuffers::String
code_ += " return *{{FIELD_NAME}}() < *o->{{FIELD_NAME}}();";
} else {
code_ += " return {{FIELD_NAME}}() < o->{{FIELD_NAME}}();";
}
code_ += " }";
if (is_string) {
code_ += " int KeyCompareWithValue(const char *val) const {";
code_ += " return strcmp({{FIELD_NAME}}()->c_str(), val);";
code_ += " }";
} else {
FLATBUFFERS_ASSERT(IsScalar(field.value.type.base_type));
auto type = GenTypeBasic(field.value.type, false);
if (parser_.opts.scoped_enums && field.value.type.enum_def &&
IsScalar(field.value.type.base_type)) {
type = GenTypeGet(field.value.type, " ", "const ", " *", true);
}
// Returns {field<val: -1, field==val: 0, field>val: +1}.
code_.SetValue("KEY_TYPE", type);
code_ += " int KeyCompareWithValue({{KEY_TYPE}} val) const {";
code_ +=
" return static_cast<int>({{FIELD_NAME}}() > val) - "
"static_cast<int>({{FIELD_NAME}}() < val);";
code_ += " }";
}
}
// Generate an accessor struct, builder structs & function for a table.
void GenTable(const StructDef &struct_def) {
if (parser_.opts.generate_object_based_api) { GenNativeTable(struct_def); }
// Generate an accessor struct, with methods of the form:
// type name() const { return GetField<type>(offset, defaultval); }
GenComment(struct_def.doc_comment);
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_ +=
"struct {{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS"
" : private flatbuffers::Table {";
if (parser_.opts.generate_object_based_api) {
code_ += " typedef {{NATIVE_NAME}} NativeTableType;";
}
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
code_ +=
" static const flatbuffers::TypeTable *MiniReflectTypeTable() {";
code_ += " return {{STRUCT_NAME}}TypeTable();";
code_ += " }";
}
GenFullyQualifiedNameGetter(struct_def, Name(struct_def));
// Generate field id constants.
if (struct_def.fields.vec.size() > 0) {
      // Add a separating comma after every element except the last one, since
      // older versions of gcc complain about a trailing comma in enum lists.
code_.SetValue("SEP", "");
code_ +=
" enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset));
code_ += "{{SEP}} {{OFFSET_NAME}} = {{OFFSET_VALUE}}\\";
code_.SetValue("SEP", ",\n");
}
code_ += "";
code_ += " };";
}
// Generate the accessors.
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) {
// Deprecated fields won't be accessible.
continue;
}
const bool is_struct = IsStruct(field.value.type);
const bool is_scalar = IsScalar(field.value.type.base_type);
code_.SetValue("FIELD_NAME", Name(field));
      // Call a different accessor for pointer types, one that indirects.
std::string accessor = "";
if (is_scalar) {
accessor = "GetField<";
} else if (is_struct) {
accessor = "GetStruct<";
} else {
accessor = "GetPointer<";
}
auto offset_str = GenFieldOffsetName(field);
auto offset_type =
GenTypeGet(field.value.type, "", "const ", " *", false);
auto call = accessor + offset_type + ">(" + offset_str;
// Default value as second arg for non-pointer types.
if (is_scalar) { call += ", " + GenDefaultConstant(field); }
call += ")";
std::string afterptr = " *" + NullableExtension();
GenComment(field.doc_comment, " ");
code_.SetValue("FIELD_TYPE", GenTypeGet(field.value.type, " ", "const ",
afterptr.c_str(), true));
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, call));
code_.SetValue("NULLABLE_EXT", NullableExtension());
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (field.value.type.base_type == BASE_TYPE_UNION) {
auto u = field.value.type.enum_def;
if (!field.value.type.enum_def->uses_multiple_type_instances)
code_ +=
" template<typename T> "
"const T *{{NULLABLE_EXT}}{{FIELD_NAME}}_as() const;";
for (auto u_it = u->Vals().begin(); u_it != u->Vals().end(); ++u_it) {
auto &ev = **u_it;
if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; }
auto full_struct_name = GetUnionElement(ev, true, true);
          // @TODO: Maybe make this decision more universal? How?
code_.SetValue("U_GET_TYPE",
EscapeKeyword(field.name + UnionTypeFieldSuffix()));
code_.SetValue(
"U_ELEMENT_TYPE",
WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev));
code_.SetValue("U_NULLABLE", NullableExtension());
// `const Type *union_name_asType() const` accessor.
code_ += " {{U_FIELD_TYPE}}{{U_NULLABLE}}{{U_FIELD_NAME}}() const {";
code_ +=
" return {{U_GET_TYPE}}() == {{U_ELEMENT_TYPE}} ? "
"static_cast<{{U_FIELD_TYPE}}>({{FIELD_NAME}}()) "
": nullptr;";
code_ += " }";
}
}
if (parser_.opts.mutable_buffer) {
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("SET_FN", "SetField<" + type + ">");
code_.SetValue("OFFSET_NAME", offset_str);
code_.SetValue("FIELD_TYPE", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + Name(field)));
code_.SetValue("DEFAULT_VALUE", GenDefaultConstant(field));
code_ +=
" bool mutate_{{FIELD_NAME}}({{FIELD_TYPE}} "
"_{{FIELD_NAME}}) {";
code_ +=
" return {{SET_FN}}({{OFFSET_NAME}}, {{FIELD_VALUE}}, "
"{{DEFAULT_VALUE}});";
code_ += " }";
} else {
auto postptr = " *" + NullableExtension();
auto type =
GenTypeGet(field.value.type, " ", "", postptr.c_str(), true);
auto underlying = accessor + type + ">(" + offset_str + ")";
code_.SetValue("FIELD_TYPE", type);
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, true, underlying));
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
}
}
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
std::string qualified_name = nested->constant;
auto nested_root = parser_.LookupStruct(nested->constant);
if (nested_root == nullptr) {
qualified_name = parser_.current_namespace_->GetFullyQualifiedName(
nested->constant);
nested_root = parser_.LookupStruct(qualified_name);
}
FLATBUFFERS_ASSERT(nested_root); // Guaranteed to exist by parser.
(void)nested_root;
code_.SetValue("CPP_NAME", TranslateNameSpace(qualified_name));
code_ += " const {{CPP_NAME}} *{{FIELD_NAME}}_nested_root() const {";
code_ +=
" return "
"flatbuffers::GetRoot<{{CPP_NAME}}>({{FIELD_NAME}}()->Data());";
code_ += " }";
}
if (field.flexbuffer) {
code_ +=
" flexbuffers::Reference {{FIELD_NAME}}_flexbuffer_root()"
" const {";
        // Both Data() and size() are const methods, so the call order
        // doesn't matter.
code_ +=
" return flexbuffers::GetRoot({{FIELD_NAME}}()->Data(), "
"{{FIELD_NAME}}()->size());";
code_ += " }";
}
// Generate a comparison function for this field if it is a key.
if (field.key) { GenKeyFieldMethods(field); }
}
// Generate a verifier function that can check a buffer from an untrusted
// source will never cause reads outside the buffer.
code_ += " bool Verify(flatbuffers::Verifier &verifier) const {";
code_ += " return VerifyTableStart(verifier)\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) { continue; }
GenVerifyCall(field, " &&\n ");
}
code_ += " &&\n verifier.EndTable();";
code_ += " }";
if (parser_.opts.generate_object_based_api) {
// Generate the UnPack() pre declaration.
code_ +=
" " + TableUnPackSignature(struct_def, true, parser_.opts) + ";";
code_ +=
" " + TableUnPackToSignature(struct_def, true, parser_.opts) + ";";
code_ += " " + TablePackSignature(struct_def, true, parser_.opts) + ";";
}
code_ += "};"; // End of table.
code_ += "";
// Explicit specializations for union accessors
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated || field.value.type.base_type != BASE_TYPE_UNION) {
continue;
}
auto u = field.value.type.enum_def;
if (u->uses_multiple_type_instances) continue;
code_.SetValue("FIELD_NAME", Name(field));
for (auto u_it = u->Vals().begin(); u_it != u->Vals().end(); ++u_it) {
auto &ev = **u_it;
if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; }
auto full_struct_name = GetUnionElement(ev, true, true);
code_.SetValue(
"U_ELEMENT_TYPE",
WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev)));
code_.SetValue("U_FIELD_TYPE", "const " + full_struct_name + " *");
code_.SetValue("U_ELEMENT_NAME", full_struct_name);
code_.SetValue("U_FIELD_NAME", Name(field) + "_as_" + Name(ev));
// `template<> const T *union_name_as<T>() const` accessor.
code_ +=
"template<> "
"inline {{U_FIELD_TYPE}}{{STRUCT_NAME}}::{{FIELD_NAME}}_as"
"<{{U_ELEMENT_NAME}}>() const {";
code_ += " return {{U_FIELD_NAME}}();";
code_ += "}";
code_ += "";
}
}
GenBuilders(struct_def);
if (parser_.opts.generate_object_based_api) {
// Generate a pre-declaration for a CreateX method that works with an
// unpacked C++ object.
code_ += TableCreateSignature(struct_def, true, parser_.opts) + ";";
code_ += "";
}
}
void GenBuilders(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", Name(struct_def));
// Generate a builder struct:
code_ += "struct {{STRUCT_NAME}}Builder {";
code_ += " flatbuffers::FlatBufferBuilder &fbb_;";
code_ += " flatbuffers::uoffset_t start_;";
bool has_string_or_vector_fields = false;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
const bool is_scalar = IsScalar(field.value.type.base_type);
const bool is_string = field.value.type.base_type == BASE_TYPE_STRING;
const bool is_vector = field.value.type.base_type == BASE_TYPE_VECTOR;
if (is_string || is_vector) { has_string_or_vector_fields = true; }
std::string offset = GenFieldOffsetName(field);
std::string name = GenUnderlyingCast(field, false, Name(field));
std::string value = is_scalar ? GenDefaultConstant(field) : "";
// Generate accessor functions of the form:
// void add_name(type name) {
// fbb_.AddElement<type>(offset, name, default);
// }
code_.SetValue("FIELD_NAME", Name(field));
code_.SetValue("FIELD_TYPE", GenTypeWire(field.value.type, " ", true));
code_.SetValue("ADD_OFFSET", Name(struct_def) + "::" + offset);
code_.SetValue("ADD_NAME", name);
code_.SetValue("ADD_VALUE", value);
if (is_scalar) {
const auto type = GenTypeWire(field.value.type, "", false);
code_.SetValue("ADD_FN", "AddElement<" + type + ">");
} else if (IsStruct(field.value.type)) {
code_.SetValue("ADD_FN", "AddStruct");
} else {
code_.SetValue("ADD_FN", "AddOffset");
}
code_ += " void add_{{FIELD_NAME}}({{FIELD_TYPE}}{{FIELD_NAME}}) {";
code_ += " fbb_.{{ADD_FN}}(\\";
if (is_scalar) {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}}, {{ADD_VALUE}});";
} else {
code_ += "{{ADD_OFFSET}}, {{ADD_NAME}});";
}
code_ += " }";
}
}
// Builder constructor
code_ +=
" explicit {{STRUCT_NAME}}Builder(flatbuffers::FlatBufferBuilder "
"&_fbb)";
code_ += " : fbb_(_fbb) {";
code_ += " start_ = fbb_.StartTable();";
code_ += " }";
// Assignment operator;
code_ +=
" {{STRUCT_NAME}}Builder &operator="
"(const {{STRUCT_NAME}}Builder &);";
// Finish() function.
code_ += " flatbuffers::Offset<{{STRUCT_NAME}}> Finish() {";
code_ += " const auto end = fbb_.EndTable(start_);";
code_ += " auto o = flatbuffers::Offset<{{STRUCT_NAME}}>(end);";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated && field.required) {
code_.SetValue("FIELD_NAME", Name(field));
code_.SetValue("OFFSET_NAME", GenFieldOffsetName(field));
code_ += " fbb_.Required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}});";
}
}
code_ += " return o;";
code_ += " }";
code_ += "};";
code_ += "";
// Generate a convenient CreateX function that uses the above builder
// to create a table in one go.
code_ +=
"inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) { GenParam(field, false, ",\n "); }
}
code_ += ") {";
code_ += " {{STRUCT_NAME}}Builder builder_(_fbb);";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size; size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
const auto &field = **it;
if (!field.deprecated && (!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code_.SetValue("FIELD_NAME", Name(field));
code_ += " builder_.add_{{FIELD_NAME}}({{FIELD_NAME}});";
}
}
}
code_ += " return builder_.Finish();";
code_ += "}";
code_ += "";
// Generate a CreateXDirect function with vector types as parameters
if (has_string_or_vector_fields) {
code_ +=
"inline flatbuffers::Offset<{{STRUCT_NAME}}> "
"Create{{STRUCT_NAME}}Direct(";
code_ += " flatbuffers::FlatBufferBuilder &_fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) { GenParam(field, true, ",\n "); }
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name =
struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += ") {";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", Name(field));
if (field.value.type.base_type == BASE_TYPE_STRING) {
if (!field.shared) {
code_.SetValue("CREATE_STRING", "CreateString");
} else {
code_.SetValue("CREATE_STRING", "CreateSharedString");
}
code_ +=
" auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? "
"_fbb.{{CREATE_STRING}}({{FIELD_NAME}}) : 0;";
} else if (field.value.type.base_type == BASE_TYPE_VECTOR) {
code_ += " auto {{FIELD_NAME}}__ = {{FIELD_NAME}} ? \\";
const auto vtype = field.value.type.VectorType();
if (IsStruct(vtype)) {
const auto type = WrapInNameSpace(*vtype.struct_def);
code_ += "_fbb.CreateVectorOfStructs<" + type + ">\\";
} else {
const auto type = GenTypeWire(vtype, "", false);
code_ += "_fbb.CreateVector<" + type + ">\\";
}
code_ += "(*{{FIELD_NAME}}) : 0;";
}
}
}
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (!field.deprecated) {
code_.SetValue("FIELD_NAME", Name(field));
code_ += ",\n {{FIELD_NAME}}\\";
if (field.value.type.base_type == BASE_TYPE_STRING ||
field.value.type.base_type == BASE_TYPE_VECTOR) {
code_ += "__\\";
}
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
std::string GenUnionUnpackVal(const FieldDef &afield,
const char *vec_elem_access,
const char *vec_type_access) {
return afield.value.type.enum_def->name + "Union::UnPack(" + "_e" +
vec_elem_access + ", " +
EscapeKeyword(afield.name + UnionTypeFieldSuffix()) + "()" +
vec_type_access + ", _resolver)";
}
std::string GenUnpackVal(const Type &type, const std::string &val,
bool invector, const FieldDef &afield) {
switch (type.base_type) {
case BASE_TYPE_STRING: {
if (FlexibleStringConstructor(&afield)) {
return NativeString(&afield) + "(" + val + "->c_str(), " + val +
"->size())";
} else {
return val + "->str()";
}
}
case BASE_TYPE_STRUCT: {
const auto name = WrapInNameSpace(*type.struct_def);
if (IsStruct(type)) {
auto native_type = type.struct_def->attributes.Lookup("native_type");
if (native_type) {
return "flatbuffers::UnPack(*" + val + ")";
} else if (invector || afield.native_inline) {
return "*" + val;
} else {
const auto ptype = GenTypeNativePtr(name, &afield, true);
return ptype + "(new " + name + "(*" + val + "))";
}
} else {
const auto ptype = GenTypeNativePtr(
NativeName(name, type.struct_def, parser_.opts), &afield, true);
return ptype + "(" + val + "->UnPack(_resolver))";
}
}
case BASE_TYPE_UNION: {
return GenUnionUnpackVal(
afield, invector ? "->Get(_i)" : "",
invector ? ("->GetEnum<" + type.enum_def->name + ">(_i)").c_str()
: "");
}
default: {
return val;
break;
}
}
}
std::string GenUnpackFieldStatement(const FieldDef &field,
const FieldDef *union_field) {
std::string code;
switch (field.value.type.base_type) {
case BASE_TYPE_VECTOR: {
auto cpp_type = field.attributes.Lookup("cpp_type");
std::string indexing;
if (field.value.type.enum_def) {
indexing += "static_cast<" +
WrapInNameSpace(*field.value.type.enum_def) + ">(";
}
indexing += "_e->Get(_i)";
if (field.value.type.enum_def) { indexing += ")"; }
if (field.value.type.element == BASE_TYPE_BOOL) { indexing += " != 0"; }
// Generate code that pushes data from _e to _o in the form:
// for (uoffset_t i = 0; i < _e->size(); ++i) {
// _o->field.push_back(_e->Get(_i));
// }
auto name = Name(field);
if (field.value.type.element == BASE_TYPE_UTYPE) {
name = StripUnionType(Name(field));
}
auto access =
field.value.type.element == BASE_TYPE_UTYPE
? ".type"
: (field.value.type.element == BASE_TYPE_UNION ? ".value" : "");
code += "{ _o->" + name + ".resize(_e->size()); ";
code += "for (flatbuffers::uoffset_t _i = 0;";
code += " _i < _e->size(); _i++) { ";
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "//vector resolver, " + PtrType(&field) + "\n";
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + name + "[_i]" + access +
"), ";
code += "static_cast<flatbuffers::hash_value_t>(" + indexing + "));";
if (PtrType(&field) == "naked") {
code += " else ";
code += "_o->" + name + "[_i]" + access + " = nullptr";
} else {
// code += " else ";
// code += "_o->" + name + "[_i]" + access + " = " +
// GenTypeNativePtr(cpp_type->constant, &field, true) + "();";
code += "/* else do nothing */";
}
} else {
code += "_o->" + name + "[_i]" + access + " = ";
code += GenUnpackVal(field.value.type.VectorType(), indexing, true,
field);
}
code += "; } }";
break;
}
case BASE_TYPE_UTYPE: {
FLATBUFFERS_ASSERT(union_field->value.type.base_type ==
BASE_TYPE_UNION);
// Generate code that sets the union type, of the form:
// _o->field.type = _e;
code += "_o->" + union_field->name + ".type = _e;";
break;
}
case BASE_TYPE_UNION: {
// Generate code that sets the union value, of the form:
// _o->field.value = Union::Unpack(_e, field_type(), resolver);
code += "_o->" + Name(field) + ".value = ";
code += GenUnionUnpackVal(field, "", "");
code += ";";
break;
}
default: {
auto cpp_type = field.attributes.Lookup("cpp_type");
if (cpp_type) {
// Generate code that resolves the cpp pointer type, of the form:
// if (resolver)
// (*resolver)(&_o->field, (hash_value_t)(_e));
// else
// _o->field = nullptr;
code += "//scalar resolver, " + PtrType(&field) + " \n";
code += "if (_resolver) ";
code += "(*_resolver)";
code += "(reinterpret_cast<void **>(&_o->" + Name(field) + "), ";
code += "static_cast<flatbuffers::hash_value_t>(_e));";
if (PtrType(&field) == "naked") {
code += " else ";
code += "_o->" + Name(field) + " = nullptr;";
} else {
// code += " else ";
// code += "_o->" + Name(field) + " = " +
// GenTypeNativePtr(cpp_type->constant, &field, true) + "();";
code += "/* else do nothing */;";
}
} else {
// Generate code for assigning the value, of the form:
// _o->field = value;
code += "_o->" + Name(field) + " = ";
code += GenUnpackVal(field.value.type, "_e", false, field) + ";";
}
break;
}
}
return code;
}
std::string GenCreateParam(const FieldDef &field) {
const IDLOptions &opts = parser_.opts;
std::string value = "_o->";
if (field.value.type.base_type == BASE_TYPE_UTYPE) {
value += StripUnionType(Name(field));
value += ".type";
} else {
value += Name(field);
}
if (field.value.type.base_type != BASE_TYPE_VECTOR &&
field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(field.value.type, false);
value =
"_rehasher ? "
"static_cast<" +
type + ">((*_rehasher)(" + value + GenPtrGet(field) + ")) : 0";
}
std::string code;
switch (field.value.type.base_type) {
// String fields are of the form:
// _fbb.CreateString(_o->field)
// or
// _fbb.CreateSharedString(_o->field)
case BASE_TYPE_STRING: {
if (!field.shared) {
code += "_fbb.CreateString(";
} else {
code += "_fbb.CreateSharedString(";
}
code += value;
code.push_back(')');
// For optional fields, check to see if there actually is any data
// in _o->field before attempting to access it. If there isn't,
// depending on set_empty_to_null either set it to 0 or an empty string.
if (!field.required) {
auto empty_value =
opts.set_empty_to_null ? "0" : "_fbb.CreateSharedString(\"\")";
code = value + ".empty() ? " + empty_value + " : " + code;
}
break;
}
// Vector fields come in several flavours, of the forms:
// _fbb.CreateVector(_o->field);
// _fbb.CreateVector((const utype*)_o->field.data(), _o->field.size());
// _fbb.CreateVectorOfStrings(_o->field)
// _fbb.CreateVectorOfStructs(_o->field)
// _fbb.CreateVector<Offset<T>>(_o->field.size() [&](size_t i) {
// return CreateT(_fbb, _o->Get(i), rehasher);
// });
case BASE_TYPE_VECTOR: {
auto vector_type = field.value.type.VectorType();
switch (vector_type.base_type) {
case BASE_TYPE_STRING: {
if (NativeString(&field) == "std::string") {
code += "_fbb.CreateVectorOfStrings(" + value + ")";
} else {
// Use by-function serialization to emulate
            // CreateVectorOfStrings(); this also works with non-std strings.
code +=
"_fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>"
" ";
code += "(" + value + ".size(), ";
code += "[](size_t i, _VectorArgs *__va) { ";
code +=
"return __va->__fbb->CreateString(__va->_" + value + "[i]);";
code += " }, &_va )";
}
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(vector_type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "_fbb.CreateVectorOfNativeStructs<";
code += WrapInNameSpace(*vector_type.struct_def) + ">";
} else {
code += "_fbb.CreateVectorOfStructs";
}
code += "(" + value + ")";
} else {
code += "_fbb.CreateVector<flatbuffers::Offset<";
code += WrapInNameSpace(*vector_type.struct_def) + ">> ";
code += "(" + value + ".size(), ";
code += "[](size_t i, _VectorArgs *__va) { ";
code += "return Create" + vector_type.struct_def->name;
code += "(*__va->__fbb, __va->_" + value + "[i]" +
GenPtrGet(field) + ", ";
code += "__va->__rehasher); }, &_va )";
}
break;
}
case BASE_TYPE_BOOL: {
code += "_fbb.CreateVector(" + value + ")";
break;
}
case BASE_TYPE_UNION: {
code +=
"_fbb.CreateVector<flatbuffers::"
"Offset<void>>(" +
value +
".size(), [](size_t i, _VectorArgs *__va) { "
"return __va->_" +
value + "[i].Pack(*__va->__fbb, __va->__rehasher); }, &_va)";
break;
}
case BASE_TYPE_UTYPE: {
value = StripUnionType(value);
code += "_fbb.CreateVector<uint8_t>(" + value +
".size(), [](size_t i, _VectorArgs *__va) { "
"return static_cast<uint8_t>(__va->_" +
value + "[i].type); }, &_va)";
break;
}
default: {
if (field.value.type.enum_def) {
// For enumerations, we need to get access to the array data for
// the underlying storage type (eg. uint8_t).
const auto basetype = GenTypeBasic(
field.value.type.enum_def->underlying_type, false);
code += "_fbb.CreateVectorScalarCast<" + basetype +
">(flatbuffers::data(" + value + "), " + value +
".size())";
} else if (field.attributes.Lookup("cpp_type")) {
auto type = GenTypeBasic(vector_type, false);
code += "_fbb.CreateVector<" + type + ">(" + value + ".size(), ";
code += "[](size_t i, _VectorArgs *__va) { ";
code += "return __va->__rehasher ? ";
code += "static_cast<" + type + ">((*__va->__rehasher)";
code += "(__va->_" + value + "[i]" + GenPtrGet(field) + ")) : 0";
code += "; }, &_va )";
} else {
code += "_fbb.CreateVector(" + value + ")";
}
break;
}
}
// If set_empty_to_null option is enabled, for optional fields, check to
// see if there actually is any data in _o->field before attempting to
// access it.
if (opts.set_empty_to_null && !field.required) {
code = value + ".size() ? " + code + " : 0";
}
break;
}
case BASE_TYPE_UNION: {
// _o->field.Pack(_fbb);
code += value + ".Pack(_fbb)";
break;
}
case BASE_TYPE_STRUCT: {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) {
code += "flatbuffers::Pack(" + value + ")";
} else if (field.native_inline) {
code += "&" + value;
} else {
code += value + " ? " + value + GenPtrGet(field) + " : 0";
}
} else {
// _o->field ? CreateT(_fbb, _o->field.get(), _rehasher);
const auto type = field.value.type.struct_def->name;
code += value + " ? Create" + type;
code += "(_fbb, " + value + GenPtrGet(field) + ", _rehasher)";
code += " : 0";
}
break;
}
default: {
code += value;
break;
}
}
return code;
}
// Generate code for tables that needs to come after the regular definition.
void GenTablePost(const StructDef &struct_def) {
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_.SetValue("NATIVE_NAME",
NativeName(Name(struct_def), &struct_def, parser_.opts));
if (parser_.opts.generate_object_based_api) {
// Generate the X::UnPack() method.
code_ += "inline " +
TableUnPackSignature(struct_def, false, parser_.opts) + " {";
code_ += " auto _o = new {{NATIVE_NAME}}();";
code_ += " UnPackTo(_o, _resolver);";
code_ += " return _o;";
code_ += "}";
code_ += "";
code_ += "inline " +
TableUnPackToSignature(struct_def, false, parser_.opts) + " {";
code_ += " (void)_o;";
code_ += " (void)_resolver;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.deprecated) { continue; }
// Assign a value from |this| to |_o|. Values from |this| are stored
// in a variable |_e| by calling this->field_type(). The value is then
// assigned to |_o| using the GenUnpackFieldStatement.
const bool is_union = field.value.type.base_type == BASE_TYPE_UTYPE;
const auto statement =
GenUnpackFieldStatement(field, is_union ? *(it + 1) : nullptr);
code_.SetValue("FIELD_NAME", Name(field));
auto prefix = " { auto _e = {{FIELD_NAME}}(); ";
auto check = IsScalar(field.value.type.base_type) ? "" : "if (_e) ";
auto postfix = " };";
code_ += std::string(prefix) + check + statement + postfix;
}
code_ += "}";
code_ += "";
// Generate the X::Pack member function that simply calls the global
// CreateX function.
code_ += "inline " + TablePackSignature(struct_def, false, parser_.opts) +
" {";
code_ += " return Create{{STRUCT_NAME}}(_fbb, _o, _rehasher);";
code_ += "}";
code_ += "";
// Generate a CreateX method that works with an unpacked C++ object.
code_ += "inline " +
TableCreateSignature(struct_def, false, parser_.opts) + " {";
code_ += " (void)_rehasher;";
code_ += " (void)_o;";
code_ +=
" struct _VectorArgs "
"{ flatbuffers::FlatBufferBuilder *__fbb; "
"const " +
NativeName(Name(struct_def), &struct_def, parser_.opts) +
"* __o; "
"const flatbuffers::rehasher_function_t *__rehasher; } _va = { "
"&_fbb, _o, _rehasher}; (void)_va;";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) { continue; }
code_ += " auto _" + Name(field) + " = " + GenCreateParam(field) + ";";
}
// Need to call "Create" with the struct namespace.
const auto qualified_create_name =
struct_def.defined_namespace->GetFullyQualifiedName("Create");
code_.SetValue("CREATE_NAME", TranslateNameSpace(qualified_create_name));
code_ += " return {{CREATE_NAME}}{{STRUCT_NAME}}(";
code_ += " _fbb\\";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) { continue; }
bool pass_by_address = false;
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
if (IsStruct(field.value.type)) {
auto native_type =
field.value.type.struct_def->attributes.Lookup("native_type");
if (native_type) { pass_by_address = true; }
}
}
// Call the CreateX function using values from |_o|.
if (pass_by_address) {
code_ += ",\n &_" + Name(field) + "\\";
} else {
code_ += ",\n _" + Name(field) + "\\";
}
}
code_ += ");";
code_ += "}";
code_ += "";
}
}
static void GenPadding(
const FieldDef &field, std::string *code_ptr, int *id,
const std::function<void(int bits, std::string *code_ptr, int *id)> &f) {
if (field.padding) {
for (int i = 0; i < 4; i++) {
if (static_cast<int>(field.padding) & (1 << i)) {
f((1 << i) * 8, code_ptr, id);
}
}
FLATBUFFERS_ASSERT(!(field.padding & ~0xF));
}
}
static void PaddingDefinition(int bits, std::string *code_ptr, int *id) {
*code_ptr += " int" + NumToString(bits) + "_t padding" +
NumToString((*id)++) + "__;";
}
static void PaddingInitializer(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += ",\n padding" + NumToString((*id)++) + "__(0)";
}
static void PaddingNoop(int bits, std::string *code_ptr, int *id) {
(void)bits;
*code_ptr += " (void)padding" + NumToString((*id)++) + "__;";
}
// Generate an accessor struct with constructor for a flatbuffers struct.
void GenStruct(const StructDef &struct_def) {
// Generate an accessor struct, with private variables of the form:
// type name_;
// Generates manual padding and alignment.
// Variables are private because they contain little endian data on all
// platforms.
GenComment(struct_def.doc_comment);
code_.SetValue("ALIGN", NumToString(struct_def.minalign));
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_ +=
"FLATBUFFERS_MANUALLY_ALIGNED_STRUCT({{ALIGN}}) "
"{{STRUCT_NAME}} FLATBUFFERS_FINAL_CLASS {";
code_ += " private:";
int padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
code_.SetValue("FIELD_TYPE",
GenTypeGet(field.value.type, " ", "", " ", false));
code_.SetValue("FIELD_NAME", Name(field));
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}_;";
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingDefinition);
code_ += padding;
}
}
// Generate GetFullyQualifiedName
code_ += "";
code_ += " public:";
// Make TypeTable accessible via the generated struct.
if (parser_.opts.mini_reflect != IDLOptions::kNone) {
code_ +=
" static const flatbuffers::TypeTable *MiniReflectTypeTable() {";
code_ += " return {{STRUCT_NAME}}TypeTable();";
code_ += " }";
}
GenFullyQualifiedNameGetter(struct_def, Name(struct_def));
// Generate a default constructor.
code_ += " {{STRUCT_NAME}}() {";
code_ += " memset(static_cast<void *>(this), 0, sizeof({{STRUCT_NAME}}));";
code_ += " }";
// Generate a constructor that takes all fields as arguments.
std::string arg_list;
std::string init_list;
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
const auto member_name = Name(field) + "_";
const auto arg_name = "_" + Name(field);
const auto arg_type =
GenTypeGet(field.value.type, " ", "const ", " &", true);
if (it != struct_def.fields.vec.begin()) {
arg_list += ", ";
init_list += ",\n ";
}
arg_list += arg_type;
arg_list += arg_name;
init_list += member_name;
if (IsScalar(field.value.type.base_type)) {
auto type = GenUnderlyingCast(field, false, arg_name);
init_list += "(flatbuffers::EndianScalar(" + type + "))";
} else {
init_list += "(" + arg_name + ")";
}
if (field.padding) {
GenPadding(field, &init_list, &padding_id, PaddingInitializer);
}
}
if (!arg_list.empty()) {
code_.SetValue("ARG_LIST", arg_list);
code_.SetValue("INIT_LIST", init_list);
code_ += " {{STRUCT_NAME}}({{ARG_LIST}})";
code_ += " : {{INIT_LIST}} {";
padding_id = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingNoop);
code_ += padding;
}
}
code_ += " }";
}
// Generate accessor methods of the form:
// type name() const { return flatbuffers::EndianScalar(name_); }
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
auto field_type = GenTypeGet(field.value.type, " ", "const ", " &", true);
auto is_scalar = IsScalar(field.value.type.base_type);
auto member = Name(field) + "_";
auto value =
is_scalar ? "flatbuffers::EndianScalar(" + member + ")" : member;
code_.SetValue("FIELD_NAME", Name(field));
code_.SetValue("FIELD_TYPE", field_type);
code_.SetValue("FIELD_VALUE", GenUnderlyingCast(field, true, value));
GenComment(field.doc_comment, " ");
code_ += " {{FIELD_TYPE}}{{FIELD_NAME}}() const {";
code_ += " return {{FIELD_VALUE}};";
code_ += " }";
if (parser_.opts.mutable_buffer) {
auto mut_field_type = GenTypeGet(field.value.type, " ", "", " &", true);
code_.SetValue("FIELD_TYPE", mut_field_type);
if (is_scalar) {
code_.SetValue("ARG", GenTypeBasic(field.value.type, true));
code_.SetValue("FIELD_VALUE",
GenUnderlyingCast(field, false, "_" + Name(field)));
code_ += " void mutate_{{FIELD_NAME}}({{ARG}} _{{FIELD_NAME}}) {";
code_ +=
" flatbuffers::WriteScalar(&{{FIELD_NAME}}_, "
"{{FIELD_VALUE}});";
code_ += " }";
} else {
code_ += " {{FIELD_TYPE}}mutable_{{FIELD_NAME}}() {";
code_ += " return {{FIELD_NAME}}_;";
code_ += " }";
}
}
// Generate a comparison function for this field if it is a key.
if (field.key) { GenKeyFieldMethods(field); }
}
code_.SetValue("NATIVE_NAME", Name(struct_def));
GenOperatorNewDelete(struct_def);
code_ += "};";
code_.SetValue("STRUCT_BYTE_SIZE", NumToString(struct_def.bytesize));
code_ += "FLATBUFFERS_STRUCT_END({{STRUCT_NAME}}, {{STRUCT_BYTE_SIZE}});";
if (parser_.opts.gen_compare) GenCompareOperator(struct_def, "()");
code_ += "";
}
// Set up the correct namespace. Only open a namespace if the existing one is
// different (closing/opening only what is necessary).
//
// The file must start and end with an empty (or null) namespace so that
// namespaces are properly opened and closed.
void SetNameSpace(const Namespace *ns) {
if (cur_name_space_ == ns) { return; }
// Compute the size of the longest common namespace prefix.
// If cur_name_space is A::B::C::D and ns is A::B::E::F::G,
// the common prefix is A::B:: and we have old_size = 4, new_size = 5
// and common_prefix_size = 2
size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0;
size_t new_size = ns ? ns->components.size() : 0;
size_t common_prefix_size = 0;
while (common_prefix_size < old_size && common_prefix_size < new_size &&
ns->components[common_prefix_size] ==
cur_name_space_->components[common_prefix_size]) {
common_prefix_size++;
}
// Close cur_name_space in reverse order to reach the common prefix.
// In the previous example, D then C are closed.
for (size_t j = old_size; j > common_prefix_size; --j) {
code_ += "} // namespace " + cur_name_space_->components[j - 1];
}
if (old_size != common_prefix_size) { code_ += ""; }
// open namespace parts to reach the ns namespace
// in the previous example, E, then F, then G are opened
for (auto j = common_prefix_size; j != new_size; ++j) {
code_ += "namespace " + ns->components[j] + " {";
}
if (new_size != common_prefix_size) { code_ += ""; }
cur_name_space_ = ns;
}
const TypedFloatConstantGenerator float_const_gen_;
};
} // namespace cpp
bool GenerateCPP(const Parser &parser, const std::string &path,
const std::string &file_name) {
cpp::CppGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string CPPMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
const auto filebase =
flatbuffers::StripPath(flatbuffers::StripExtension(file_name));
const auto included_files = parser.GetIncludedFilesRecursive(file_name);
std::string make_rule = GeneratedFileName(path, filebase) + ": ";
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 16,009 | `std::strtok` isn't necessary. The `std::string::find_first_of` is better C++ alternative to `std::strtok`. For example, see `CheckedError Parser::ParseEnumFromString` method. | google-flatbuffers | java |
@@ -3,9 +3,6 @@ namespace Psalm\CodeLocation;
class DocblockTypeLocation extends \Psalm\CodeLocation
{
- /** @var int */
- public $raw_line_number;
-
public function __construct(
\Psalm\FileSource $file_source,
int $file_start, | 1 | <?php
namespace Psalm\CodeLocation;
class DocblockTypeLocation extends \Psalm\CodeLocation
{
/** @var int */
public $raw_line_number;
public function __construct(
\Psalm\FileSource $file_source,
int $file_start,
int $file_end,
int $line_number
) {
$this->file_start = $file_start;
// matches how CodeLocation works
$this->file_end = $file_end - 1;
$this->raw_file_start = $file_start;
$this->raw_file_end = $file_end;
$this->raw_line_number = $line_number;
$this->file_path = $file_source->getFilePath();
$this->file_name = $file_source->getFileName();
$this->single_line = false;
$this->preview_start = $this->file_start;
}
}
| 1 | 9,153 | This property is already declared in a parent with the same visibility/type/value. This one is redundant. | vimeo-psalm | php |
@@ -78,6 +78,8 @@ def autorun_get_interactive_session(cmds, **kargs):
self.s = ""
def write(self, x):
self.s += x
+ def flush(self):
+ pass
sw = StringWriter()
sstdout,sstderr = sys.stdout,sys.stderr | 1 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more information
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
Run commands when the Scapy interpreter starts.
"""
import code,sys
from scapy.config import conf
from scapy.themes import *
from scapy.error import Scapy_Exception
from scapy.utils import tex_escape
#########################
##### Autorun stuff #####
#########################
class StopAutorun(Scapy_Exception):
code_run = ""
class ScapyAutorunInterpreter(code.InteractiveInterpreter):
def __init__(self, *args, **kargs):
code.InteractiveInterpreter.__init__(self, *args, **kargs)
self.error = 0
def showsyntaxerror(self, *args, **kargs):
self.error = 1
return code.InteractiveInterpreter.showsyntaxerror(self, *args, **kargs)
def showtraceback(self, *args, **kargs):
self.error = 1
exc_type, exc_value, exc_tb = sys.exc_info()
if isinstance(exc_value, StopAutorun):
raise exc_value
return code.InteractiveInterpreter.showtraceback(self, *args, **kargs)
def autorun_commands(cmds,my_globals=None,verb=0):
sv = conf.verb
import __builtin__
try:
try:
if my_globals is None:
my_globals = __import__("scapy.all").all.__dict__
conf.verb = verb
interp = ScapyAutorunInterpreter(my_globals)
cmd = ""
cmds = cmds.splitlines()
cmds.append("") # ensure we finish multi-line commands
cmds.reverse()
__builtin__.__dict__["_"] = None
while 1:
if cmd:
sys.stderr.write(sys.__dict__.get("ps2","... "))
else:
sys.stderr.write(str(sys.__dict__.get("ps1",ColorPrompt())))
l = cmds.pop()
print l
cmd += "\n"+l
if interp.runsource(cmd):
continue
if interp.error:
return 0
cmd = ""
if len(cmds) <= 1:
break
except SystemExit:
pass
finally:
conf.verb = sv
return _
def autorun_get_interactive_session(cmds, **kargs):
class StringWriter:
def __init__(self):
self.s = ""
def write(self, x):
self.s += x
sw = StringWriter()
sstdout,sstderr = sys.stdout,sys.stderr
try:
try:
sys.stdout = sys.stderr = sw
res = autorun_commands(cmds, **kargs)
except StopAutorun,e:
e.code_run = sw.s
raise
finally:
sys.stdout,sys.stderr = sstdout,sstderr
return sw.s,res
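# Editor's note -- a minimal sketch, not part of the original module, of why
# the StringWriter above needs a no-op flush(): it gets installed as
# sys.stdout/sys.stderr, and callers such as the multiprocessing module (per
# the review note attached to this record -- an assumption, not verified here)
# may call sys.stdout.flush(), which raises AttributeError on a writer that
# only defines write(). The _demo_missing_flush helper name is hypothetical.
def _demo_missing_flush():
    import sys
    class _NoFlush:
        def write(self, x):
            pass
    saved, sys.stdout = sys.stdout, _NoFlush()
    try:
        sys.stdout.flush()  # AttributeError: instance has no attribute 'flush'
    except AttributeError:
        pass
    finally:
        sys.stdout = saved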
def autorun_get_text_interactive_session(cmds, **kargs):
ct = conf.color_theme
try:
conf.color_theme = NoTheme()
s,res = autorun_get_interactive_session(cmds, **kargs)
finally:
conf.color_theme = ct
return s,res
def autorun_get_ansi_interactive_session(cmds, **kargs):
ct = conf.color_theme
try:
conf.color_theme = DefaultTheme()
s,res = autorun_get_interactive_session(cmds, **kargs)
finally:
conf.color_theme = ct
return s,res
def autorun_get_html_interactive_session(cmds, **kargs):
ct = conf.color_theme
to_html = lambda s: s.replace("<","&lt;").replace(">","&gt;").replace("#[#","<").replace("#]#",">")
try:
try:
conf.color_theme = HTMLTheme2()
s,res = autorun_get_interactive_session(cmds, **kargs)
except StopAutorun,e:
e.code_run = to_html(e.code_run)
raise
finally:
conf.color_theme = ct
return to_html(s),res
def autorun_get_latex_interactive_session(cmds, **kargs):
ct = conf.color_theme
to_latex = lambda s: tex_escape(s).replace("@[@","{").replace("@]@","}").replace("@`@","\\")
try:
try:
conf.color_theme = LatexTheme2()
s,res = autorun_get_interactive_session(cmds, **kargs)
except StopAutorun,e:
e.code_run = to_latex(e.code_run)
raise
finally:
conf.color_theme = ct
return to_latex(s),res
| 1 | 9,409 | This is required, otherwise multiprocessing will (for some reason) crash | secdev-scapy | py |
@@ -19,6 +19,7 @@ package main
import (
"encoding/base64"
"fmt"
+ "github.com/algorand/go-algorand/data/basics"
"io"
"io/ioutil"
"os" | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/protocol"
"github.com/spf13/cobra"
)
var (
toAddress string
account string
amount uint64
fee uint64
firstValid uint64
lastValid uint64
txFilename string
outFilename string
rejectsFilename string
noteBase64 string
noteText string
sign bool
closeToAddress string
noWaitAfterSend bool
)
func init() {
clerkCmd.AddCommand(sendCmd)
clerkCmd.AddCommand(rawsendCmd)
clerkCmd.AddCommand(inspectCmd)
clerkCmd.AddCommand(signCmd)
// Wallet to be used for the clerk operation
clerkCmd.PersistentFlags().StringVarP(&walletName, "wallet", "w", "", "Set the wallet to be used for the selected operation")
// send flags
sendCmd.Flags().StringVarP(&account, "from", "f", "", "Account address to send the money from (If not specified, uses default account)")
sendCmd.Flags().StringVarP(&toAddress, "to", "t", "", "Address to send to money to (required)")
sendCmd.Flags().Uint64VarP(&amount, "amount", "a", 0, "The amount to be transferred (required), in microAlgos")
sendCmd.Flags().Uint64Var(&fee, "fee", 0, "The transaction fee (automatically determined by default), in microAlgos")
sendCmd.Flags().Uint64Var(&firstValid, "firstvalid", 0, "The first round where the transaction may be committed to the ledger (currently ignored)")
sendCmd.Flags().Uint64Var(&lastValid, "lastvalid", 0, "The last round where the transaction may be committed to the ledger (currently ignored)")
sendCmd.Flags().StringVar(¬eBase64, "noteb64", "", "Note (URL-base64 encoded)")
sendCmd.Flags().StringVarP(¬eText, "note", "n", "", "Note text (ignored if --noteb64 used also)")
sendCmd.Flags().StringVarP(&txFilename, "out", "o", "", "Dump an unsigned tx to the given file. In order to dump a signed transaction, pass -s")
sendCmd.Flags().BoolVarP(&sign, "sign", "s", false, "Use with -o to indicate that the dumped transaction should be signed")
sendCmd.Flags().StringVarP(&closeToAddress, "close-to", "c", "", "Close account and send remainder to this address")
sendCmd.Flags().BoolVarP(&noWaitAfterSend, "no-wait", "N", false, "Don't wait for transaction to commit")
sendCmd.MarkFlagRequired("to")
sendCmd.MarkFlagRequired("amount")
// rawsend flags
rawsendCmd.Flags().StringVarP(&txFilename, "filename", "f", "", "Filename of file containing raw transactions")
rawsendCmd.Flags().StringVarP(&rejectsFilename, "rejects", "r", "", "Filename for writing rejects to (default is txFilename.rej)")
rawsendCmd.Flags().BoolVarP(&noWaitAfterSend, "no-wait", "N", false, "Don't wait for transactions to commit")
rawsendCmd.MarkFlagRequired("filename")
signCmd.Flags().StringVarP(&txFilename, "infile", "i", "", "Partially-signed transaction file to add signature to")
signCmd.Flags().StringVarP(&outFilename, "outfile", "o", "", "Filename for writing the signed transaction")
signCmd.MarkFlagRequired("infile")
signCmd.MarkFlagRequired("outfile")
}
var clerkCmd = &cobra.Command{
Use: "clerk",
Short: "Provides the tools to control transactions ",
	Long:  `Collection of commands to support the management of transaction information.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
//If no arguments passed, we should fallback to help
cmd.HelpFunc()(cmd, args)
},
}
var sendCmd = &cobra.Command{
Use: "send",
Short: "Send money to an address",
Long: `Send money from one account to another. Note: by default, the money will be withdrawn from the default account. Creates a transaction sending amount tokens from fromAddr to toAddr. If the optional --fee is not provided, the transaction will use the recommended amount. If the optional --firstvalid and --lastvalid are provided, the transaction will only be valid from round firstValid to round lastValid. If broadcast of the transaction is successful, the transaction ID will be returned.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
// -s is invalid without -o
if txFilename == "" && sign {
reportErrorln(soFlagError)
}
dataDir := ensureSingleDataDir()
accountList := makeAccountsList(dataDir)
// Check if from was specified, else use default
if account == "" {
account = accountList.getDefaultAccount()
}
// Resolving friendly names
fromAddressResolved := accountList.getAddressByName(account)
toAddressResolved := accountList.getAddressByName(toAddress)
// Parse notes field
var noteBytes []byte
var err error
if cmd.Flags().Changed("noteb64") {
noteBytes, err = base64.StdEncoding.DecodeString(noteBase64)
if err != nil {
reportErrorf(malformedNote, noteBase64, err)
}
} else if cmd.Flags().Changed("note") {
noteBytes = []byte(noteText)
} else {
// Make sure that back-to-back, similar transactions will have a different txid
noteBytes = make([]byte, 8)
crypto.RandBytes(noteBytes[:])
}
// If closing an account, resolve that address as well
var closeToAddressResolved string
if closeToAddress != "" {
closeToAddressResolved = accountList.getAddressByName(closeToAddress)
}
client := ensureFullClient(dataDir)
if txFilename == "" {
// Sign and broadcast the tx
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
tx, err := client.SendPaymentFromWallet(wh, pw, fromAddressResolved, toAddressResolved, fee, amount, noteBytes, closeToAddressResolved)
// update information from Transaction
txid := tx.ID().String()
fee = tx.Fee.Raw
if err != nil {
reportErrorf(errorBroadcastingTX, err)
}
// Report tx details to user
reportInfof(infoTxIssued, amount, fromAddressResolved, toAddressResolved, txid, fee)
if noWaitAfterSend {
return
}
// Get current round information
stat, err := client.Status()
if err != nil {
reportErrorf(errorRequestFail, err)
}
for {
// Check if we know about the transaction yet
txn, err := client.PendingTransactionInformation(txid)
if err != nil {
reportErrorf(errorRequestFail, err)
}
if txn.ConfirmedRound > 0 {
reportInfof(infoTxCommitted, txid, txn.ConfirmedRound)
break
}
if txn.PoolError != "" {
reportErrorf(txPoolError, txid, txn.PoolError)
}
reportInfof(infoTxPending, txid, stat.LastRound)
stat, err = client.WaitForRound(stat.LastRound + 1)
if err != nil {
reportErrorf(errorRequestFail, err)
}
}
} else {
payment, err := client.ConstructPayment(fromAddressResolved, toAddressResolved, fee, amount, noteBytes, closeToAddressResolved)
if err != nil {
reportErrorf(errorConstructingTX, err)
}
var stxn transactions.SignedTxn
if sign {
// Sign the transaction
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
stxn, err = client.SignTransactionWithWallet(wh, pw, payment)
if err != nil {
reportErrorf(errorConstructingTX, err)
}
} else {
// Wrap in a transactions.SignedTxn with an empty sig.
// This way protocol.Encode will encode the transaction type
stxn, err = transactions.AssembleSignedTxn(payment, crypto.Signature{}, crypto.MultisigSig{})
if err != nil {
reportErrorf(errorConstructingTX, err)
}
stxn = populateBlankMultisig(client, dataDir, walletName, stxn)
}
// Write the SignedTxn to the output file
err = ioutil.WriteFile(txFilename, protocol.Encode(stxn), 0600)
if err != nil {
reportErrorf(fileWriteError, txFilename, err)
}
}
},
}
var rawsendCmd = &cobra.Command{
Use: "rawsend",
Short: "Send raw transactions",
Long: `Send raw transactions. The transactions must be stored in a file, encoded using msgpack as transactions.SignedTxn. Multiple transactions can be concatenated together in a file.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, args []string) {
if rejectsFilename == "" {
rejectsFilename = txFilename + ".rej"
}
data, err := ioutil.ReadFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dec := protocol.NewDecoderBytes(data)
client := ensureAlgodClient(ensureSingleDataDir())
txns := make(map[transactions.Txid]transactions.SignedTxn)
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
_, present := txns[txn.ID()]
if present {
reportErrorf(txDupError, txn.ID().String(), txFilename)
}
txns[txn.ID()] = txn
}
txnErrors := make(map[transactions.Txid]string)
pendingTxns := make(map[transactions.Txid]string)
for txid, txn := range txns {
// Broadcast the transaction
txidStr, err := client.BroadcastTransaction(txn)
if err != nil {
txnErrors[txid] = err.Error()
reportWarnf(errorBroadcastingTX, err)
continue
}
reportInfof(infoRawTxIssued, txidStr)
pendingTxns[txid] = txidStr
}
if noWaitAfterSend {
return
}
// Get current round information
stat, err := client.Status()
if err != nil {
reportErrorf(errorRequestFail, err)
}
for txid, txidStr := range pendingTxns {
for {
// Check if we know about the transaction yet
txn, err := client.PendingTransactionInformation(txidStr)
if err != nil {
txnErrors[txid] = err.Error()
reportWarnf(errorRequestFail, err)
continue
}
if txn.ConfirmedRound > 0 {
reportInfof(infoTxCommitted, txidStr, txn.ConfirmedRound)
break
}
if txn.PoolError != "" {
txnErrors[txid] = txn.PoolError
reportWarnf(txPoolError, txidStr, txn.PoolError)
continue
}
reportInfof(infoTxPending, txidStr, stat.LastRound)
stat, err = client.WaitForRound(stat.LastRound + 1)
if err != nil {
reportErrorf(errorRequestFail, err)
}
}
}
if len(txnErrors) > 0 {
fmt.Printf("Encountered errors in sending %d transactions:\n", len(txnErrors))
var rejectsData []byte
for txid, errmsg := range txnErrors {
fmt.Printf(" %s: %s\n", txid, errmsg)
rejectsData = append(rejectsData, protocol.Encode(txns[txid])...)
}
f, err := os.OpenFile(rejectsFilename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
if err != nil {
reportErrorf(fileWriteError, rejectsFilename, err.Error())
}
_, err = f.Write(rejectsData)
if err != nil {
reportErrorf(fileWriteError, rejectsFilename, err.Error())
}
f.Close()
fmt.Printf("Rejected transactions written to %s\n", rejectsFilename)
os.Exit(1)
}
},
}
var inspectCmd = &cobra.Command{
Use: "inspect",
Short: "print a transaction file",
Run: func(cmd *cobra.Command, args []string) {
for _, txFilename := range args {
data, err := ioutil.ReadFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dec := protocol.NewDecoderBytes(data)
count := 0
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
sti, err := inspectTxn(txn)
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
fmt.Printf("%s[%d]\n%s\n\n", txFilename, count, string(protocol.EncodeJSON(sti)))
count++
}
}
},
}
var signCmd = &cobra.Command{
Use: "sign -i INFILE -o OUTFILE",
Short: "Sign a transaction file",
Long: `Sign the passed transaction file, which may contain one or more transactions. If the infile and the outfile are the same, this overwrites the file with the new, signed data.`,
Args: validateNoPosArgsFn,
Run: func(cmd *cobra.Command, _ []string) {
data, err := ioutil.ReadFile(txFilename)
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
dataDir := ensureSingleDataDir()
client := ensureKmdClient(dataDir)
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
var outData []byte
dec := protocol.NewDecoderBytes(data)
for {
// transaction file comes in as a SignedTxn with no signature
var unsignedTxn transactions.SignedTxn
err = dec.Decode(&unsignedTxn)
if err == io.EOF {
break
}
if err != nil {
reportErrorf(txDecodeError, txFilename, err)
}
signedTxn, err := client.SignTransactionWithWallet(wh, pw, unsignedTxn.Txn)
if err != nil {
reportErrorf(errorSigningTX, err)
}
outData = append(outData, protocol.Encode(signedTxn)...)
}
err = ioutil.WriteFile(outFilename, outData, 0600)
if err != nil {
reportErrorf(fileWriteError, outFilename, err)
}
},
}
| 1 | 35,302 | Please put in a separate line-separated block | algorand-go-algorand | go |
@@ -100,7 +100,7 @@ func (cmd *Command) Wait() error {
func (cmd *Command) Kill() error {
err := cmd.connectionManager.Disconnect()
if err != nil {
- return err
+ fmt.Printf("Disconnect error: %s\n", err)
}
cmd.httpApiServer.Stop() | 1 | package client
import (
"fmt"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/mysterium/node/client_connection"
"github.com/mysterium/node/communication"
nats_dialog "github.com/mysterium/node/communication/nats/dialog"
nats_discovery "github.com/mysterium/node/communication/nats/discovery"
"github.com/mysterium/node/identity"
"github.com/mysterium/node/ip"
"github.com/mysterium/node/openvpn"
"github.com/mysterium/node/openvpn/middlewares/client/bytescount"
"github.com/mysterium/node/server"
"github.com/mysterium/node/tequilapi"
tequilapi_endpoints "github.com/mysterium/node/tequilapi/endpoints"
"time"
)
//NewCommand function creates new client command by given options
func NewCommand(options CommandOptions) *Command {
return NewCommandWith(
options,
server.NewClient(),
)
}
//NewCommandWith does the same as NewCommand with possibility to override mysterium api client for external communication
func NewCommandWith(
options CommandOptions,
mysteriumClient server.Client,
) *Command {
nats_discovery.Bootstrap()
openvpn.Bootstrap()
keystoreInstance := keystore.NewKeyStore(options.DirectoryKeystore, keystore.StandardScryptN, keystore.StandardScryptP)
identityManager := identity.NewIdentityManager(keystoreInstance)
dialogEstablisherFactory := func(myID identity.Identity) communication.DialogEstablisher {
return nats_dialog.NewDialogEstablisher(myID, identity.NewSigner(keystoreInstance, myID))
}
signerFactory := func(id identity.Identity) identity.Signer {
return identity.NewSigner(keystoreInstance, id)
}
statsKeeper := bytescount.NewSessionStatsKeeper(time.Now)
vpnClientFactory := client_connection.ConfigureVpnClientFactory(
mysteriumClient,
options.DirectoryConfig,
options.DirectoryRuntime,
signerFactory,
statsKeeper,
)
connectionManager := client_connection.NewManager(mysteriumClient, dialogEstablisherFactory, vpnClientFactory, statsKeeper)
router := tequilapi.NewAPIRouter()
tequilapi_endpoints.AddRoutesForIdentities(router, identityManager, mysteriumClient, signerFactory)
tequilapi_endpoints.AddRoutesForConnection(router, connectionManager, ip.NewResolver(), statsKeeper)
tequilapi_endpoints.AddRoutesForProposals(router, mysteriumClient)
httpAPIServer := tequilapi.NewServer(options.TequilapiAddress, options.TequilapiPort, router)
return &Command{
connectionManager,
httpAPIServer,
}
}
//Command represent entrypoint for Mysterium client with top level components
type Command struct {
connectionManager client_connection.Manager
httpApiServer tequilapi.APIServer
}
//Run starts Tequilapi service - does not block
func (cmd *Command) Run() error {
err := cmd.httpApiServer.StartServing()
if err != nil {
return err
}
port, err := cmd.httpApiServer.Port()
if err != nil {
return err
}
fmt.Printf("Api started on: %d\n", port)
return nil
}
//Wait blocks until tequilapi service is stopped
func (cmd *Command) Wait() error {
return cmd.httpApiServer.Wait()
}
//Kill stops tequilapi service
func (cmd *Command) Kill() error {
err := cmd.connectionManager.Disconnect()
if err != nil {
return err
}
cmd.httpApiServer.Stop()
fmt.Printf("Api stopped\n")
return nil
}
| 1 | 10,447 | Should error be eaten? If so, not clear why | mysteriumnetwork-node | go |
@@ -145,4 +145,7 @@ class AuthorizationPolicy(core_authorization.AuthorizationPolicy):
class RouteFactory(core_authorization.RouteFactory):
- pass
+ def __init__(self, request):
+ super(RouteFactory, self).__init__(request)
+ if self.on_collection and self.resource_name == 'bucket':
+ self.force_empty_list = True | 1 | from kinto.core import authorization as core_authorization
from pyramid.security import IAuthorizationPolicy
from zope.interface import implementer
# Vocab really matters when you deal with permissions. Let's do a quick recap
# of the terms used here:
#
# Object URI:
# An unique identifier for an object.
# for instance, /buckets/blog/collections/articles/records/article1
#
# Object:
# A common denomination of an object (e.g. "collection" or "record")
#
# Unbound permission:
# A permission not bound to an object (e.g. "create")
#
# Bound permission:
# A permission bound to an object (e.g. "collection:create")
# Dictionary which lists all permissions a given permission enables.
PERMISSIONS_INHERITANCE_TREE = {
'bucket:write': {
'bucket': ['write']
},
'bucket:read': {
'bucket': ['write', 'read']
},
'bucket:group:create': {
'bucket': ['write', 'group:create']
},
'bucket:collection:create': {
'bucket': ['write', 'collection:create']
},
'group:write': {
'bucket': ['write'],
'group': ['write']
},
'group:read': {
'bucket': ['write', 'read'],
'group': ['write', 'read']
},
'collection:write': {
'bucket': ['write'],
'collection': ['write'],
},
'collection:read': {
'bucket': ['write', 'read'],
'collection': ['write', 'read'],
},
'collection:record:create': {
'bucket': ['write'],
'collection': ['write', 'record:create']
},
'record:write': {
'bucket': ['write'],
'collection': ['write'],
'record': ['write']
},
'record:read': {
'bucket': ['write', 'read'],
'collection': ['write', 'read'],
'record': ['write', 'read']
}
}
def get_object_type(object_uri):
"""Return the type of an object from its id."""
obj_parts = object_uri.split('/')
if len(obj_parts) % 2 == 0:
object_uri = '/'.join(obj_parts[:-1])
# Order matters here. More precise is tested first.
if 'records' in object_uri:
obj_type = 'record'
elif 'collections' in object_uri:
obj_type = 'collection'
elif 'groups' in object_uri:
obj_type = 'group'
elif 'buckets' in object_uri:
obj_type = 'bucket'
else:
obj_type = None
return obj_type
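# Editor's sketch (hypothetical helper, not part of the original module):
# tracing get_object_type() shows that URIs with an even number of '/'-parts
# (i.e. plural endpoints) are truncated to their parent before matching, so a
# plural endpoint resolves to the parent object type.
def _demo_get_object_type():
    assert get_object_type('/buckets/blog') == 'bucket'
    assert get_object_type('/buckets/blog/collections/articles') == 'collection'
    # Plural endpoint: 4 parts -> truncated to '/buckets/blog' -> 'bucket'.
    assert get_object_type('/buckets/blog/collections') == 'bucket'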
def build_permission_tuple(obj_type, unbound_permission, obj_parts):
"""Returns a tuple of (object_uri, unbound_permission)"""
PARTS_LENGTH = {
'bucket': 3,
'collection': 5,
'group': 5,
'record': 7
}
if obj_type not in PARTS_LENGTH:
raise ValueError('Invalid object type: %s' % obj_type)
if PARTS_LENGTH[obj_type] > len(obj_parts):
        raise ValueError('You cannot build children keys from their parent key. '
'Trying to build type "%s" from object key "%s".' % (
obj_type, '/'.join(obj_parts)))
length = PARTS_LENGTH[obj_type]
return ('/'.join(obj_parts[:length]), unbound_permission)
def build_permissions_set(object_uri, unbound_permission,
inheritance_tree=None):
"""Build a set of all permissions that can grant access to the given
object URI and unbound permission.
    >>> build_permissions_set('/buckets/blog', 'write')
    {('/buckets/blog', 'write')}
"""
if inheritance_tree is None:
inheritance_tree = PERMISSIONS_INHERITANCE_TREE
obj_type = get_object_type(object_uri)
# Unknown object type, does not map the INHERITANCE_TREE.
# In that case, the set of related permissions is empty.
if obj_type is None:
return set()
bound_permission = '%s:%s' % (obj_type, unbound_permission)
granters = set()
obj_parts = object_uri.split('/')
for obj, permission_list in inheritance_tree[bound_permission].items():
for permission in permission_list:
granters.add(build_permission_tuple(obj, permission, obj_parts))
return granters
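# Editor's sketch (hypothetical helper, not part of the original module): for
# a record-level read, PERMISSIONS_INHERITANCE_TREE expands into write and
# read permissions on the bucket, the collection and the record itself.
def _demo_build_permissions_set():
    uri = '/buckets/blog/collections/articles/records/article1'
    assert build_permissions_set(uri, 'read') == {
        ('/buckets/blog', 'write'),
        ('/buckets/blog', 'read'),
        ('/buckets/blog/collections/articles', 'write'),
        ('/buckets/blog/collections/articles', 'read'),
        (uri, 'write'),
        (uri, 'read'),
    }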
@implementer(IAuthorizationPolicy)
class AuthorizationPolicy(core_authorization.AuthorizationPolicy):
def get_bound_permissions(self, *args, **kwargs):
return build_permissions_set(*args, **kwargs)
class RouteFactory(core_authorization.RouteFactory):
pass
| 1 | 9,166 | This attribute is not defined if the condition is not met. Instead, you could define another RouteFactory (e.g. `BucketRouteFactory` with a class attribute like `allow_empty_list`) | Kinto-kinto | py |
@@ -103,6 +103,13 @@ func WithCondition(cond hivev1.ClusterDeploymentCondition) Option {
}
}
+// WithInstalledTimestamp adds the specified InstalledTimestamp to ClusterDeployment.Status
+func WithInstalledTimestamp(timestamp metav1.Time) Option {
+ return func(clusterDeployment *hivev1.ClusterDeployment) {
+ clusterDeployment.Status.InstalledTimestamp = ×tamp
+ }
+}
+
func WithUnclaimedClusterPoolReference(namespace, poolName string) Option {
return WithClusterPoolReference(namespace, poolName, "")
} | 1 | package clusterdeployment
import (
"time"
configv1 "github.com/openshift/api/config/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
"github.com/openshift/hive/pkg/test/generic"
)
// Option defines a function signature for any function that wants to be passed into Build
type Option func(*hivev1.ClusterDeployment)
// Build runs each of the functions passed in to generate the object.
func Build(opts ...Option) *hivev1.ClusterDeployment {
retval := &hivev1.ClusterDeployment{}
for _, o := range opts {
o(retval)
}
return retval
}
type Builder interface {
Build(opts ...Option) *hivev1.ClusterDeployment
Options(opts ...Option) Builder
GenericOptions(opts ...generic.Option) Builder
}
func BasicBuilder() Builder {
return &builder{}
}
func FullBuilder(namespace, name string, typer runtime.ObjectTyper) Builder {
b := &builder{}
return b.GenericOptions(
generic.WithTypeMeta(typer),
generic.WithResourceVersion("1"),
generic.WithNamespace(namespace),
generic.WithName(name),
)
}
type builder struct {
options []Option
}
func (b *builder) Build(opts ...Option) *hivev1.ClusterDeployment {
return Build(append(b.options, opts...)...)
}
func (b *builder) Options(opts ...Option) Builder {
return &builder{
options: append(b.options, opts...),
}
}
func (b *builder) GenericOptions(opts ...generic.Option) Builder {
options := make([]Option, len(opts))
for i, o := range opts {
options[i] = Generic(o)
}
return b.Options(options...)
}
// Generic allows common functions applicable to all objects to be used as Options to Build
func Generic(opt generic.Option) Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
opt(clusterDeployment)
}
}
// WithName sets the object.Name field when building an object with Build.
func WithName(name string) Option {
return Generic(generic.WithName(name))
}
// WithNamespace sets the object.Namespace field when building an object with Build.
func WithNamespace(namespace string) Option {
return Generic(generic.WithNamespace(namespace))
}
// WithLabel sets the specified label on the supplied object.
func WithLabel(key, value string) Option {
return Generic(generic.WithLabel(key, value))
}
// WithCondition adds the specified condition to the ClusterDeployment
func WithCondition(cond hivev1.ClusterDeploymentCondition) Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
for i, c := range clusterDeployment.Status.Conditions {
if c.Type == cond.Type {
clusterDeployment.Status.Conditions[i] = cond
return
}
}
clusterDeployment.Status.Conditions = append(clusterDeployment.Status.Conditions, cond)
}
}
func WithUnclaimedClusterPoolReference(namespace, poolName string) Option {
return WithClusterPoolReference(namespace, poolName, "")
}
func WithClusterPoolReference(namespace, poolName, claimName string) Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
clusterDeployment.Spec.ClusterPoolRef = &hivev1.ClusterPoolReference{
Namespace: namespace,
PoolName: poolName,
ClaimName: claimName,
}
}
}
func Installed() Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
clusterDeployment.Spec.Installed = true
}
}
func InstalledTimestamp(instTime time.Time) Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
clusterDeployment.Spec.Installed = true
clusterDeployment.Status.InstalledTimestamp = &metav1.Time{Time: instTime}
}
}
func WithClusterVersion(version string) Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
clusterDeployment.Status.ClusterVersionStatus = &configv1.ClusterVersionStatus{
Desired: configv1.Update{
Version: version,
},
}
}
}
func WithPowerState(powerState hivev1.ClusterPowerState) Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
clusterDeployment.Spec.PowerState = powerState
}
}
func WithHibernateAfter(dur time.Duration) Option {
return func(clusterDeployment *hivev1.ClusterDeployment) {
clusterDeployment.Spec.HibernateAfter = &metav1.Duration{Duration: dur}
}
}
| 1 | 14,647 | Can we use the existing `InstalledTimestamp` function? | openshift-hive | go |
@@ -975,14 +975,15 @@ public class AddProductActivity extends AppCompatActivity {
if (!editionMode) {
if (addProductOverviewFragment.areRequiredFieldsEmpty()) {
viewPager.setCurrentItem(0, true);
- } else if (isNutritionDataAvailable() && addProductNutritionFactsFragment.containsInvalidValue()) {
+ } else if (isNutritionDataAvailable() && addProductNutritionFactsFragment.isFieldsInvalid()) {
viewPager.setCurrentItem(2, true);
} else {
saveProduct();
}
} else {
// edit mode, therefore do not check whether front image is empty or not however do check the nutrition facts values.
- if ((isNutritionDataAvailable()) && addProductNutritionFactsFragment.containsInvalidValue()) {
+ if (isNutritionDataAvailable() && addProductNutritionFactsFragment.isFieldsInvalid()) {
+            // If any field is invalid and nutrition data is present, scroll to the nutrition fragment
viewPager.setCurrentItem(2, true);
} else {
saveEditedProduct(); | 1 | package openfoodfacts.github.scrachx.openfood.views;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.Build;
import android.os.Bundle;
import android.util.Log;
import android.view.Gravity;
import android.view.LayoutInflater;
import android.view.MenuItem;
import android.view.View;
import android.widget.ImageView;
import android.widget.ProgressBar;
import android.widget.TextView;
import android.widget.Toast;
import androidx.appcompat.app.ActionBar;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.app.ActivityOptionsCompat;
import androidx.viewpager.widget.ViewPager;
import com.afollestad.materialdialogs.MaterialDialog;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.android.material.snackbar.Snackbar;
import com.squareup.picasso.Callback;
import com.squareup.picasso.Picasso;
import org.apache.commons.lang.StringUtils;
import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import javax.inject.Inject;
import butterknife.BindView;
import butterknife.ButterKnife;
import butterknife.OnClick;
import butterknife.OnPageChange;
import io.reactivex.SingleObserver;
import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.disposables.Disposable;
import io.reactivex.schedulers.Schedulers;
import okhttp3.MediaType;
import okhttp3.RequestBody;
import openfoodfacts.github.scrachx.openfood.BuildConfig;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.fragments.AddProductIngredientsFragment;
import openfoodfacts.github.scrachx.openfood.fragments.AddProductNutritionFactsFragment;
import openfoodfacts.github.scrachx.openfood.fragments.AddProductOverviewFragment;
import openfoodfacts.github.scrachx.openfood.fragments.AddProductPhotosFragment;
import openfoodfacts.github.scrachx.openfood.images.ProductImage;
import openfoodfacts.github.scrachx.openfood.models.OfflineSavedProduct;
import openfoodfacts.github.scrachx.openfood.models.OfflineSavedProductDao;
import openfoodfacts.github.scrachx.openfood.models.Product;
import openfoodfacts.github.scrachx.openfood.models.ProductImageField;
import openfoodfacts.github.scrachx.openfood.models.State;
import openfoodfacts.github.scrachx.openfood.models.ToUploadProduct;
import openfoodfacts.github.scrachx.openfood.models.ToUploadProductDao;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIClient;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIService;
import openfoodfacts.github.scrachx.openfood.utils.FileUtils;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import openfoodfacts.github.scrachx.openfood.views.adapters.ProductFragmentPagerAdapter;
import static openfoodfacts.github.scrachx.openfood.utils.Utils.isExternalStorageWritable;
public class AddProductActivity extends AppCompatActivity {
private static final String KEY_USER_ID = "user_id";
@SuppressWarnings("squid:S2068")
private static final String KEY_PASSWORD = "password";
public static final String PARAM_LANGUAGE = "lang";
private static final String ADD_TAG = AddProductActivity.class.getSimpleName();
public static final String UPLOADED_TO_SERVER = "uploadedToServer";
public static final String MODIFY_NUTRITION_PROMPT = "modify_nutrition_prompt";
public static final String MODIFY_CATEGORY_PROMPT = "modify_category_prompt";
public static final String KEY_EDIT_PRODUCT = "edit_product";
public static final String KEY_IS_EDITION = "is_edition";
private final Map<String, String> productDetails = new HashMap<>();
@Inject
OpenFoodAPIService client;
@BindView(R.id.overview_indicator)
View overviewIndicator;
@BindView(R.id.ingredients_indicator)
View ingredientsIndicator;
@BindView(R.id.nutrition_facts_indicator)
View nutritionFactsIndicator;
@BindView(R.id.text_nutrition_facts_indicator)
TextView nutritionFactsIndicatorText;
@BindView(R.id.viewpager)
ViewPager viewPager;
private AddProductOverviewFragment addProductOverviewFragment = new AddProductOverviewFragment();
private AddProductIngredientsFragment addProductIngredientsFragment = new AddProductIngredientsFragment();
private AddProductNutritionFactsFragment addProductNutritionFactsFragment = new AddProductNutritionFactsFragment();
private AddProductPhotosFragment addProductPhotosFragment = new AddProductPhotosFragment();
private Product mProduct;
private ToUploadProductDao mToUploadProductDao;
private OfflineSavedProductDao mOfflineSavedProductDao;
private Disposable mainDisposable;
private String[] imagesFilePath = new String[3];
private OfflineSavedProduct offlineSavedProduct;
private Map<String, String> initialValues;
private Bundle mainBundle = new Bundle();
private MaterialDialog materialDialog;
private boolean imageFrontUploaded;
private boolean imageIngredientsUploaded;
private boolean imageNutritionFactsUploaded;
private boolean editionMode;
// These fields are used to compare the existing values of a product already present on the server with the product which was saved offline and is being uploaded.
private String ingredientsTextOnServer;
private String productNameOnServer;
private String quantityOnServer;
private String linkOnServer;
private String ingredientsImageOnServer;
public static File getCameraPicLocation(Context context) {
File cacheDir = context.getCacheDir();
if (isExternalStorageWritable()) {
cacheDir = context.getExternalCacheDir();
}
File dir = new File(cacheDir, "EasyImage");
if (!dir.exists()) {
if (dir.mkdirs()) {
Log.i(ADD_TAG, "Directory created");
} else {
Log.i(ADD_TAG, "Couldn't create directory");
}
}
return dir;
}
public static void clearCachedCameraPic(Context context) {
File[] files = getCameraPicLocation(context).listFiles();
for (File file : files) {
if (file.delete()) {
Log.i(ADD_TAG, "Deleted cached photo");
} else {
Log.i(ADD_TAG, "Couldn't delete cached photo");
}
}
}
@OnPageChange(value = R.id.viewpager, callback = OnPageChange.Callback.PAGE_SELECTED)
void onPageSelected(int position) {
switch (position) {
case 0:
updateTimelineIndicator(1, 0, 0);
break;
case 1:
updateTimelineIndicator(2, 1, 0);
break;
case 2:
updateTimelineIndicator(2, 2, 1);
break;
default:
updateTimelineIndicator(1, 0, 0);
}
}
/**
* This method is used to update the timeline.
* 0 means inactive stage, 1 means active stage and 2 means completed stage
*
* @param overviewStage change the state of overview indicator
* @param ingredientsStage change the state of ingredients indicator
* @param nutritionFactsStage change the state of nutrition facts indicator
*/
private void updateTimelineIndicator(int overviewStage, int ingredientsStage, int nutritionFactsStage) {
updateTimeLine(overviewStage, overviewIndicator);
updateTimeLine(ingredientsStage, ingredientsIndicator);
updateTimeLine(nutritionFactsStage, nutritionFactsIndicator);
}
private static void updateTimeLine(int stage, View view) {
switch (stage) {
case 0:
view.setBackgroundResource(R.drawable.stage_inactive);
break;
case 1:
view.setBackgroundResource(R.drawable.stage_active);
break;
case 2:
view.setBackgroundResource(R.drawable.stage_complete);
break;
}
}
@Override
public void onBackPressed() {
if (offlineSavedProduct != null) {
checkFields();
} else {
new MaterialDialog.Builder(this)
.content(R.string.save_product)
.positiveText(R.string.txtSave)
.negativeText(R.string.txtPictureNeededDialogNo)
.onPositive((dialog, which) -> checkFields())
.onNegative((dialog, which) -> super.onBackPressed())
.show();
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (item.getItemId() == android.R.id.home) {
if (offlineSavedProduct != null) {
checkFields();
} else {
new MaterialDialog.Builder(this)
.content(R.string.save_product)
.positiveText(R.string.txtSave)
.negativeText(R.string.txtPictureNeededDialogNo)
.onPositive((dialog, which) -> checkFields())
.onNegative((dialog, which) -> finish())
.show();
}
}
return super.onOptionsItemSelected(item);
}
@Override
protected void onCreate(Bundle savedInstanceState) {
OFFApplication.getAppComponent().inject(this);
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_add_product);
ButterKnife.bind(this);
setTitle(R.string.offline_product_addition_title);
ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
getSupportActionBar().setDisplayHomeAsUpEnabled(true);
}
mToUploadProductDao = Utils.getAppDaoSession(this).getToUploadProductDao();
mOfflineSavedProductDao = Utils.getAppDaoSession(this).getOfflineSavedProductDao();
final State state = (State) getIntent().getSerializableExtra("state");
offlineSavedProduct = (OfflineSavedProduct) getIntent().getSerializableExtra("edit_offline_product");
Product mEditProduct = (Product) getIntent().getSerializableExtra(KEY_EDIT_PRODUCT);
if (getIntent().getBooleanExtra("perform_ocr", false)) {
mainBundle.putBoolean("perform_ocr", true);
}
if (getIntent().getBooleanExtra("send_updated", false)) {
mainBundle.putBoolean("send_updated", true);
}
if (state != null) {
mProduct = state.getProduct();
// Search if the barcode already exists in the OfflineSavedProducts db
offlineSavedProduct = mOfflineSavedProductDao.queryBuilder().where(OfflineSavedProductDao.Properties.Barcode.eq(mProduct.getCode())).unique();
}
if (mEditProduct != null) {
setTitle(R.string.edit_product_title);
mProduct = mEditProduct;
editionMode = true;
mainBundle.putBoolean(KEY_IS_EDITION, true);
initialValues = new HashMap<>();
} else if (offlineSavedProduct != null) {
mainBundle.putSerializable("edit_offline_product", offlineSavedProduct);
// Save the already existing images in productDetails for UI
imagesFilePath[0] = offlineSavedProduct.getProductDetailsMap().get("image_front");
imagesFilePath[1] = offlineSavedProduct.getProductDetailsMap().get("image_ingredients");
imagesFilePath[2] = offlineSavedProduct.getProductDetailsMap().get("image_nutrition_facts");
// get the status of images from productDetailsMap, whether uploaded or not
imageFrontUploaded = "true".equals(offlineSavedProduct.getProductDetailsMap().get("image_front_uploaded"));
imageIngredientsUploaded = "true".equals(offlineSavedProduct.getProductDetailsMap().get("image_ingredients_uploaded"));
imageNutritionFactsUploaded = "true".equals(offlineSavedProduct.getProductDetailsMap().get("image_nutrition_facts_uploaded"));
}
if (state == null && offlineSavedProduct == null && mEditProduct == null) {
Toast.makeText(this, R.string.error_adding_product, Toast.LENGTH_SHORT).show();
finish();
}
setupViewPager(viewPager);
}
public Map<String, String> getInitialValues() {
return initialValues;
}
@Override
public void onDestroy() {
super.onDestroy();
if (materialDialog != null && materialDialog.isShowing()) {
materialDialog.dismiss();
}
if (mainDisposable != null && !mainDisposable.isDisposed()) {
mainDisposable.dispose();
}
clearCachedCameraPic(this);
}
private void setupViewPager(ViewPager viewPager) {
ProductFragmentPagerAdapter adapterResult = new ProductFragmentPagerAdapter(getSupportFragmentManager());
mainBundle.putSerializable("product", mProduct);
addProductOverviewFragment.setArguments(mainBundle);
addProductIngredientsFragment.setArguments(mainBundle);
adapterResult.addFragment(addProductOverviewFragment, "Overview");
adapterResult.addFragment(addProductIngredientsFragment, "Ingredients");
if (isNutritionDataAvailable()) {
addProductNutritionFactsFragment.setArguments(mainBundle);
adapterResult.addFragment(addProductNutritionFactsFragment, "Nutrition Facts");
} else if (BuildConfig.FLAVOR.equals("obf") || BuildConfig.FLAVOR.equals("opf")) {
nutritionFactsIndicatorText.setText(R.string.photos);
addProductPhotosFragment.setArguments(mainBundle);
adapterResult.addFragment(addProductPhotosFragment, "Photos");
}
viewPager.setOffscreenPageLimit(2);
viewPager.setAdapter(adapterResult);
}
private void saveProduct() {
addProductOverviewFragment.getDetails();
addProductIngredientsFragment.getDetails();
if (isNutritionDataAvailable()) {
addProductNutritionFactsFragment.getDetails(productDetails);
}
addLoginInfoInProductDetails();
String code = productDetails.get("code");
String fields = "link,quantity,image_ingredients_url,ingredients_text_" + getProductLanguageForEdition() + ",product_name_" + getProductLanguageForEdition();
client.getProductByBarcodeSingle(code, fields, Utils.getUserAgent(Utils.HEADER_USER_AGENT_SEARCH))
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<State>() {
@Override
public void onSubscribe(Disposable d) {
MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.toastSending)
.content(R.string.please_wait)
.cancelable(false)
.progress(true, 0);
materialDialog = builder.build();
materialDialog.show();
}
@Override
public void onSuccess(State state) {
materialDialog.dismiss();
if (state.getStatus() == 0) {
                            // Product doesn't exist yet on the server. Add it as is.
checkFrontImageUploadStatus();
} else {
// Product already exists on the server. Compare values saved locally with the values existing on server.
ingredientsTextOnServer = state.getProduct().getIngredientsText(getProductLanguageForEdition());
productNameOnServer = state.getProduct().getProductName(getProductLanguageForEdition());
quantityOnServer = state.getProduct().getQuantity();
linkOnServer = state.getProduct().getManufactureUrl();
ingredientsImageOnServer = state.getProduct().getImageIngredientsUrl();
checkForExistingIngredients();
}
}
@Override
public void onError(Throwable e) {
materialDialog.dismiss();
saveProductOffline();
}
});
}
/**
* Checks if ingredients already exist on server and compare it with the ingredients stored locally.
*/
private void checkForExistingIngredients() {
String lc = getLanguageFromDetails();
if (ingredientsTextOnServer != null && !ingredientsTextOnServer.isEmpty() && productDetails.get("ingredients_text" + "_" + lc) != null) {
MaterialDialog.Builder builder = new MaterialDialog.Builder(this)
.title(R.string.ingredients_overwrite)
.customView(R.layout.dialog_compare_ingredients, true)
.positiveText(R.string.choose_mine)
.negativeText(R.string.keep_previous_version)
.onPositive((dialog, which) -> {
dialog.dismiss();
checkForExistingProductName();
})
.onNegative((dialog, which) -> {
dialog.dismiss();
productDetails.remove("ingredients_text" + "_" + lc);
productDetails.remove("image_ingredients");
imagesFilePath[1] = null;
checkForExistingProductName();
});
MaterialDialog dialog = builder.build();
dialog.show();
View view = dialog.getCustomView();
if (view != null) {
ImageView imageLocal = view.findViewById(R.id.image_ingredients_local);
ImageView imageServer = view.findViewById(R.id.image_ingredients_server);
TextView ingredientsLocal = view.findViewById(R.id.txt_ingredients_local);
TextView ingredientsServer = view.findViewById(R.id.txt_ingredients_server);
ProgressBar imageProgressServer = view.findViewById(R.id.image_progress_server);
ProgressBar imageProgressLocal = view.findViewById(R.id.image_progress_local);
ingredientsLocal.setText(productDetails.get("ingredients_text" + "_" + lc));
ingredientsServer.setText(ingredientsTextOnServer);
Picasso.get()
.load(ingredientsImageOnServer)
.error(R.drawable.placeholder_thumb)
.into(imageServer, new Callback() {
@Override
public void onSuccess() {
imageProgressServer.setVisibility(View.GONE);
// Add option to zoom image.
imageServer.setOnClickListener(v -> {
showFullscreen(ingredientsImageOnServer, imageServer);
});
}
@Override
public void onError(Exception ex) {
imageProgressServer.setVisibility(View.GONE);
}
});
Picasso.get()
.load(FileUtils.LOCALE_FILE_SCHEME + imagesFilePath[1])
.error(R.drawable.placeholder_thumb)
.into(imageLocal, new Callback() {
@Override
public void onSuccess() {
imageProgressLocal.setVisibility(View.GONE);
// Add option to zoom image.
imageLocal.setOnClickListener(v -> {
showFullscreen(FileUtils.LOCALE_FILE_SCHEME + imagesFilePath[1], imageLocal);
});
}
@Override
public void onError(Exception ex) {
imageProgressLocal.setVisibility(View.GONE);
}
});
}
} else {
checkForExistingProductName();
}
}
public void showFullscreen(String s, ImageView imageLocal) {
Intent intent = new Intent(AddProductActivity.this, ProductImageManagementActivity.class);
Bundle bundle = new Bundle();
bundle.putString("imageurl", s);
intent.putExtras(bundle);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
ActivityOptionsCompat options = ActivityOptionsCompat.
makeSceneTransitionAnimation(AddProductActivity.this, imageLocal,
getString(R.string.product_transition));
startActivity(intent, options.toBundle());
} else {
startActivity(intent);
}
}
private String getLanguageFromDetails() {
return productDetails.get(PARAM_LANGUAGE) != null ? productDetails.get(PARAM_LANGUAGE) : "en";
}
/**
* Checks if product name already exist on server and compare it with the product name stored locally.
*/
private void checkForExistingProductName() {
String lc = getLanguageFromDetails();
if (productNameOnServer != null && !productNameOnServer.isEmpty() && productDetails.get("product_name" + "_" + lc) != null) {
new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.product_name_overwrite)
.content(getString(R.string.yours) + productDetails.get("product_name" + "_" + lc) + "\n" + getString(R.string.currently_on,
getString(R.string.app_name_long)) + productNameOnServer)
.positiveText(R.string.choose_mine)
.negativeText(R.string.keep_previous_version)
.onPositive((dialog, which) -> {
dialog.dismiss();
checkForExistingQuantity();
})
.onNegative((dialog, which) -> {
dialog.dismiss();
productDetails.remove("product_name" + "_" + lc);
checkForExistingQuantity();
})
.build()
.show();
} else {
checkForExistingQuantity();
}
}
/**
* Checks if quantity already exist on server and compare it with the quantity stored locally.
*/
private void checkForExistingQuantity() {
if (quantityOnServer != null && !quantityOnServer.isEmpty() && productDetails.get("quantity") != null) {
new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.quantity_overwrite)
.content(getString(R.string.yours) + productDetails.get("quantity") + "\n" + getString(R.string.currently_on, getString(R.string.app_name_long)) + quantityOnServer)
.positiveText(R.string.choose_mine)
.negativeText(R.string.keep_previous_version)
.onPositive((dialog, which) -> {
dialog.dismiss();
checkForExistingLink();
})
.onNegative((dialog, which) -> {
dialog.dismiss();
productDetails.remove("quantity");
checkForExistingLink();
})
.build()
.show();
} else {
checkForExistingLink();
}
}
/**
* Checks if link already exist on server and compare it with the link stored locally.
*/
private void checkForExistingLink() {
if (linkOnServer != null && !linkOnServer.isEmpty() && productDetails.get("link") != null) {
new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.link_overwrite)
.content(getString(R.string.yours) + productDetails.get("link") + "\n" + getString(R.string.currently_on, getString(R.string.app_name_long)) + linkOnServer)
.positiveText(R.string.choose_mine)
.negativeText(R.string.keep_previous_version)
.onPositive((dialog, which) -> {
dialog.dismiss();
checkFrontImageUploadStatus();
})
.onNegative((dialog, which) -> {
dialog.dismiss();
productDetails.remove("link");
checkFrontImageUploadStatus();
})
.build()
.show();
} else {
checkFrontImageUploadStatus();
}
}
/**
* Upload and set the front image if it is not uploaded already.
*/
private void checkFrontImageUploadStatus() {
String code = productDetails.get("code");
if (!imageFrontUploaded && imagesFilePath[0] != null && !imagesFilePath[0].isEmpty()) {
// front image is not yet uploaded.
File photoFile = new File(imagesFilePath[0]);
Map<String, RequestBody> imgMap = new HashMap<>();
RequestBody barcode = createTextPlain(code);
RequestBody imageField = createTextPlain(ProductImageField.FRONT.toString() + '_' + getProductLanguageForEdition());
RequestBody image = ProductImage.createImageRequest(photoFile);
imgMap.put("code", barcode);
imgMap.put("imagefield", imageField);
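            // Note (added for clarity; not in the original): the escaped quotes in
            // the map key below appear to smuggle a filename into the multipart
            // Content-Disposition header, since the key is used verbatim as the
            // form-data part name by the HTTP client.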
imgMap.put("imgupload_front\"; filename=\"front_" + getProductLanguageForEdition() + ".png\"", image);
// Attribute the upload to the connected user
addLoginPasswordInfo(imgMap);
client.saveImageSingle(imgMap)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.uploading_front_image)
.content(R.string.please_wait)
.cancelable(false)
.progress(true, 0);
materialDialog = builder.build();
materialDialog.show();
}
@Override
public void onSuccess(JsonNode jsonNode) {
String status = jsonNode.get("status").asText();
if (status.equals("status not ok")) {
materialDialog.dismiss();
String error = jsonNode.get("error").asText();
if (error.equals("This picture has already been sent.")) {
imageFrontUploaded = true;
checkIngredientsImageUploadStatus();
} else {
new MaterialDialog.Builder(AddProductActivity.this).title(R.string.uploading_front_image)
.content(error).show();
}
} else {
imageFrontUploaded = true;
Map<String, String> queryMap = buildImageQueryMap(jsonNode);
client.editImageSingle(code, queryMap)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
}
@Override
public void onSuccess(JsonNode jsonNode) {
materialDialog.dismiss();
checkIngredientsImageUploadStatus();
}
@Override
public void onError(Throwable e) {
dialogNetworkIssueWhileUploadingImages();
}
});
}
}
@Override
public void onError(Throwable e) {
dialogNetworkIssueWhileUploadingImages();
}
});
} else {
// front image is uploaded, check the status of ingredients image.
checkIngredientsImageUploadStatus();
}
}
private RequestBody createTextPlain(String code) {
return RequestBody.create(MediaType.parse(OpenFoodAPIClient.TEXT_PLAIN), code);
}
/**
* Upload and set the ingredients image if it is not uploaded already.
*/
private void checkIngredientsImageUploadStatus() {
String code = productDetails.get("code");
if (!imageIngredientsUploaded && imagesFilePath[1] != null && !imagesFilePath[1].isEmpty()) {
// ingredients image is not yet uploaded.
File photoFile = new File(imagesFilePath[1]);
Map<String, RequestBody> imgMap = createRequestBodyMap(code, ProductImageField.INGREDIENTS);
RequestBody image = ProductImage.createImageRequest(photoFile);
imgMap.put("imgupload_ingredients\"; filename=\"ingredients_" + getProductLanguageForEdition() + ".png\"", image);
// Attribute the upload to the connected user
addLoginPasswordInfo(imgMap);
client.saveImageSingle(imgMap)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.uploading_ingredients_image)
.content(R.string.please_wait)
.cancelable(false)
.progress(true, 0);
materialDialog = builder.build();
materialDialog.show();
}
@Override
public void onSuccess(JsonNode jsonNode) {
String status = jsonNode.get("status").asText();
if (status.equals("status not ok")) {
materialDialog.dismiss();
String error = jsonNode.get("error").asText();
if (error.equals("This picture has already been sent.")) {
imageIngredientsUploaded = true;
checkNutritionFactsImageUploadStatus();
} else {
new MaterialDialog.Builder(AddProductActivity.this).title(R.string.uploading_ingredients_image)
.content(error).show();
}
} else {
imageIngredientsUploaded = true;
Map<String, String> queryMap = buildImageQueryMap(jsonNode);
client.editImageSingle(code, queryMap)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
}
@Override
public void onSuccess(JsonNode jsonNode) {
materialDialog.dismiss();
checkNutritionFactsImageUploadStatus();
}
@Override
public void onError(Throwable e) {
dialogNetworkIssueWhileUploadingImages();
}
});
}
}
@Override
public void onError(Throwable e) {
dialogNetworkIssueWhileUploadingImages();
}
});
} else {
// ingredients image is uploaded, check the status of nutrition facts image.
checkNutritionFactsImageUploadStatus();
}
}
private void addLoginPasswordInfo(Map<String, RequestBody> imgMap) {
final SharedPreferences settings = getSharedPreferences("login", 0);
final String login = settings.getString("user", "");
final String password = settings.getString("pass", "");
if (!login.isEmpty() && !password.isEmpty()) {
imgMap.put(KEY_USER_ID, createTextPlain(login));
imgMap.put(KEY_PASSWORD, createTextPlain(password));
}
imgMap.put("comment", createTextPlain(OpenFoodAPIClient.getCommentToUpload(login)));
}
public static Map<String, String> buildImageQueryMap(JsonNode jsonNode) {
String imagefield = jsonNode.get("imagefield").asText();
String imgid = jsonNode.get("image").get("imgid").asText();
Map<String, String> queryMap = new HashMap<>();
queryMap.put("imgid", imgid);
queryMap.put("id", imagefield);
return queryMap;
}
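    // Illustrative note (added; not in the original): the upload reply is
    // assumed to look roughly like
    //   {"status": "status ok", "imagefield": "front_en", "image": {"imgid": "42"}}
    // in which case this helper yields {imgid=42, id=front_en}, the query map
    // that editImageSingle expects.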
/**
* Upload and set the nutrition facts image if it is not uploaded already.
*/
private void checkNutritionFactsImageUploadStatus() {
String code = productDetails.get("code");
if (!imageNutritionFactsUploaded && imagesFilePath[2] != null && !imagesFilePath[2].isEmpty()) {
// nutrition facts image is not yet uploaded.
File photoFile = new File(imagesFilePath[2]);
Map<String, RequestBody> imgMap = createRequestBodyMap(code, ProductImageField.NUTRITION);
            RequestBody image = ProductImage.createImageRequest(photoFile);
imgMap.put("imgupload_nutrition\"; filename=\"nutrition_" + getProductLanguageForEdition() + ".png\"", image);
// Attribute the upload to the connected user
addLoginPasswordInfo(imgMap);
client.saveImageSingle(imgMap)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.uploading_nutrition_image)
.content(R.string.please_wait)
.cancelable(false)
.progress(true, 0);
materialDialog = builder.build();
materialDialog.show();
}
@Override
public void onSuccess(JsonNode jsonNode) {
String status = jsonNode.get("status").asText();
if (status.equals("status not ok")) {
materialDialog.dismiss();
String error = jsonNode.get("error").asText();
if (error.equals("This picture has already been sent.")) {
imageNutritionFactsUploaded = true;
addProductToServer();
} else {
new MaterialDialog.Builder(AddProductActivity.this).title(R.string.uploading_nutrition_image)
.content(error).show();
}
} else {
imageNutritionFactsUploaded = true;
Map<String, String> queryMap = buildImageQueryMap(jsonNode);
client.editImageSingle(code, queryMap)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
}
@Override
public void onSuccess(JsonNode jsonNode) {
materialDialog.dismiss();
addProductToServer();
}
@Override
public void onError(Throwable e) {
dialogNetworkIssueWhileUploadingImages();
}
});
}
}
@Override
public void onError(Throwable e) {
dialogNetworkIssueWhileUploadingImages();
}
});
} else {
// nutrition facts image is uploaded, upload the product to server.
addProductToServer();
}
}
private Map<String, RequestBody> createRequestBodyMap(String code, ProductImageField nutrition) {
Map<String, RequestBody> imgMap = new HashMap<>();
imgMap.put("code", createTextPlain(code));
imgMap.put("imagefield", createTextPlain(nutrition.toString() + '_' + getProductLanguageForEdition()));
return imgMap;
}
private void dialogNetworkIssueWhileUploadingImages() {
materialDialog.dismiss();
if (!editionMode) {
saveProductOffline();
} else {
MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.device_offline_dialog_title)
.positiveText(R.string.txt_try_again)
.negativeText(R.string.dialog_cancel)
.onPositive((dialog, which) -> checkFrontImageUploadStatus())
.onNegative((dialog, which) -> dialog.dismiss());
materialDialog = builder.build();
materialDialog.show();
}
}
/**
* Performs network call and uploads the product to the server or stores it locally if there is no internet connection.
*/
private void addProductToServer() {
String code = productDetails.get("code");
for (Map.Entry<String, String> entry : productDetails.entrySet()) {
String key = entry.getKey();
String value = entry.getValue();
Log.d(key, value);
}
final SharedPreferences settings = getSharedPreferences("login", 0);
final String login = settings.getString("user", "");
boolean productHasChange = true;
if (editionMode && initialValues != null) {
Map<String, String> newValues = new HashMap<>(productDetails);
newValues.remove(KEY_USER_ID);
newValues.remove(KEY_PASSWORD);
productHasChange = !newValues.equals(initialValues);
}
if (productHasChange) {
saveProductToServer(code, OpenFoodAPIClient.getCommentToUpload(login));
} else {
Log.i(ADD_TAG, "not saved because no changes detected");
Intent intent = new Intent();
setResult(RESULT_OK, intent);
finish();
}
}
private void saveProductToServer(String code, String comment) {
Map<String, String> productValues = new HashMap<>(productDetails);
        // The default language should not be changed: we keep the original one.
if (editionMode && StringUtils.isNotBlank(mProduct.getLang())) {
productValues.put(PARAM_LANGUAGE, mProduct.getLang());
}
client.saveProductSingle(code, productValues, comment)
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<State>() {
@Override
public void onSubscribe(Disposable d) {
mainDisposable = d;
MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.toastSending)
.content(R.string.please_wait)
.progress(true, 0)
.cancelable(false);
materialDialog = builder.build();
materialDialog.show();
}
@Override
public void onSuccess(State state) {
// Display toast notification for product upload
// First dismiss the upload dialog
materialDialog.dismiss();
Toast toast = new Toast(OFFApplication.getInstance());
View view = LayoutInflater.from(OFFApplication.getInstance()).inflate(R.layout.toast_upload_success, null);
toast.setGravity(Gravity.CENTER, 0, 0);
toast.setView(view);
toast.setDuration(Toast.LENGTH_SHORT);
toast.show();
mOfflineSavedProductDao.deleteInTx(mOfflineSavedProductDao.queryBuilder().where(OfflineSavedProductDao.Properties.Barcode.eq(code)).list());
Intent intent = new Intent();
intent.putExtra(UPLOADED_TO_SERVER, true);
setResult(RESULT_OK, intent);
finish();
}
@Override
public void onError(Throwable e) {
materialDialog.dismiss();
Log.e(ADD_TAG, e.getMessage());
// A network error happened
if (e instanceof IOException) {
dialogNetworkIssueWhileUploadingImages();
}
// Not a network error
else {
if (!editionMode) {
Toast.makeText(AddProductActivity.this, e.getMessage(), Toast.LENGTH_SHORT).show();
saveProductOffline();
} else {
MaterialDialog.Builder builder = new MaterialDialog.Builder(AddProductActivity.this)
.title(R.string.error_adding_product)
.positiveText(R.string.txt_try_again)
.negativeText(R.string.dialog_cancel)
.onPositive((dialog, which) -> checkFrontImageUploadStatus())
.onNegative((dialog, which) -> dialog.dismiss());
materialDialog = builder.build();
materialDialog.show();
}
}
}
});
}
/**
* Save the current product in the offline db
*/
private void saveProductOffline() {
// Add the images to the productDetails to display them in UI later.
productDetails.put("image_front", imagesFilePath[0]);
productDetails.put("image_ingredients", imagesFilePath[1]);
productDetails.put("image_nutrition_facts", imagesFilePath[2]);
// Add the status of images to the productDetails, whether uploaded or not
if (imageFrontUploaded) {
productDetails.put("image_front_uploaded", "true");
}
if (imageIngredientsUploaded) {
productDetails.put("image_ingredients_uploaded", "true");
}
if (imageNutritionFactsUploaded) {
productDetails.put("image_nutrition_facts_uploaded", "true");
}
OfflineSavedProduct offlineSavedProduct = new OfflineSavedProduct();
offlineSavedProduct.setBarcode(productDetails.get("code"));
offlineSavedProduct.setProductDetailsMap(productDetails);
mOfflineSavedProductDao.insertOrReplace(offlineSavedProduct);
Toast.makeText(OFFApplication.getInstance(), R.string.txtDialogsContentInfoSave, Toast.LENGTH_LONG).show();
Intent intent = new Intent();
intent.putExtra(UPLOADED_TO_SERVER, false);
setResult(RESULT_OK, intent);
finish();
}
public void proceed() {
switch (viewPager.getCurrentItem()) {
case 0:
viewPager.setCurrentItem(1, true);
break;
case 1:
viewPager.setCurrentItem(2, true);
break;
case 2:
checkFields();
break;
}
}
private void checkFields() {
if (!editionMode) {
if (addProductOverviewFragment.areRequiredFieldsEmpty()) {
viewPager.setCurrentItem(0, true);
} else if (isNutritionDataAvailable() && addProductNutritionFactsFragment.containsInvalidValue()) {
viewPager.setCurrentItem(2, true);
} else {
saveProduct();
}
} else {
            // Edit mode: do not check whether the front image is empty, but do check the nutrition facts values.
if ((isNutritionDataAvailable()) && addProductNutritionFactsFragment.containsInvalidValue()) {
viewPager.setCurrentItem(2, true);
} else {
saveEditedProduct();
}
}
}
private boolean isNutritionDataAvailable() {
return BuildConfig.FLAVOR.equals("off") || BuildConfig.FLAVOR.equals("opff");
}
private void saveEditedProduct() {
addProductOverviewFragment.getAllDetails(productDetails);
addProductIngredientsFragment.getAllDetails(productDetails);
if (isNutritionDataAvailable()) {
addProductNutritionFactsFragment.getAllDetails(productDetails);
}
addLoginInfoInProductDetails();
checkFrontImageUploadStatus();
}
private void addLoginInfoInProductDetails() {
final SharedPreferences settings = getSharedPreferences("login", 0);
final String login = settings.getString("user", "");
final String password = settings.getString("pass", "");
if (!login.isEmpty() && !password.isEmpty()) {
productDetails.put(KEY_USER_ID, login);
productDetails.put(KEY_PASSWORD, password);
}
}
@OnClick(R.id.overview_indicator)
void switchToOverviewPage() {
viewPager.setCurrentItem(0, true);
}
@OnClick(R.id.ingredients_indicator)
void switchToIngredientsPage() {
viewPager.setCurrentItem(1, true);
}
@OnClick(R.id.nutrition_facts_indicator)
void switchToNutritionFactsPage() {
viewPager.setCurrentItem(2, true);
}
public void addToMap(String key, String value) {
productDetails.put(key, value);
}
public void addToPhotoMap(ProductImage image, int position) {
String lang = getProductLanguageForEdition();
boolean ocr = false;
Map<String, RequestBody> imgMap = new HashMap<>();
imgMap.put("code", image.getCode());
RequestBody imageField = createTextPlain(image.getImageField().toString() + '_' + lang);
imgMap.put("imagefield", imageField);
if (image.getImguploadFront() != null) {
imagesFilePath[0] = image.getFilePath();
imgMap.put("imgupload_front\"; filename=\"front_" + lang + ".png\"", image.getImguploadFront());
}
if (image.getImguploadIngredients() != null) {
imgMap.put("imgupload_ingredients\"; filename=\"ingredients_" + lang + ".png\"", image.getImguploadIngredients());
ocr = true;
imagesFilePath[1] = image.getFilePath();
}
if (image.getImguploadNutrition() != null) {
imgMap.put("imgupload_nutrition\"; filename=\"nutrition_" + lang + ".png\"", image.getImguploadNutrition());
imagesFilePath[2] = image.getFilePath();
}
if (image.getImguploadOther() != null) {
imgMap.put("imgupload_other\"; filename=\"other_" + lang + ".png\"", image.getImguploadOther());
}
// Attribute the upload to the connected user
addLoginPasswordInfo(imgMap);
savePhoto(imgMap, image, position, ocr);
}
private void savePhoto(Map<String, RequestBody> imgMap, ProductImage image, int position, boolean ocr) {
client.saveImageSingle(imgMap)
.observeOn(AndroidSchedulers.mainThread())
.doOnSubscribe(disposable -> showImageProgress(position))
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
mainDisposable = d;
}
@Override
public void onSuccess(JsonNode jsonNode) {
String status = jsonNode.get("status").asText();
if (status.equals("status not ok")) {
String error = jsonNode.get("error").asText();
final boolean alreadySent = error.equals("This picture has already been sent.");
if (alreadySent && ocr) {
hideImageProgress(position, false, getString(R.string.image_uploaded_successfully));
performOCR(image.getBarcode(), "ingredients_" + getProductLanguageForEdition());
} else {
hideImageProgress(position, true, error);
}
if (!alreadySent) {
new MaterialDialog.Builder(AddProductActivity.this).title(R.string.error_uploading_photo)
.content(error).show();
}
} else {
if (image.getImageField() == ProductImageField.FRONT) {
imageFrontUploaded = true;
} else if (image.getImageField() == ProductImageField.INGREDIENTS) {
imageIngredientsUploaded = true;
} else if (image.getImageField() == ProductImageField.NUTRITION) {
imageNutritionFactsUploaded = true;
}
hideImageProgress(position, false, getString(R.string.image_uploaded_successfully));
String imagefield = jsonNode.get("imagefield").asText();
String imgid = jsonNode.get("image").get("imgid").asText();
if (position != 3 && position != 4) {
// Not OTHER image
setPhoto(image, imagefield, imgid, ocr);
}
}
}
@Override
public void onError(Throwable e) {
// A network error happened
if (e instanceof IOException) {
hideImageProgress(position, false, getString(R.string.no_internet_connection));
Log.e(ADD_TAG, e.getMessage());
if (image.getImageField() == ProductImageField.OTHER) {
ToUploadProduct product = new ToUploadProduct(image.getBarcode(), image.getFilePath(), image.getImageField().toString());
mToUploadProductDao.insertOrReplace(product);
}
} else {
hideImageProgress(position, true, e.getMessage());
Log.i(this.getClass().getSimpleName(), e.getMessage());
Toast.makeText(OFFApplication.getInstance(), e.getMessage(), Toast.LENGTH_SHORT).show();
}
}
});
}
private void setPhoto(ProductImage image, String imagefield, String imgid, boolean ocr) {
Map<String, String> queryMap = new HashMap<>();
queryMap.put("imgid", imgid);
queryMap.put("id", imagefield);
client.editImageSingle(image.getBarcode(), queryMap)
.observeOn(AndroidSchedulers.mainThread())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
}
@Override
public void onSuccess(JsonNode jsonNode) {
String status = jsonNode.get("status").asText();
if (ocr && status.equals("status ok")) {
performOCR(image.getBarcode(), imagefield);
}
}
@Override
public void onError(Throwable e) {
if (e instanceof IOException) {
if (ocr) {
View view = findViewById(R.id.coordinator_layout);
Snackbar.make(view, R.string.no_internet_unable_to_extract_ingredients, Snackbar.LENGTH_INDEFINITE)
.setAction(R.string.txt_try_again, v -> setPhoto(image, imagefield, imgid, true)).show();
}
} else {
Log.i(this.getClass().getSimpleName(), e.getMessage());
Toast.makeText(OFFApplication.getInstance(), e.getMessage(), Toast.LENGTH_SHORT).show();
}
}
});
}
public void performOCR(String code, String imageField) {
client.getIngredients(code, imageField)
.observeOn(AndroidSchedulers.mainThread())
.doOnSubscribe(disposable -> addProductIngredientsFragment.showOCRProgress())
.subscribe(new SingleObserver<JsonNode>() {
@Override
public void onSubscribe(Disposable d) {
}
@Override
public void onSuccess(JsonNode jsonNode) {
addProductIngredientsFragment.hideOCRProgress();
String status = jsonNode.get("status").toString();
if (status.equals("0")) {
String ocrResult = jsonNode.get("ingredients_text_from_image").asText();
addProductIngredientsFragment.setIngredients(status, ocrResult);
} else {
addProductIngredientsFragment.setIngredients(status, null);
}
}
@Override
public void onError(Throwable e) {
addProductIngredientsFragment.hideOCRProgress();
if (e instanceof IOException) {
View view = findViewById(R.id.coordinator_layout);
Snackbar.make(view, R.string.no_internet_unable_to_extract_ingredients, Snackbar.LENGTH_INDEFINITE)
.setAction(R.string.txt_try_again, v -> performOCR(code, imageField)).show();
} else {
Log.i(this.getClass().getSimpleName(), e.getMessage(), e);
Toast.makeText(AddProductActivity.this, e.getMessage(), Toast.LENGTH_SHORT).show();
}
}
});
}
private void hideImageProgress(int position, boolean errorUploading, String message) {
switch (position) {
case 0:
addProductOverviewFragment.hideImageProgress(errorUploading, message);
break;
case 1:
addProductIngredientsFragment.hideImageProgress(errorUploading, message);
break;
case 2:
addProductNutritionFactsFragment.hideImageProgress(errorUploading, message);
break;
case 3:
addProductOverviewFragment.hideOtherImageProgress(errorUploading, message);
break;
case 4:
addProductPhotosFragment.hideImageProgress(errorUploading, message);
}
}
private void showImageProgress(int position) {
switch (position) {
case 0:
addProductOverviewFragment.showImageProgress();
break;
case 1:
addProductIngredientsFragment.showImageProgress();
break;
case 2:
addProductNutritionFactsFragment.showImageProgress();
break;
case 3:
addProductOverviewFragment.showOtherImageProgress();
break;
case 4:
addProductPhotosFragment.showImageProgress();
}
}
public String getProductLanguageForEdition() {
return productDetails.get(PARAM_LANGUAGE);
}
public void setProductLanguage(String languageCode) {
addToMap(PARAM_LANGUAGE, languageCode);
}
public void updateLanguage() {
addProductIngredientsFragment.loadIngredientsImage();
addProductNutritionFactsFragment.loadNutritionImage();
}
public void setIngredients(String status, String ingredients) {
addProductIngredientsFragment.setIngredients(status, ingredients);
}
}
| 1 | 67,817 | weird naming: `is` but `fields` (plural or singular ?) . If the method checks if the fragment has an invalid value, then "hasInvalidValue" or "containsInvalidValue" is fine no ? | openfoodfacts-openfoodfacts-androidapp | java |
@@ -28,7 +28,7 @@ LISTENS_PER_PAGE = 25
user_bp = Blueprint("user", __name__)
-@user_bp.route("/<user_name>")
+@user_bp.route("/<user_name>/")
def profile(user_name):
    # Which database to use to show user listens.
    db_conn = webserver.timescale_connection._ts
| 1 | import listenbrainz.db.stats as db_stats
import listenbrainz.db.user as db_user
import urllib
import ujson
import psycopg2
import datetime
import time
from flask import Blueprint, render_template, request, url_for, Response, redirect, flash, current_app, jsonify
from flask_login import current_user, login_required
from listenbrainz import webserver
from listenbrainz.db.exceptions import DatabaseException
from listenbrainz.domain import spotify
from listenbrainz.webserver import flash
from listenbrainz.webserver.decorators import crossdomain
from listenbrainz.webserver.login import User
from listenbrainz.webserver.redis_connection import _redis
from listenbrainz.webserver.timescale_connection import _ts
from listenbrainz.webserver.views.api_tools import publish_data_to_queue, log_raise_400, is_valid_uuid
from datetime import datetime
from werkzeug.exceptions import NotFound, BadRequest, RequestEntityTooLarge, ServiceUnavailable, Unauthorized, InternalServerError
from listenbrainz.webserver.views.stats_api import _get_non_negative_param
from listenbrainz.listenstore.timescale_listenstore import TimescaleListenStoreException
from pydantic import ValidationError
LISTENS_PER_PAGE = 25
user_bp = Blueprint("user", __name__)
@user_bp.route("/<user_name>")
def profile(user_name):
    # Which database to use to show user listens.
db_conn = webserver.timescale_connection._ts
# Which database to use to show playing_now stream.
playing_now_conn = webserver.redis_connection._redis
user = _get_user(user_name)
# User name used to get user may not have the same case as original user name.
user_name = user.musicbrainz_id
# Getting data for current page
max_ts = request.args.get("max_ts")
if max_ts is not None:
try:
max_ts = int(max_ts)
except ValueError:
raise BadRequest("Incorrect timestamp argument max_ts: %s" % request.args.get("max_ts"))
min_ts = request.args.get("min_ts")
if min_ts is not None:
try:
min_ts = int(min_ts)
except ValueError:
raise BadRequest("Incorrect timestamp argument min_ts: %s" % request.args.get("min_ts"))
# Send min and max listen times to allow React component to hide prev/next buttons accordingly
(min_ts_per_user, max_ts_per_user) = db_conn.get_timestamps_for_user(user_name)
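    # Note (added; not in the original): fetch_listens' to_ts bound is
    # presumably exclusive, so the +1 below keeps the user's newest listen
    # visible on the first page.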
if max_ts is None and min_ts is None:
if max_ts_per_user:
max_ts = max_ts_per_user + 1
else:
max_ts = int(time.time())
listens = []
if min_ts_per_user != max_ts_per_user:
args = {}
if max_ts:
args['to_ts'] = max_ts
else:
args['from_ts'] = min_ts
for listen in db_conn.fetch_listens(user_name, limit=LISTENS_PER_PAGE, **args):
listens.append({
"track_metadata": listen.data,
"listened_at": listen.ts_since_epoch,
"listened_at_iso": listen.timestamp.isoformat() + "Z",
})
# If there are no previous listens then display now_playing
if not listens or listens[0]['listened_at'] >= max_ts_per_user:
playing_now = playing_now_conn.get_playing_now(user.id)
if playing_now:
listen = {
"track_metadata": playing_now.data,
"playing_now": "true",
}
listens.insert(0, listen)
user_stats = db_stats.get_user_artists(user.id, 'all_time')
try:
artist_count = user_stats.all_time.count
except (AttributeError, ValidationError):
artist_count = None
spotify_data = {}
current_user_data = {}
if current_user.is_authenticated:
spotify_data = spotify.get_user_dict(current_user.id)
current_user_data = {
"id": current_user.id,
"name": current_user.musicbrainz_id,
"auth_token": current_user.auth_token,
}
props = {
"user": {
"id": user.id,
"name": user.musicbrainz_id,
},
"current_user": current_user_data,
"listens": listens,
"latest_listen_ts": max_ts_per_user,
"oldest_listen_ts": min_ts_per_user,
"latest_spotify_uri": _get_spotify_uri_for_listens(listens),
"artist_count": format(artist_count, ",d") if artist_count else None,
"profile_url": url_for('user.profile', user_name=user_name),
"mode": "listens",
"spotify": spotify_data,
"web_sockets_server_url": current_app.config['WEBSOCKETS_SERVER_URL'],
"api_url": current_app.config['API_URL'],
}
return render_template("user/profile.html",
props=ujson.dumps(props),
mode='listens',
user=user,
active_section='listens')
@user_bp.route("/<user_name>/artists")
def artists(user_name):
""" Redirect to charts page """
page = request.args.get('page', default=1)
stats_range = request.args.get('range', default="all_time")
return redirect(url_for('user.charts', user_name=user_name, entity='artist', page=page, range=stats_range), code=301)
@user_bp.route("/<user_name>/history")
def history(user_name):
""" Redirect to charts page """
entity = request.args.get('entity', default="artist")
page = request.args.get('page', default=1)
stats_range = request.args.get('range', default="all_time")
return redirect(url_for('user.charts', user_name=user_name, entity=entity, page=page, range=stats_range), code=301)
@user_bp.route("/<user_name>/charts")
def charts(user_name):
""" Show the top entitys for the user. """
user = _get_user(user_name)
user_data = {
"name": user.musicbrainz_id,
"id": user.id,
}
props = {
"user": user_data,
"api_url": current_app.config["API_URL"]
}
return render_template(
"user/charts.html",
active_section="charts",
props=ujson.dumps(props),
user=user
)
@user_bp.route("/<user_name>/reports")
def reports(user_name: str):
""" Show user reports """
user = _get_user(user_name)
user_data = {
"name": user.musicbrainz_id,
"id": user.id,
}
props = {
"user": user_data,
"api_url": current_app.config["API_URL"]
}
return render_template(
"user/reports.html",
active_section="reports",
props=ujson.dumps(props),
user=user
)
@user_bp.route('/<user_name>/delete-listen', methods=['POST'])
@login_required
def delete_listen(user_name):
""" Delete a particular listen from the currently logged-in user's listen history.
This checks for the correct authorization token and deletes the listen.
"""
if request.form.get('token') and (request.form.get('token') == current_user.auth_token):
listened_at = request.form.get('listened_at')
recording_msid = request.form.get('recording_msid')
if listened_at is None:
log_raise_400("Listen timestamp missing.")
try:
listened_at = int(listened_at)
except ValueError:
log_raise_400("%s: Listen timestamp invalid." % listened_at)
if recording_msid is None:
log_raise_400("Recording MSID missing.")
if not is_valid_uuid(recording_msid):
log_raise_400("%s: Recording MSID format invalid." % recording_msid)
try:
user = _get_user(user_name)
_ts.delete_listen(listened_at=listened_at, recording_msid=recording_msid, user_name=user.musicbrainz_id)
except TimescaleListenStoreException as e:
current_app.logger.error("Cannot delete listen for user: %s" % str(e))
raise ServiceUnavailable("We couldn't delete the listen. Please try again later.")
except Exception as e:
current_app.logger.error("Cannot delete listen for user: %s" % str(e))
raise InternalServerError("We couldn't delete the listen. Please try again later.")
return jsonify({'status': 'ok'})
else:
raise Unauthorized("Auth token invalid or missing.")
def _get_user(user_name):
""" Get current username """
if current_user.is_authenticated and \
current_user.musicbrainz_id == user_name:
return current_user
else:
user = db_user.get_by_mb_id(user_name)
if user is None:
raise NotFound("Cannot find user: %s" % user_name)
return User.from_dbrow(user)
def _get_spotify_uri_for_listens(listens):
def get_track_id_from_listen(listen):
additional_info = listen["track_metadata"]["additional_info"]
if "spotify_id" in additional_info and additional_info["spotify_id"] is not None:
return additional_info["spotify_id"].rsplit('/', 1)[-1]
else:
return None
track_id = None
if len(listens):
track_id = get_track_id_from_listen(listens[0])
if track_id:
return "spotify:track:" + track_id
else:
return None
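# Illustrative example (added; not in the original): a listen whose
# additional_info contains a spotify_id such as
# "http://open.spotify.com/track/6rqhFgbbKwnb9MLmUQDhG6" yields
# "spotify:track:6rqhFgbbKwnb9MLmUQDhG6"; listens without a spotify_id
# fall through to None.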
def delete_user(musicbrainz_id):
""" Delete a user from ListenBrainz completely.
First, drops the user's listens and then deletes the user from the
database.
Args:
musicbrainz_id (str): the MusicBrainz ID of the user
Raises:
NotFound if user isn't present in the database
"""
user = _get_user(musicbrainz_id)
_ts.delete(user.musicbrainz_id)
db_user.delete(user.id)
def delete_listens_history(musicbrainz_id):
""" Delete a user's listens from ListenBrainz completely.
Args:
musicbrainz_id (str): the MusicBrainz ID of the user
Raises:
NotFound if user isn't present in the database
"""
user = _get_user(musicbrainz_id)
_ts.delete(user.musicbrainz_id)
_ts.reset_listen_count(user.musicbrainz_id)
db_user.reset_latest_import(user.musicbrainz_id)
db_stats.delete_user_stats(user.id)
| 1 | 17,025 | Does this mean that `listenbrainz.org/user/iliekcomputers` will start getting 404s? | metabrainz-listenbrainz-server | py |
@@ -23,7 +23,11 @@ module Blacklight
# CatalogController.include ModuleDefiningNewMethod
# CatalogController.search_params_logic += [:new_method]
# CatalogController.search_params_logic.delete(:we_dont_want)
- self.search_params_logic = [:default_solr_parameters, :add_query_to_solr, :add_facet_fq_to_solr, :add_facetting_to_solr, :add_solr_fields_to_query, :add_paging_to_solr, :add_sorting_to_solr, :add_group_config_to_solr ]
+ self.search_params_logic = [
+ :default_solr_parameters, :add_query_to_solr, :add_facet_fq_to_solr,
+ :add_facetting_to_solr, :add_solr_fields_to_query, :add_paging_to_solr,
+ :add_sorting_to_solr, :add_group_config_to_solr, :add_facet_paging_to_solr
+ ]
if self.respond_to?(:helper_method)
      helper_method(:facet_limit_for)
| 1 | module Blacklight
##
# This module contains methods that are specified by SearchHelper.search_params_logic
# They transform user parameters into parameters that are sent as a request to Solr when
# RequestBuilders#solr_search_params is called.
#
module RequestBuilders
extend ActiveSupport::Concern
extend Deprecation
self.deprecation_horizon = 'blacklight 6.0'
included do
# We want to install a class-level place to keep
# search_params_logic method names. Compare to before_filter,
# similar design. Since we're a module, we have to add it in here.
# There are too many different semantic choices in ruby 'class variables',
# we choose this one for now, supplied by Rails.
class_attribute :search_params_logic
# Set defaults. Each symbol identifies a _method_ that must be in
# this class, taking two parameters (solr_parameters, user_parameters)
# Can be changed in local apps or by plugins, eg:
# CatalogController.include ModuleDefiningNewMethod
# CatalogController.search_params_logic += [:new_method]
# CatalogController.search_params_logic.delete(:we_dont_want)
self.search_params_logic = [:default_solr_parameters, :add_query_to_solr, :add_facet_fq_to_solr, :add_facetting_to_solr, :add_solr_fields_to_query, :add_paging_to_solr, :add_sorting_to_solr, :add_group_config_to_solr ]
if self.respond_to?(:helper_method)
helper_method(:facet_limit_for)
end
end
module ClassMethods
extend Deprecation
self.deprecation_horizon = 'blacklight 6.0'
def solr_search_params_logic
search_params_logic
end
deprecation_deprecate solr_search_params_logic: :search_params_logic
def solr_search_params_logic= logic
self.search_params_logic= logic
end
deprecation_deprecate :solr_search_params_logic= => :search_params_logic=
end
def solr_search_params_logic
search_params_logic
end
deprecation_deprecate solr_search_params_logic: :search_params_logic
def solr_search_params_logic= logic
self.search_params_logic= logic
end
deprecation_deprecate :solr_search_params_logic= => :search_params_logic=
def search_builder_class
blacklight_config.search_builder_class
end
def search_builder processor_chain = search_params_logic
search_builder_class.new(processor_chain, self)
end
    # @return a params hash for searching solr.
# The CatalogController #index action uses this.
# Solr parameters can come from a number of places. From lowest
# precedence to highest:
# 1. General defaults in blacklight config (are trumped by)
# 2. defaults for the particular search field identified by params[:search_field] (are trumped by)
# 3. certain parameters directly on input HTTP query params
# * not just any parameter is grabbed willy nilly, only certain ones are allowed by HTTP input)
# * for legacy reasons, qt in http query does not over-ride qt in search field definition default.
# 4. extra parameters passed in as argument.
#
# spellcheck.q will be supplied with the [:q] value unless specifically
# specified otherwise.
#
# Incoming parameter :f is mapped to :fq solr parameter.
def solr_search_params(user_params = params || {}, processor_chain = search_params_logic)
search_builder(processor_chain).with(user_params).processed_parameters
end
deprecation_deprecate solr_search_params: :processed_parameters
##
# @param [Hash] user_params a hash of user submitted parameters
# @param [Array] processor_chain a list of processor methods to run
# @param [Hash] extra_params an optional hash of parameters that should be
# added to the query post processing
def build_solr_query(user_params, processor_chain, extra_params=nil)
search_builder(processor_chain).with(user_params).merge(extra_params)
end
deprecation_deprecate build_solr_query: :query
##
# Retrieve the results for a list of document ids
def solr_document_ids_params(ids = [])
Deprecation.silence(Blacklight::RequestBuilders) do
solr_documents_by_field_values_params blacklight_config.document_model.unique_key, ids
end
end
deprecation_deprecate :solr_document_ids_params
##
# Retrieve the results for a list of document ids
# @deprecated
def solr_documents_by_field_values_params(field, values)
search_builder([:add_query_to_solr]).with(q: { field => values}).merge(fl: '*')
end
deprecation_deprecate :solr_documents_by_field_values_params
##
# Retrieve a facet's paginated values.
def solr_facet_params(facet_field, user_params=params || {}, extra_controller_params={})
input = user_params.deep_merge(extra_controller_params)
facet_config = blacklight_config.facet_fields[facet_field]
solr_params = {}
# Now override with our specific things for fetching facet values
solr_params[:"facet.field"] = search_builder.with_ex_local_param((facet_config.ex if facet_config.respond_to?(:ex)), facet_field)
limit = if respond_to?(:facet_list_limit)
facet_list_limit.to_s.to_i
elsif solr_params["facet.limit"]
solr_params["facet.limit"].to_i
else
20
end
# Need to set as f.facet_field.facet.* to make sure we
# override any field-specific default in the solr request handler.
solr_params[:"f.#{facet_field}.facet.limit"] = limit + 1
solr_params[:"f.#{facet_field}.facet.offset"] = ( input.fetch(Blacklight::Solr::FacetPaginator.request_keys[:page] , 1).to_i - 1 ) * ( limit )
solr_params[:"f.#{facet_field}.facet.sort"] = input[ Blacklight::Solr::FacetPaginator.request_keys[:sort] ] if input[ Blacklight::Solr::FacetPaginator.request_keys[:sort] ]
solr_params[:rows] = 0
solr_params
end
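    # Illustrative example (added; not in the original): with a facet limit of
    # 20, solr_facet_params above asks Solr for f.<field>.facet.limit = 21 and
    # an offset of (page - 1) * 20. Receiving all 21 values back signals that
    # a further page exists, which facet_limit_for below accounts for by
    # subtracting 1 from the limit echoed in the response.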
##
# Opensearch autocomplete parameters for plucking a field's value from the results
def solr_opensearch_params(field=nil)
if field.nil?
        Deprecation.warn(Blacklight::RequestBuilders, "Calling Blacklight::RequestBuilders#solr_opensearch_params without a field name is deprecated; the field name will be required in Blacklight 6.0.")
end
solr_params = {}
solr_params[:rows] ||= 10
solr_params[:fl] = field || blacklight_config.view_config('opensearch').title_field
solr_params
end
##
# Pagination parameters for selecting the previous and next documents
# out of a result set.
def previous_and_next_document_params(index, window = 1)
solr_params = {}
if index > 0
solr_params[:start] = index - window # get one before
solr_params[:rows] = 2*window + 1 # and one after
else
solr_params[:start] = 0 # there is no previous doc
solr_params[:rows] = 2*window # but there should be one after
end
solr_params[:fl] = '*'
solr_params[:facet] = false
solr_params
end
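A small Go sketch of the windowing logic above, assuming a 0-based index into the result set: fetch window neighbours on each side, clamping at the start of the result set when there is no previous document.
package main

import "fmt"

// prevNextWindow returns the Solr start/rows pair used above to fetch
// the documents surrounding position index.
func prevNextWindow(index, window int) (start, rows int) {
	if index > 0 {
		return index - window, 2*window + 1 // neighbours before and after
	}
	return 0, 2 * window // no previous document, only the ones after
}

func main() {
	fmt.Println(prevNextWindow(0, 1)) // 0 2
	fmt.Println(prevNextWindow(5, 1)) // 4 3
}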
DEFAULT_FACET_LIMIT = 10
# Look up the facet limit for the given facet_field. Looks at config, and
# if config is 'true', looks it up from the Solr @response if available. If
# no limit is available, returns nil. Used from #add_facetting_to_solr
# to supply f.fieldname.facet.limit values in the solr request (no @response
# available), and used in display (with @response available) to create
# a facet paginator with the right limit.
def facet_limit_for(facet_field)
facet = blacklight_config.facet_fields[facet_field]
return if facet.blank?
if facet.limit and @response and @response.aggregations[facet_field]
limit = @response.aggregations[facet_field].limit
if limit.nil? # we didn't get or set a limit, so infer one.
facet.limit if facet.limit != true
elsif limit == -1 # limit -1 is solr-speak for unlimited
nil
else
limit.to_i - 1 # we added 1 to find out if we needed to paginate
end
elsif facet.limit
facet.limit == true ? DEFAULT_FACET_LIMIT : facet.limit
end
end
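Complementing the request-side sketch earlier, a rough Go illustration of the response-side trick noted in the comment above: one extra value was requested, so the display limit is one less than what Solr reports, and -1 is solr-speak for unlimited.
package main

import "fmt"

// interpretFacetLimit mirrors the logic above: solrLimit is what was
// sent to Solr (display limit + 1), so the display limit is one less;
// -1 means Solr was asked for unlimited values.
func interpretFacetLimit(solrLimit int) (displayLimit int, unlimited bool) {
	if solrLimit == -1 {
		return 0, true
	}
	return solrLimit - 1, false
}

func main() {
	fmt.Println(interpretFacetLimit(21)) // 20 false
	fmt.Println(interpretFacetLimit(-1)) // 0 true
}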
end
end
| 1 | 5,981 | Line is too long. [82/80] | projectblacklight-blacklight | rb |
@@ -28,6 +28,7 @@ CAN_INV_FILTER = 0x20000000
class CANSocket(SuperSocket):
desc = "read/write packets at a given CAN interface using PF_CAN sockets"
+ async_select_unrequired = True
def __init__(self, iface=None, receive_own_messages=False,
can_filters=None, remove_padding=True, basecls=CAN): | 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Nils Weiss <[email protected]>
# This program is published under a GPLv2 license
# scapy.contrib.description = Native CANSocket
# scapy.contrib.status = loads
"""
Native CANSocket.
"""
import struct
import socket
import time
from scapy.config import conf
from scapy.supersocket import SuperSocket
from scapy.error import Scapy_Exception, warning
from scapy.layers.can import CAN
from scapy.packet import Padding
from scapy.arch.linux import get_last_packet_timestamp
conf.contribs['NativeCANSocket'] = {'iface': "can0"}
CAN_FRAME_SIZE = 16
CAN_INV_FILTER = 0x20000000
class CANSocket(SuperSocket):
desc = "read/write packets at a given CAN interface using PF_CAN sockets"
def __init__(self, iface=None, receive_own_messages=False,
can_filters=None, remove_padding=True, basecls=CAN):
self.basecls = basecls
self.remove_padding = remove_padding
self.iface = conf.contribs['NativeCANSocket']['iface'] if \
iface is None else iface
self.ins = socket.socket(socket.PF_CAN,
socket.SOCK_RAW,
socket.CAN_RAW)
try:
self.ins.setsockopt(socket.SOL_CAN_RAW,
socket.CAN_RAW_RECV_OWN_MSGS,
struct.pack("i", receive_own_messages))
except Exception as exception:
Scapy_Exception("Could not modify receive own messages (%s)",
exception)
if can_filters is None:
can_filters = [{
"can_id": 0,
"can_mask": 0
}]
can_filter_fmt = "={}I".format(2 * len(can_filters))
filter_data = []
for can_filter in can_filters:
filter_data.append(can_filter["can_id"])
filter_data.append(can_filter["can_mask"])
self.ins.setsockopt(socket.SOL_CAN_RAW,
socket.CAN_RAW_FILTER,
struct.pack(can_filter_fmt, *filter_data))
self.ins.bind((self.iface,))
self.outs = self.ins
def recv(self, x=CAN_FRAME_SIZE):
try:
pkt, sa_ll = self.ins.recvfrom(x)
except BlockingIOError: # noqa: F821
warning("Captured no data, socket in non-blocking mode.")
return None
except socket.timeout:
warning("Captured no data, socket read timed out.")
return None
except OSError:
# something bad happened (e.g. the interface went down)
warning("Captured no data.")
return None
# need to change the byte order of the first four bytes,
# required by the underlying Linux SocketCAN frame format
pkt = struct.pack("<I12s", *struct.unpack(">I12s", pkt))
length = pkt[4]
canpkt = self.basecls(pkt[:length + 8])
canpkt.time = get_last_packet_timestamp(self.ins)
if self.remove_padding:
return canpkt
else:
return canpkt / Padding(pkt[length + 8:])
def send(self, x):
try:
if hasattr(x, "sent_time"):
x.sent_time = time.time()
# need to change the byte order of the first four bytes,
# required by the underlying Linux SocketCAN frame format
bs = bytes(x)
bs = bs + b'\x00' * (CAN_FRAME_SIZE - len(bs))
bs = struct.pack("<I12s", *struct.unpack(">I12s", bs))
return SuperSocket.send(self, bs)
except socket.error as msg:
raise msg
def close(self):
self.ins.close()
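The recv/send comments above describe rewriting the first four bytes (the CAN identifier) between big-endian network order and the little-endian layout this code assumes for Linux SocketCAN. A minimal Go sketch of that swap over the fixed 16-byte classic CAN frame used above:
package main

import (
	"encoding/binary"
	"fmt"
)

// swapCANIdentifier re-encodes the first four bytes of a 16-byte
// SocketCAN frame from big-endian to little-endian (or back), leaving
// the remaining 12 bytes untouched -- the same transform the Python
// struct.pack("<I12s", *struct.unpack(">I12s", pkt)) performs above.
func swapCANIdentifier(frame []byte) {
	id := binary.BigEndian.Uint32(frame[:4])
	binary.LittleEndian.PutUint32(frame[:4], id)
}

func main() {
	frame := make([]byte, 16)
	frame[0], frame[3] = 0x12, 0x34 // identifier bytes in big-endian order
	swapCANIdentifier(frame)
	fmt.Printf("% x\n", frame[:4]) // 34 00 00 12
}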
@conf.commands.register
def srcan(pkt, iface=None, receive_own_messages=False,
canfilter=None, basecls=CAN, *args, **kargs):
s = CANSocket(iface, receive_own_messages=receive_own_messages,
can_filters=canfilter, basecls=basecls)
a, b = s.sr(pkt, *args, **kargs)
s.close()
return a, b
| 1 | 15,378 | That's a strange name =) | secdev-scapy | py |
@@ -48,6 +48,17 @@ describe CommunicartMailer do
end
end
+ context 'custom templates' do
+ it 'renders a default template when an origin is not indicated' do
+ expect(mail.body.encoded).to include('Purchase Request')
+ end
+
+ it 'renders a custom template when origin is indicated' do
+ approval.cart.properties << Property.create!(property: 'origin', value:'whsc')
+ expect(mail.body.encoded).to include('White House Service Center: Purchase Request')
+ end
+ end
+
end
describe 'approval reply received email' do | 1 | require 'ostruct'
describe CommunicartMailer do
let(:approval_group) { FactoryGirl.create(:approval_group_with_approvers_and_requester, name: "anotherApprovalGroupName") }
let(:approver) { FactoryGirl.create(:user) }
let(:cart) { FactoryGirl.create(:cart_with_approvals, name: "TestCart") }
def expect_csvs_to_be_exported
expect_any_instance_of(Exporter::Items).to receive(:to_csv)
expect_any_instance_of(Exporter::Comments).to receive(:to_csv)
expect_any_instance_of(Exporter::Approvals).to receive(:to_csv)
end
describe 'cart notification email' do
let(:approval) { cart.approvals.first }
let(:mail) { CommunicartMailer.cart_notification_email('[email protected]', approval) }
let(:api_token) { FactoryGirl.create(:api_token) }
before do
expect_any_instance_of(CommunicartMailer).to receive(:from_email).and_return('[email protected]')
expect(ApiToken).to receive_message_chain(:where, :where, :last).and_return(api_token)
end
it 'renders the subject' do
cart.update_attributes(external_id: 13579)
expect(mail.subject).to eq('Communicart Approval Request from Liono Requester: Please review Cart #13579')
end
it 'renders the receiver email' do
expect(mail.to).to eq(["[email protected]"])
end
it 'renders the sender email' do
expect(mail.from).to eq(['[email protected]'])
end
context 'attaching a csv of the cart activity' do
it 'generates csv attachments for an approved cart' do
expect(cart).to receive(:all_approvals_received?).and_return(true)
expect_csvs_to_be_exported
mail
end
it 'does not generate csv attachments for an unapproved cart' do
expect(cart).to receive(:all_approvals_received?).and_return(false)
expect_any_instance_of(Exporter::Base).not_to receive(:to_csv)
mail
end
end
end
describe 'approval reply received email' do
let(:requester) { FactoryGirl.create(:user, email_address: '[email protected]') }
let(:cart_with_approval_group) { FactoryGirl.create(:cart_with_approvals, external_id: 13579) }
let(:approval) { cart_with_approval_group.approvals.first }
let(:mail) { CommunicartMailer.approval_reply_received_email(approval) }
before do
approval.update_attribute(:status, 'approved')
expect_any_instance_of(CommunicartMailer).to receive(:from_email).and_return('[email protected]')
expect(cart_with_approval_group).to receive(:requester).and_return(requester).at_least(:once)
end
it 'renders the subject' do
expect(mail.subject).to eq('User [email protected] has approved cart #13579')
end
it 'renders the receiver email' do
expect(mail.to).to eq(["[email protected]"])
end
it 'renders the sender email' do
expect(mail.from).to eq(['[email protected]'])
end
context 'attaching a csv of the cart activity' do
it 'generates csv attachments for an approved cart' do
expect(cart_with_approval_group).to receive(:all_approvals_received?).and_return(true).at_least(:once)
expect_csvs_to_be_exported
mail
end
it 'does not generate csv attachments for an unapproved cart' do
expect(cart_with_approval_group).to receive(:all_approvals_received?).and_return(false).at_least(:once)
expect_any_instance_of(Exporter::Base).not_to receive(:to_csv)
mail
end
end
end
describe 'comment_added_email' do
let(:cart_item) { FactoryGirl.create(:cart_item, description: "A cart item in need of a comment") }
let(:comment) { FactoryGirl.create(:comment, comment_text: 'Somebody give this cart item a comment') }
let(:email) { "[email protected]" }
let(:mail) { CommunicartMailer.comment_added_email(comment, email) }
before do
expect_any_instance_of(CommunicartMailer).to receive(:from_email).and_return('[email protected]')
cart_item.comments << comment
end
it 'renders the subject' do
expect(mail.subject).to eq("A comment has been added to cart item 'A cart item in need of a comment'")
end
it 'renders the receiver email' do
expect(mail.to).to eq(["[email protected]"])
end
it 'renders the sender email' do
expect(mail.from).to eq(['[email protected]'])
end
end
describe 'cart observer received email' do
let(:observer) { FactoryGirl.create(:user, email_address: '[email protected]') }
let(:requester) { FactoryGirl.create(:user, email_address: '[email protected]') }
before do
expect_any_instance_of(CommunicartMailer).to receive(:from_email).and_return('[email protected]')
expect(cart_with_observers).to receive(:requester).and_return(requester).at_least(:once)
end
let(:cart_with_observers) { FactoryGirl.create(:cart_with_observers, external_id: 1965) }
let(:observer) { cart_with_observers.observers.first }
let(:mail) { CommunicartMailer.cart_observer_email(observer.user_email_address, cart_with_observers) }
it 'renders the subject' do
expect(mail.subject).to eq('Communicart Approval Request from Liono Thunder: Please review Cart #1965')
end
it 'renders the receiver email' do
expect(mail.to).to eq(["[email protected]"])
end
it 'renders the sender email' do
expect(mail.from).to eq(['[email protected]'])
end
context 'attaching a csv of the cart activity' do
it 'generates csv attachments for an approved cart' do
expect(cart_with_observers).to receive(:all_approvals_received?).and_return(true)
expect_csvs_to_be_exported
mail
end
it 'does not generate csv attachments for an unapproved cart' do
expect(cart_with_observers).to receive(:all_approvals_received?).and_return(false)
expect_any_instance_of(Exporter::Base).not_to receive(:to_csv)
mail
end
end
end
end
| 1 | 12,230 | Thoughts on this? I'm not crazy about the brittleness of this but haven't found a good way to test more generically that a specific (custom) template has been rendered. | 18F-C2 | rb |
@@ -30,6 +30,7 @@ createMethods(Results.prototype, objectTypes.RESULTS, [
'filtered',
'sorted',
'snapshot',
+ 'subscribe',
'isValid',
'indexOf',
'min', | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
'use strict';
import Collection, { createCollection } from './collections';
import { objectTypes } from './constants';
import { createMethods } from './util';
export default class Results extends Collection {
}
// Non-mutating methods:
createMethods(Results.prototype, objectTypes.RESULTS, [
'filtered',
'sorted',
'snapshot',
'isValid',
'indexOf',
'min',
'max',
'sum',
'avg',
'addListener',
'removeListener',
'removeAllListeners',
]);
// Mutating methods:
createMethods(Results.prototype, objectTypes.RESULTS, [
'update',
], true);
export function createResults(realmId, info) {
return createCollection(Results.prototype, realmId, info);
}
| 1 | 16,829 | Have we reached binding-level agreement on the `subscribe` terminology? My only concern is that it doesn't seem descriptive enough and may be confused with subscribing for notifications. | realm-realm-js | js |
@@ -138,7 +138,9 @@ func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr Sa
}
for _, l := range config.Links() {
- s.addLink(l)
+ if l.SpanContext.IsValid() {
+ s.addLink(l)
+ }
}
s.SetAttributes(sr.Attributes...) | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"time"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/sdk/instrumentation"
)
type tracer struct {
provider *TracerProvider
instrumentationLibrary instrumentation.Library
}
var _ trace.Tracer = &tracer{}
// Start starts a Span and returns it along with a context containing it.
//
// The Span is created with the provided name and as a child of any existing
// span context found in the passed context. The created Span will be
// configured appropriately by any SpanOption passed.
func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
config := trace.NewSpanStartConfig(options...)
// For local spans created by this SDK, track child span count.
if p := trace.SpanFromContext(ctx); p != nil {
if sdkSpan, ok := p.(*recordingSpan); ok {
sdkSpan.addChild()
}
}
s := tr.newSpan(ctx, name, &config)
if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates)
for _, sp := range sps {
sp.sp.OnStart(ctx, rw)
}
}
if rtt, ok := s.(runtimeTracer); ok {
ctx = rtt.runtimeTrace(ctx)
}
return trace.ContextWithSpan(ctx, s), s
}
type runtimeTracer interface {
// runtimeTrace starts a "runtime/trace".Task for the span and
// returns a context containing the task.
runtimeTrace(ctx context.Context) context.Context
}
// newSpan returns a new configured span.
func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
// If told explicitly to make this a new root use a zero value SpanContext
// as a parent which contains an invalid trace ID and is not remote.
var psc trace.SpanContext
if config.NewRoot() {
ctx = trace.ContextWithSpanContext(ctx, psc)
} else {
psc = trace.SpanContextFromContext(ctx)
}
// If there is a valid parent trace ID, use it to ensure the continuity of
// the trace. Always generate a new span ID so other components can rely
// on a unique span ID, even if the Span is non-recording.
var tid trace.TraceID
var sid trace.SpanID
if !psc.TraceID().IsValid() {
tid, sid = tr.provider.idGenerator.NewIDs(ctx)
} else {
tid = psc.TraceID()
sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
}
samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
ParentContext: ctx,
TraceID: tid,
Name: name,
Kind: config.SpanKind(),
Attributes: config.Attributes(),
Links: config.Links(),
})
scc := trace.SpanContextConfig{
TraceID: tid,
SpanID: sid,
TraceState: samplingResult.Tracestate,
}
if isSampled(samplingResult) {
scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
} else {
scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
}
sc := trace.NewSpanContext(scc)
if !isRecording(samplingResult) {
return tr.newNonRecordingSpan(sc)
}
return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
}
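A rough sketch of the ID-continuity rule spelled out in the comment above: keep a valid parent trace ID, but always mint a fresh span ID. This is illustrative only, not the SDK's IDGenerator interface.
package main

import (
	"crypto/rand"
	"fmt"
)

// newIDs mirrors the continuity rule above: reuse the parent's trace ID
// when it is valid, but always generate a new span ID.
func newIDs(parentTrace [16]byte) (trace [16]byte, span [8]byte) {
	trace = parentTrace
	if trace == ([16]byte{}) { // invalid/zero parent: start a new trace
		rand.Read(trace[:])
	}
	rand.Read(span[:])
	return trace, span
}

func main() {
	parent := [16]byte{1}
	trace, span := newIDs(parent)
	fmt.Println(trace == parent, span != [8]byte{}) // true true
}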
// newRecordingSpan returns a new configured recordingSpan.
func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
startTime := config.Timestamp()
if startTime.IsZero() {
startTime = time.Now()
}
s := &recordingSpan{
parent: psc,
spanContext: sc,
spanKind: trace.ValidateSpanKind(config.SpanKind()),
name: name,
startTime: startTime,
attributes: newAttributesMap(tr.provider.spanLimits.AttributeCountLimit),
events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit),
links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit),
tracer: tr,
spanLimits: tr.provider.spanLimits,
resource: tr.provider.resource,
instrumentationLibrary: tr.instrumentationLibrary,
}
for _, l := range config.Links() {
s.addLink(l)
}
s.SetAttributes(sr.Attributes...)
s.SetAttributes(config.Attributes()...)
return s
}
// newNonRecordingSpan returns a new configured nonRecordingSpan.
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
return nonRecordingSpan{tracer: tr, sc: sc}
}
| 1 | 16,674 | would it not be safer to move this condition to the `addLink` method? | open-telemetry-opentelemetry-go | go |
@@ -611,6 +611,13 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, di Dia
rw.WriteHeader(res.StatusCode)
+ // for some apps which need a correct response header to start http2 streaming,
+ // it's important to explicit flush headers to the client before copy streaming data.
+ if req.ProtoMajor == 2 && res.ContentLength == -1 {
+ if wf, ok := rw.(http.Flusher); ok {
+ wf.Flush()
+ }
+ }
err = h.copyResponse(rw, res.Body, h.flushInterval(req, res))
res.Body.Close() // close now, instead of defer, to populate res.Trailer
if err != nil { | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reverseproxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddyhttp/headers"
"go.uber.org/zap"
"golang.org/x/net/http/httpguts"
)
func init() {
caddy.RegisterModule(Handler{})
}
// Handler implements a highly configurable and production-ready reverse proxy.
//
// Upon proxying, this module sets the following placeholders (which can be used
// both within and after this handler):
//
// Placeholder | Description
// ------------|-------------
// `{http.reverse_proxy.upstream.address}` | The full address to the upstream as given in the config
// `{http.reverse_proxy.upstream.hostport}` | The host:port of the upstream
// `{http.reverse_proxy.upstream.host}` | The host of the upstream
// `{http.reverse_proxy.upstream.port}` | The port of the upstream
// `{http.reverse_proxy.upstream.requests}` | The approximate current number of requests to the upstream
// `{http.reverse_proxy.upstream.max_requests}` | The maximum approximate number of requests allowed to the upstream
// `{http.reverse_proxy.upstream.fails}` | The number of recent failed requests to the upstream
type Handler struct {
// Configures the method of transport for the proxy. A transport
// is what performs the actual "round trip" to the backend.
// The default transport is plaintext HTTP.
TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`
// A circuit breaker may be used to relieve pressure on a backend
// that is beginning to exhibit symptoms of stress or latency.
// By default, there is no circuit breaker.
CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`
// Load balancing distributes load/requests between backends.
LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
// Health checks update the status of backends, whether they are
// up or down. Down backends will not be proxied to.
HealthChecks *HealthChecks `json:"health_checks,omitempty"`
// Upstreams is the list of backends to proxy to.
Upstreams UpstreamPool `json:"upstreams,omitempty"`
// Adjusts how often to flush the response buffer. A
// negative value disables response buffering.
// TODO: figure out good defaults and write docs for this
// (see https://github.com/caddyserver/caddy/issues/1460)
FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
// Headers manipulates headers between Caddy and the backend.
// By default, all headers are passed-thru without changes,
// with the exceptions of special hop-by-hop headers.
//
// X-Forwarded-For and X-Forwarded-Proto are also set
// implicitly, but this may change in the future if the official
// standardized Forwarded header field gains more adoption.
Headers *headers.Handler `json:"headers,omitempty"`
// If true, the entire request body will be read and buffered
// in memory before being proxied to the backend. This should
// be avoided if at all possible for performance reasons.
BufferRequests bool `json:"buffer_requests,omitempty"`
// List of handlers and their associated matchers to evaluate
// after successful roundtrips. The first handler that matches
// the response from a backend will be invoked. The response
// body from the backend will not be written to the client;
// it is up to the handler to finish handling the response.
// If passive health checks are enabled, any errors from the
// handler chain will not affect the health status of the
// backend.
//
// Two new placeholders are available in this handler chain:
// - `{http.reverse_proxy.status_code}` The status code
// - `{http.reverse_proxy.status_text}` The status text
HandleResponse []caddyhttp.ResponseHandler `json:"handle_response,omitempty"`
Transport http.RoundTripper `json:"-"`
CB CircuitBreaker `json:"-"`
logger *zap.Logger
}
// CaddyModule returns the Caddy module information.
func (Handler) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.handlers.reverse_proxy",
New: func() caddy.Module { return new(Handler) },
}
}
// Provision ensures that h is set up properly before use.
func (h *Handler) Provision(ctx caddy.Context) error {
h.logger = ctx.Logger(h)
// start by loading modules
if h.TransportRaw != nil {
mod, err := ctx.LoadModule(h, "TransportRaw")
if err != nil {
return fmt.Errorf("loading transport: %v", err)
}
h.Transport = mod.(http.RoundTripper)
}
if h.LoadBalancing != nil && h.LoadBalancing.SelectionPolicyRaw != nil {
mod, err := ctx.LoadModule(h.LoadBalancing, "SelectionPolicyRaw")
if err != nil {
return fmt.Errorf("loading load balancing selection policy: %s", err)
}
h.LoadBalancing.SelectionPolicy = mod.(Selector)
}
if h.CBRaw != nil {
mod, err := ctx.LoadModule(h, "CBRaw")
if err != nil {
return fmt.Errorf("loading circuit breaker: %s", err)
}
h.CB = mod.(CircuitBreaker)
}
// ensure any embedded headers handler module gets provisioned
// (see https://caddy.community/t/set-cookie-manipulation-in-reverse-proxy/7666?u=matt
// for what happens if we forget to provision it)
if h.Headers != nil {
err := h.Headers.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning embedded headers handler: %v", err)
}
}
// set up transport
if h.Transport == nil {
t := &HTTPTransport{
KeepAlive: &KeepAlive{
ProbeInterval: caddy.Duration(30 * time.Second),
IdleConnTimeout: caddy.Duration(2 * time.Minute),
MaxIdleConnsPerHost: 32,
},
DialTimeout: caddy.Duration(10 * time.Second),
}
err := t.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning default transport: %v", err)
}
h.Transport = t
}
// set up load balancing
if h.LoadBalancing == nil {
h.LoadBalancing = new(LoadBalancing)
}
if h.LoadBalancing.SelectionPolicy == nil {
h.LoadBalancing.SelectionPolicy = RandomSelection{}
}
if h.LoadBalancing.TryDuration > 0 && h.LoadBalancing.TryInterval == 0 {
// a non-zero try_duration with a zero try_interval
// will always spin the CPU for try_duration if the
// upstream is local or low-latency; avoid that by
// defaulting to a sane wait period between attempts
h.LoadBalancing.TryInterval = caddy.Duration(250 * time.Millisecond)
}
lbMatcherSets, err := ctx.LoadModule(h.LoadBalancing, "RetryMatchRaw")
if err != nil {
return err
}
err = h.LoadBalancing.RetryMatch.FromInterface(lbMatcherSets)
if err != nil {
return err
}
// set up upstreams
for _, upstream := range h.Upstreams {
// create or get the host representation for this upstream
var host Host = new(upstreamHost)
existingHost, loaded := hosts.LoadOrStore(upstream.String(), host)
if loaded {
host = existingHost.(Host)
}
upstream.Host = host
// give it the circuit breaker, if any
upstream.cb = h.CB
// if the passive health checker has a non-zero UnhealthyRequestCount
// but the upstream has no MaxRequests set (they are the same thing,
// but the passive health checker is a default value for upstreams
// without MaxRequests), copy the value into this upstream, since the
// value in the upstream (MaxRequests) is what is used during
// availability checks
if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
h.HealthChecks.Passive.logger = h.logger.Named("health_checker.passive")
if h.HealthChecks.Passive.UnhealthyRequestCount > 0 &&
upstream.MaxRequests == 0 {
upstream.MaxRequests = h.HealthChecks.Passive.UnhealthyRequestCount
}
}
// upstreams need independent access to the passive
// health check policy because passive health checks
// run without access to h.
if h.HealthChecks != nil {
upstream.healthCheckPolicy = h.HealthChecks.Passive
}
}
// if active health checks are enabled, configure them and start a worker
if h.HealthChecks != nil &&
h.HealthChecks.Active != nil &&
(h.HealthChecks.Active.Path != "" || h.HealthChecks.Active.Port != 0) {
h.HealthChecks.Active.logger = h.logger.Named("health_checker.active")
timeout := time.Duration(h.HealthChecks.Active.Timeout)
if timeout == 0 {
timeout = 5 * time.Second
}
h.HealthChecks.Active.stopChan = make(chan struct{})
h.HealthChecks.Active.httpClient = &http.Client{
Timeout: timeout,
Transport: h.Transport,
}
if h.HealthChecks.Active.Interval == 0 {
h.HealthChecks.Active.Interval = caddy.Duration(30 * time.Second)
}
if h.HealthChecks.Active.ExpectBody != "" {
var err error
h.HealthChecks.Active.bodyRegexp, err = regexp.Compile(h.HealthChecks.Active.ExpectBody)
if err != nil {
return fmt.Errorf("expect_body: compiling regular expression: %v", err)
}
}
go h.activeHealthChecker()
}
// set up any response routes
for i, rh := range h.HandleResponse {
err := rh.Provision(ctx)
if err != nil {
return fmt.Errorf("provisioning response handler %d: %v", i, err)
}
}
return nil
}
// Cleanup cleans up the resources made by h during provisioning.
func (h *Handler) Cleanup() error {
// stop the active health checker
if h.HealthChecks != nil &&
h.HealthChecks.Active != nil &&
h.HealthChecks.Active.stopChan != nil {
// TODO: consider using context cancellation, could be much simpler
close(h.HealthChecks.Active.stopChan)
}
// TODO: Close keepalive connections on reload? https://github.com/caddyserver/caddy/pull/2507/files#diff-70219fd88fe3f36834f474ce6537ed26R762
// remove hosts from our config from the pool
for _, upstream := range h.Upstreams {
hosts.Delete(upstream.String())
}
return nil
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// if enabled, buffer client request;
// this should only be enabled if the
// upstream requires it and does not
// work with "slow clients" (gunicorn,
// etc.) - this obviously has a perf
// overhead and makes the proxy at
// risk of exhausting memory and more
// susceptible to slowloris attacks,
// so it is strongly recommended to
// only use this feature if absolutely
// required, if read timeouts are set,
// and if body size is limited
if h.BufferRequests {
buf := bufPool.Get().(*bytes.Buffer)
buf.Reset()
defer bufPool.Put(buf)
io.Copy(buf, r.Body)
r.Body.Close()
r.Body = ioutil.NopCloser(buf)
}
// prepare the request for proxying; this is needed only once
err := h.prepareRequest(r)
if err != nil {
return caddyhttp.Error(http.StatusInternalServerError,
fmt.Errorf("preparing request for upstream round-trip: %v", err))
}
// we will need the original headers and Host value if
// header operations are configured; and we should
// restore them after we're done if they are changed
// (for example, changing the outbound Host header
// should not permanently change r.Host; issue #3509)
reqHost := r.Host
reqHeader := r.Header
defer func() {
r.Host = reqHost
r.Header = reqHeader
}()
start := time.Now()
var proxyErr error
for {
// choose an available upstream
upstream := h.LoadBalancing.SelectionPolicy.Select(h.Upstreams, r)
if upstream == nil {
if proxyErr == nil {
proxyErr = fmt.Errorf("no upstreams available")
}
if !h.LoadBalancing.tryAgain(start, proxyErr, r) {
break
}
continue
}
// the dial address may vary per-request if placeholders are
// used, so perform those replacements here; the resulting
// DialInfo struct should have valid network address syntax
dialInfo, err := upstream.fillDialInfo(r)
if err != nil {
return fmt.Errorf("making dial info: %v", err)
}
// attach to the request information about how to dial the upstream;
// this is necessary because the information cannot be sufficiently
// or satisfactorily represented in a URL
caddyhttp.SetVar(r.Context(), dialInfoVarKey, dialInfo)
// set placeholders with information about this upstream
repl.Set("http.reverse_proxy.upstream.address", dialInfo.String())
repl.Set("http.reverse_proxy.upstream.hostport", dialInfo.Address)
repl.Set("http.reverse_proxy.upstream.host", dialInfo.Host)
repl.Set("http.reverse_proxy.upstream.port", dialInfo.Port)
repl.Set("http.reverse_proxy.upstream.requests", upstream.Host.NumRequests())
repl.Set("http.reverse_proxy.upstream.max_requests", upstream.MaxRequests)
repl.Set("http.reverse_proxy.upstream.fails", upstream.Host.Fails())
// mutate request headers according to this upstream;
// because we're in a retry loop, we have to copy
// headers (and the r.Host value) from the original
// so that each retry is identical to the first
if h.Headers != nil && h.Headers.Request != nil {
r.Header = make(http.Header)
copyHeader(r.Header, reqHeader)
r.Host = reqHost
h.Headers.Request.ApplyToRequest(r)
}
// proxy the request to that upstream
proxyErr = h.reverseProxy(w, r, dialInfo, next)
if proxyErr == nil || proxyErr == context.Canceled {
// context.Canceled happens when the downstream client
// cancels the request, which is not our failure
return nil
}
// if the roundtrip was successful, don't retry the request or
// ding the health status of the upstream (an error can still
// occur after the roundtrip if, for example, a response handler
// after the roundtrip returns an error)
if succ, ok := proxyErr.(roundtripSucceeded); ok {
return succ.error
}
// remember this failure (if enabled)
h.countFailure(upstream)
// if we've tried long enough, break
if !h.LoadBalancing.tryAgain(start, proxyErr, r) {
break
}
}
return caddyhttp.Error(http.StatusBadGateway, proxyErr)
}
// prepareRequest modifies req so that it is ready to be proxied,
// except for directing to a specific upstream. This method mutates
// headers and other necessary properties of the request and should
// be done just once (before proxying) regardless of proxy retries.
// This assumes that no mutations of the request are performed
// by h during or after proxying.
func (h Handler) prepareRequest(req *http.Request) error {
// most of this is borrowed from the Go std lib reverse proxy
if req.ContentLength == 0 {
req.Body = nil // Issue golang/go#16036: nil Body for http.Transport retries
}
req.Close = false
// if User-Agent is not set by client, then explicitly
// disable it so it's not set to default value by std lib
if _, ok := req.Header["User-Agent"]; !ok {
req.Header.Set("User-Agent", "")
}
reqUpType := upgradeType(req.Header)
removeConnectionHeaders(req.Header)
// Remove hop-by-hop headers to the backend. Especially
// important is "Connection" because we want a persistent
// connection, regardless of what the client sent to us.
for _, h := range hopHeaders {
hv := req.Header.Get(h)
if hv == "" {
continue
}
if h == "Te" && hv == "trailers" {
// Issue golang/go#21096: tell backend applications that
// care about trailer support that we support
// trailers. (We do, but we don't go out of
// our way to advertise that unless the
// incoming client request thought it was
// worth mentioning)
continue
}
req.Header.Del(h)
}
// After stripping all the hop-by-hop connection headers above, add back any
// necessary for protocol upgrades, such as for websockets.
if reqUpType != "" {
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", reqUpType)
}
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
// If we aren't the first proxy retain prior
// X-Forwarded-For information as a comma+space
// separated list and fold multiple headers into one.
if prior, ok := req.Header["X-Forwarded-For"]; ok {
clientIP = strings.Join(prior, ", ") + ", " + clientIP
}
req.Header.Set("X-Forwarded-For", clientIP)
}
if req.Header.Get("X-Forwarded-Proto") == "" {
// set X-Forwarded-Proto; many backend apps expect this too
proto := "https"
if req.TLS == nil {
proto = "http"
}
req.Header.Set("X-Forwarded-Proto", proto)
}
return nil
}
// reverseProxy performs a round-trip to the given backend and processes the response with the client.
// (This method is mostly the beginning of what was borrowed from the net/http/httputil package in the
// Go standard library which was used as the foundation.)
func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, di DialInfo, next caddyhttp.Handler) error {
di.Upstream.Host.CountRequest(1)
defer di.Upstream.Host.CountRequest(-1)
// point the request to this upstream
h.directRequest(req, di)
// do the round-trip; emit debug log with values we know are
// safe, or if there is no error, emit fuller log entry
start := time.Now()
res, err := h.Transport.RoundTrip(req)
duration := time.Since(start)
logger := h.logger.With(
zap.String("upstream", di.Upstream.String()),
zap.Object("request", caddyhttp.LoggableHTTPRequest{Request: req}),
zap.Duration("duration", duration))
if err != nil {
logger.Debug("upstream roundtrip", zap.Error(err))
return err
}
logger.Debug("upstream roundtrip",
zap.Object("headers", caddyhttp.LoggableHTTPHeader(res.Header)),
zap.Int("status", res.StatusCode))
// update circuit breaker on current conditions
if di.Upstream.cb != nil {
di.Upstream.cb.RecordMetric(res.StatusCode, duration)
}
// perform passive health checks (if enabled)
if h.HealthChecks != nil && h.HealthChecks.Passive != nil {
// strike if the status code matches one that is "bad"
for _, badStatus := range h.HealthChecks.Passive.UnhealthyStatus {
if caddyhttp.StatusCodeMatches(res.StatusCode, badStatus) {
h.countFailure(di.Upstream)
}
}
// strike if the roundtrip took too long
if h.HealthChecks.Passive.UnhealthyLatency > 0 &&
duration >= time.Duration(h.HealthChecks.Passive.UnhealthyLatency) {
h.countFailure(di.Upstream)
}
}
// see if any response handler is configured for this response from the backend
for i, rh := range h.HandleResponse {
if rh.Match != nil && !rh.Match.Match(res.StatusCode, res.Header) {
continue
}
repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
// if configured to only change the status code, do that then continue regular proxy response
if statusCodeStr := rh.StatusCode.String(); statusCodeStr != "" {
statusCode, err := strconv.Atoi(repl.ReplaceAll(statusCodeStr, ""))
if err != nil {
return caddyhttp.Error(http.StatusInternalServerError, err)
}
if statusCode != 0 {
res.StatusCode = statusCode
}
break
}
// otherwise, if there are any routes configured, execute those as the
// actual response instead of what we got from the proxy backend
if len(rh.Routes) == 0 {
continue
}
res.Body.Close()
repl.Set("http.reverse_proxy.status_code", res.StatusCode)
repl.Set("http.reverse_proxy.status_text", res.Status)
h.logger.Debug("handling response", zap.Int("handler", i))
if routeErr := rh.Routes.Compile(next).ServeHTTP(rw, req); routeErr != nil {
// wrap error in roundtripSucceeded so caller knows that
// the roundtrip was successful and to not retry
return roundtripSucceeded{routeErr}
}
}
// Deal with 101 Switching Protocols responses: (WebSocket, h2c, etc)
if res.StatusCode == http.StatusSwitchingProtocols {
h.handleUpgradeResponse(rw, req, res)
return nil
}
removeConnectionHeaders(res.Header)
for _, h := range hopHeaders {
res.Header.Del(h)
}
// apply any response header operations
if h.Headers != nil && h.Headers.Response != nil {
if h.Headers.Response.Require == nil ||
h.Headers.Response.Require.Match(res.StatusCode, res.Header) {
repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
h.Headers.Response.ApplyTo(res.Header, repl)
}
}
copyHeader(rw.Header(), res.Header)
// The "Trailer" header isn't included in the Transport's response,
// at least for *http.Transport. Build it up from Trailer.
announcedTrailers := len(res.Trailer)
if announcedTrailers > 0 {
trailerKeys := make([]string, 0, len(res.Trailer))
for k := range res.Trailer {
trailerKeys = append(trailerKeys, k)
}
rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
}
rw.WriteHeader(res.StatusCode)
err = h.copyResponse(rw, res.Body, h.flushInterval(req, res))
res.Body.Close() // close now, instead of defer, to populate res.Trailer
if err != nil {
// we're streaming the response and we've already written headers, so
// there's nothing an error handler can do to recover at this point;
// the standard lib's proxy panics at this point, but we'll just log
// the error and abort the stream here
h.logger.Error("aborting with incomplete response", zap.Error(err))
return nil
}
if len(res.Trailer) > 0 {
// Force chunking if we saw a response trailer.
// This prevents net/http from calculating the length for short
// bodies and adding a Content-Length.
if fl, ok := rw.(http.Flusher); ok {
fl.Flush()
}
}
if len(res.Trailer) == announcedTrailers {
copyHeader(rw.Header(), res.Trailer)
return nil
}
for k, vv := range res.Trailer {
k = http.TrailerPrefix + k
for _, v := range vv {
rw.Header().Add(k, v)
}
}
return nil
}
// tryAgain takes the time that the handler was initially invoked
// as well as any error currently obtained, and the request being
// tried, and returns true if another attempt should be made at
// proxying the request. If true is returned, it has already blocked
// long enough before the next retry (i.e. no more sleeping is
// needed). If false is returned, the handler should stop trying to
// proxy the request.
func (lb LoadBalancing) tryAgain(start time.Time, proxyErr error, req *http.Request) bool {
// if we've tried long enough, break
if time.Since(start) >= time.Duration(lb.TryDuration) {
return false
}
// if the error occurred while dialing (i.e. a connection
// could not even be established to the upstream), then it
// should be safe to retry, since without a connection, no
// HTTP request can be transmitted; but if the error is not
// specifically a dialer error, we need to be careful
if _, ok := proxyErr.(DialError); proxyErr != nil && !ok {
// if the error occurred after a connection was established,
// we have to assume the upstream received the request, and
// retries need to be carefully decided, because some requests
// are not idempotent
if lb.RetryMatch == nil && req.Method != "GET" {
// by default, don't retry requests if they aren't GET
return false
}
if !lb.RetryMatch.AnyMatch(req) {
return false
}
}
// otherwise, wait and try the next available host
time.Sleep(time.Duration(lb.TryInterval))
return true
}
// directRequest modifies only req.URL so that it points to the upstream
// in the given DialInfo. It must modify ONLY the request URL.
func (h Handler) directRequest(req *http.Request, di DialInfo) {
// we need a host, so set the upstream's host address
reqHost := di.Address
// if the port equates to the scheme, strip the port because
// it's weird to make a request like http://example.com:80/.
if (req.URL.Scheme == "http" && di.Port == "80") ||
(req.URL.Scheme == "https" && di.Port == "443") {
reqHost = di.Host
}
req.URL.Host = reqHost
}
func copyHeader(dst, src http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
return h2
}
func upgradeType(h http.Header) string {
if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") {
return ""
}
return strings.ToLower(h.Get("Upgrade"))
}
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
switch {
case aslash && bslash:
return a + b[1:]
case !aslash && !bslash:
return a + "/" + b
}
return a + b
}
// removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h.
// See RFC 7230, section 6.1
func removeConnectionHeaders(h http.Header) {
if c := h.Get("Connection"); c != "" {
for _, f := range strings.Split(c, ",") {
if f = strings.TrimSpace(f); f != "" {
h.Del(f)
}
}
}
}
// LoadBalancing has parameters related to load balancing.
type LoadBalancing struct {
// A selection policy is how to choose an available backend.
// The default policy is random selection.
SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
// How long to try selecting available backends for each request
// if the next available host is down. By default, this retry is
// disabled. Clients will wait for up to this long while the load
// balancer tries to find an available upstream host.
TryDuration caddy.Duration `json:"try_duration,omitempty"`
// How long to wait between selecting the next host from the pool. Default
// is 250ms. Only relevant when a request to an upstream host fails. Be
// aware that setting this to 0 with a non-zero try_duration can cause the
// CPU to spin if all backends are down and latency is very low.
TryInterval caddy.Duration `json:"try_interval,omitempty"`
// A list of matcher sets that restricts with which requests retries are
// allowed. A request must match any of the given matcher sets in order
// to be retried if the connection to the upstream succeeded but the
// subsequent round-trip failed. If the connection to the upstream failed,
// a retry is always allowed. If unspecified, only GET requests will be
// allowed to be retried. Note that a retry is done with the next available
// host according to the load balancing policy.
RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`
SelectionPolicy Selector `json:"-"`
RetryMatch caddyhttp.MatcherSets `json:"-"`
}
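A rough Go sketch of the retry window that TryDuration and TryInterval describe above: keep retrying until the time budget is spent, sleeping between attempts so an instantly-failing backend cannot spin the CPU. The attempt function is a stand-in, not Caddy's API.
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryWithin keeps calling attempt until it succeeds or tryDuration
// has elapsed since start, waiting tryInterval between attempts.
func retryWithin(tryDuration, tryInterval time.Duration, attempt func() error) error {
	start := time.Now()
	for {
		err := attempt()
		if err == nil {
			return nil
		}
		if time.Since(start) >= tryDuration {
			return err
		}
		time.Sleep(tryInterval) // avoid spinning when backends fail instantly
	}
}

func main() {
	n := 0
	err := retryWithin(time.Second, 50*time.Millisecond, func() error {
		n++
		if n < 3 {
			return errors.New("upstream down")
		}
		return nil
	})
	fmt.Println(n, err) // 3 <nil>
}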
// Selector selects an available upstream from the pool.
type Selector interface {
Select(UpstreamPool, *http.Request) *Upstream
}
// Hop-by-hop headers. These are removed when sent to the backend.
// As of RFC 7230, hop-by-hop headers are required to appear in the
// Connection header field. These are the headers defined by the
// obsoleted RFC 2616 (section 13.5.1) and are used for backward
// compatibility.
var hopHeaders = []string{
"Alt-Svc",
"Connection",
"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
"Transfer-Encoding",
"Upgrade",
}
// DialError is an error that specifically occurs
// in a call to Dial or DialContext.
type DialError struct{ error }
// TLSTransport is implemented by transports
// that are capable of using TLS.
type TLSTransport interface {
// TLSEnabled returns true if the transport
// has TLS enabled, false otherwise.
TLSEnabled() bool
// EnableTLS enables TLS within the transport
// if it is not already, using the provided
// value as a basis for the TLS config.
EnableTLS(base *TLSConfig) error
}
// roundtripSucceeded is an error type that is returned if the
// roundtrip succeeded, but an error occurred after-the-fact.
type roundtripSucceeded struct{ error }
var bufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
// Interface guards
var (
_ caddy.Provisioner = (*Handler)(nil)
_ caddy.CleanerUpper = (*Handler)(nil)
_ caddyhttp.MiddlewareHandler = (*Handler)(nil)
)
| 1 | 15,242 | I think this might read better: > some apps need the response headers before starting to stream content with http2, so it's important to explicitly flush the headers to the client before streaming the data. | caddyserver-caddy | go |
@@ -47,6 +47,13 @@ public interface FileScanTask extends ScanTask {
*/
PartitionSpec spec();
+ /**
+ * The partition data for the file of this task.
+ *
+ * @return the partition data for the file of this task
+ */
+ StructLike partition();
+
/**
* The starting position of this scan range in the file.
* | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import org.apache.iceberg.expressions.Expression;
/**
* A scan task over a range of a single file.
*/
public interface FileScanTask extends ScanTask {
/**
* The {@link DataFile file} to scan.
*
* @return the file to scan
*/
DataFile file();
/**
* A list of {@link DeleteFile delete files} to apply when reading the task's data file.
*
* @return a list of delete files to apply
*/
List<DeleteFile> deletes();
/**
* The {@link PartitionSpec spec} used to store this file.
*
* @return the partition spec from this file's manifest
*/
PartitionSpec spec();
/**
* The starting position of this scan range in the file.
*
* @return the start position of this scan range
*/
long start();
/**
* The number of bytes to scan from the {@link #start()} position in the file.
*
* @return the length of this scan range in bytes
*/
long length();
/**
* Returns the residual expression that should be applied to rows in this file scan.
* <p>
* The residual expression for a file is created from the scan's filter, with any
* predicates that are true or false for the entire file (based on the file's
* partition data) removed.
*
* @return a residual expression to apply to rows from this scan
*/
Expression residual();
/**
* Splits this scan task into component {@link FileScanTask scan tasks}, each of {@code splitSize} size
* @param splitSize The size of a component scan task
* @return an Iterable of {@link FileScanTask scan tasks}
*/
Iterable<FileScanTask> split(long splitSize);
@Override
default boolean isFileScanTask() {
return true;
}
@Override
default FileScanTask asFileScanTask() {
return this;
}
}
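As a loose illustration of the split contract documented above, a Go sketch dividing a task's [start, start+length) byte range into splitSize pieces. Treating the trailing remainder as a final shorter task is an assumption here; the interface itself does not specify it.
package main

import "fmt"

type byteRange struct{ start, length int64 }

// split carves [start, start+length) into chunks of at most splitSize
// bytes, in the spirit of FileScanTask.split above.
func split(start, length, splitSize int64) []byteRange {
	var out []byteRange
	for off := int64(0); off < length; off += splitSize {
		n := splitSize
		if length-off < n {
			n = length - off // assumed: trailing remainder is a shorter task
		}
		out = append(out, byteRange{start + off, n})
	}
	return out
}

func main() {
	fmt.Println(split(0, 1000, 300))
	// [{0 300} {300 300} {600 300} {900 100}]
}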
| 1 | 34,212 | What does this return if there is no partition? I think that we should consider adding a struct type that describes this tuple. That way, we can use an empty struct for unpartitioned and a non-empty struct for tasks that are combined by partition. We could also support more combinations, like combining across day partitions by not across bucket partitions or similar. | apache-iceberg | java |
@@ -493,7 +493,16 @@ func DeleteRepo(
_, _, err = kbfsOps.Lookup(ctx, repoNode, normalizedRepoName)
if err != nil {
- return err
+ // For the common "repo doesn't exist" case, use an error type that the
+ // client can recognize.
+ switch errors.Cause(err).(type) {
+ case libkbfs.NoSuchNameError:
+ return libkb.RepoDoesntExistError{
+ Name: repoName,
+ }
+ default:
+ return err
+ }
}
deletedReposNode, err := lookupOrCreateDir( | 1 | // Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libgit
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
"strings"
"time"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/libfs"
"github.com/keybase/kbfs/libkbfs"
"github.com/pkg/errors"
)
const (
kbfsRepoDir = ".kbfs_git"
kbfsConfigName = "kbfs_config"
kbfsConfigNameTemp = "._kbfs_config"
gitSuffixToIgnore = ".git"
kbfsDeletedReposDir = ".kbfs_deleted_repos"
minDeletedAgeForCleaning = 1 * time.Hour
cleaningTimeLimit = 2 * time.Second
)
// This character set is what Github supports in repo names. It's
// probably to avoid any problems when cloning onto filesystems that
// have different Unicode decomposition schemes
// (https://en.wikipedia.org/wiki/Unicode_equivalence). There's no
// internal reason to be so restrictive, but it probably makes sense
// to start off more restrictive and then relax things later as we
// test.
var repoNameRE = regexp.MustCompile(`^([a-zA-Z0-9][a-zA-Z0-9_\.-]*)$`)
func checkValidRepoName(repoName string, config libkbfs.Config) bool {
return len(repoName) >= 1 &&
uint32(len(repoName)) <= config.MaxNameBytes() &&
(os.Getenv("KBFS_GIT_REPONAME_SKIP_CHECK") != "" ||
repoNameRE.MatchString(repoName))
}
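Since the restriction above is carried entirely by the regular expression, here is a minimal Go sketch exercising the same pattern on a few illustrative names:
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as repoNameRE above: first character alphanumeric,
// the rest alphanumeric plus underscore, dot, or dash.
var repoNameRE = regexp.MustCompile(`^([a-zA-Z0-9][a-zA-Z0-9_\.-]*)$`)

func main() {
	for _, name := range []string{"my-repo", "repo.v2", "_hidden", "-bad", "ok_1"} {
		fmt.Println(name, repoNameRE.MatchString(name))
	}
	// my-repo true
	// repo.v2 true
	// _hidden false
	// -bad false
	// ok_1 true
}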
func recursiveDelete(
ctx context.Context, fs *libfs.FS, fi os.FileInfo) error {
if !fi.IsDir() {
// Delete regular files and symlinks directly.
return fs.Remove(fi.Name())
}
subdirFS, err := fs.Chroot(fi.Name())
if err != nil {
return err
}
children, err := subdirFS.ReadDir("/")
if err != nil {
return err
}
for _, childFI := range children {
err := recursiveDelete(ctx, subdirFS.(*libfs.FS), childFI)
if err != nil {
return err
}
}
return fs.Remove(fi.Name())
}
// CleanOldDeletedRepos completely removes any "deleted" repos that
// have been deleted for longer than `minDeletedAgeForCleaning`. The
// caller is responsible for syncing any data to disk, if desired.
func CleanOldDeletedRepos(
ctx context.Context, config libkbfs.Config,
tlfHandle *libkbfs.TlfHandle) (err error) {
fs, err := libfs.NewFS(
ctx, config, tlfHandle, path.Join(kbfsRepoDir, kbfsDeletedReposDir),
"" /* uniq ID isn't used for removals */)
switch errors.Cause(err).(type) {
case libkbfs.NoSuchNameError:
// Nothing to clean.
return nil
case nil:
default:
return err
}
deletedRepos, err := fs.ReadDir("/")
if err != nil {
return err
}
if len(deletedRepos) == 0 {
return nil
}
log := config.MakeLogger("")
now := config.Clock().Now()
log.CDebugf(ctx, "Checking %d deleted repos for cleaning in %s",
len(deletedRepos), tlfHandle.GetCanonicalPath())
defer func() {
log.CDebugf(ctx, "Done checking deleted repos: %+v", err)
}()
for _, fi := range deletedRepos {
parts := strings.Split(fi.Name(), "-")
if len(parts) < 2 {
log.CDebugf(ctx,
"Ignoring deleted repo name with wrong format: %s", fi.Name())
continue
}
deletedTimeUnixNano, err := strconv.ParseInt(
parts[len(parts)-1], 10, 64)
if err != nil {
log.CDebugf(ctx,
"Ignoring deleted repo name with wrong format: %s: %+v",
fi.Name(), err)
continue
}
deletedTime := time.Unix(0, deletedTimeUnixNano)
if deletedTime.Add(minDeletedAgeForCleaning).After(now) {
// Repo was deleted too recently.
continue
}
log.CDebugf(ctx, "Cleaning deleted repo %s", fi.Name())
err = recursiveDelete(ctx, fs, fi)
if err != nil {
return err
}
}
return nil
}
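The cleaning loop above implies a "<name>-<unix-nanoseconds>" naming scheme for deleted repos. A small Go sketch of building and parsing such names under that assumption:
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// deletedRepoName appends the deletion time in Unix nanoseconds, the
// format the parser above expects in the final dash-separated field.
func deletedRepoName(repo string, deletedAt time.Time) string {
	return fmt.Sprintf("%s-%d", repo, deletedAt.UnixNano())
}

// deletedAt recovers the deletion time from such a name.
func deletedAt(name string) (time.Time, error) {
	parts := strings.Split(name, "-")
	if len(parts) < 2 {
		return time.Time{}, fmt.Errorf("wrong format: %s", name)
	}
	ns, err := strconv.ParseInt(parts[len(parts)-1], 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.Unix(0, ns), nil
}

func main() {
	name := deletedRepoName("my-repo", time.Unix(0, 1500000000000000000))
	fmt.Println(name) // my-repo-1500000000000000000
	t, _ := deletedAt(name)
	fmt.Println(t.UnixNano()) // 1500000000000000000
}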
// CleanOldDeletedReposTimeLimited is the same as
// `CleanOldDeletedRepos`, except it limits the time spent on
// cleaning, deleting as much data as possible within the given time
// limit (without returning an error).
func CleanOldDeletedReposTimeLimited(
ctx context.Context, config libkbfs.Config,
tlfHandle *libkbfs.TlfHandle) error {
ctx, cancel := context.WithTimeout(ctx, cleaningTimeLimit)
defer cancel()
err := CleanOldDeletedRepos(ctx, config, tlfHandle)
if errors.Cause(err) == context.DeadlineExceeded {
return nil
}
return err
}
// UpdateRepoMD lets the Keybase service know that a repo's MD has
// been updated.
func UpdateRepoMD(ctx context.Context, config libkbfs.Config,
tlfHandle *libkbfs.TlfHandle, fs *libfs.FS) error {
folder := tlfHandle.ToFavorite().ToKBFolder(false)
// Get the user-formatted repo name.
f, err := fs.Open(kbfsConfigName)
if err != nil {
return err
}
defer f.Close()
buf, err := ioutil.ReadAll(f)
if err != nil {
return err
}
c, err := configFromBytes(buf)
if err != nil {
return err
}
log := config.MakeLogger("")
log.CDebugf(ctx, "Putting git MD update")
err = config.KBPKI().PutGitMetadata(
ctx, folder, keybase1.RepoID(c.ID.String()),
keybase1.GitRepoName(c.Name))
if err != nil {
// Just log the put error, it shouldn't block the success of
// the overall git operation.
log.CDebugf(ctx, "Failed to put git metadata: %+v", err)
}
return nil
}
func createNewRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string, fs *libfs.FS) (ID, error) {
// TODO: take a global repo lock here to make sure only one
// client generates the repo ID.
repoID, err := makeRandomID()
if err != nil {
return NullID, err
}
config.MakeLogger("").CDebugf(ctx,
"Creating a new repo %s in %s: repoID=%s",
repoName, tlfHandle.GetCanonicalPath(), repoID)
// Lock a temp file to avoid a duplicate create of the actual
// file. TODO: clean up this file at some point?
lockFile, err := fs.Create(kbfsConfigNameTemp)
if err != nil && !os.IsExist(err) {
return NullID, err
} else if os.IsExist(err) {
lockFile, err = fs.Open(kbfsConfigNameTemp)
}
if err != nil {
return NullID, err
}
defer lockFile.Close()
// Take a lock during creation.
err = lockFile.Lock()
if err != nil {
return NullID, err
}
f, err := fs.Create(kbfsConfigName)
if err != nil && !os.IsExist(err) {
return NullID, err
} else if os.IsExist(err) {
// The config file already exists, so someone else already
// initialized the repo.
config.MakeLogger("").CDebugf(
ctx, "Config file for repo %s already exists", repoName)
f, err := fs.Open(kbfsConfigName)
if err != nil {
return NullID, err
}
defer f.Close()
buf, err := ioutil.ReadAll(f)
if err != nil {
return NullID, err
}
existingConfig, err := configFromBytes(buf)
if err != nil {
return NullID, err
}
return NullID, errors.WithStack(libkb.RepoAlreadyExistsError{
DesiredName: repoName,
ExistingName: existingConfig.Name,
ExistingID: existingConfig.ID.String(),
})
}
defer f.Close()
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return NullID, err
}
c := &Config{
ID: repoID,
Name: repoName,
CreatorUID: session.UID.String(),
Ctime: config.Clock().Now().UnixNano(),
}
buf, err := c.toBytes()
if err != nil {
return NullID, err
}
_, err = f.Write(buf)
if err != nil {
return NullID, err
}
err = UpdateRepoMD(ctx, config, tlfHandle, fs)
if err != nil {
return NullID, err
}
return repoID, nil
}
func normalizeRepoName(repoName string) string {
return strings.TrimSuffix(strings.ToLower(repoName), gitSuffixToIgnore)
}
func lookupOrCreateDir(ctx context.Context, config libkbfs.Config,
n libkbfs.Node, name string) (libkbfs.Node, error) {
newNode, _, err := config.KBFSOps().Lookup(ctx, n, name)
switch errors.Cause(err).(type) {
case libkbfs.NoSuchNameError:
newNode, _, err = config.KBFSOps().CreateDir(ctx, n, name)
if err != nil {
return nil, err
}
case nil:
default:
return nil, err
}
return newNode, nil
}
type repoOpType int
const (
getOrCreate repoOpType = iota
createOnly
getOnly
)
// NoSuchRepoError indicates that a repo doesn't yet exist, and it
// will not be created.
type NoSuchRepoError struct {
name string
}
func (nsre NoSuchRepoError) Error() string {
return fmt.Sprintf("A repo named %s hasn't been created yet", nsre.name)
}
func getOrCreateRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string, uniqID string, op repoOpType) (
fs *libfs.FS, id ID, err error) {
if !checkValidRepoName(repoName, config) {
return nil, NullID, errors.WithStack(libkb.InvalidRepoNameError{Name: repoName})
}
rootNode, _, err := config.KBFSOps().GetOrCreateRootNode(
ctx, tlfHandle, libkbfs.MasterBranch)
if err != nil {
return nil, NullID, err
}
normalizedRepoName := normalizeRepoName(repoName)
// If the user doesn't have write access, but the repo doesn't
// exist, give them a nice error message.
repoExists := false
defer func() {
_, isWriteAccessErr := errors.Cause(err).(libkbfs.WriteAccessError)
if !repoExists && isWriteAccessErr {
err = NoSuchRepoError{repoName}
}
}()
repoDir, err := lookupOrCreateDir(ctx, config, rootNode, kbfsRepoDir)
if err != nil {
return nil, NullID, err
}
if op == getOnly {
_, _, err = config.KBFSOps().Lookup(ctx, repoDir, normalizedRepoName)
switch errors.Cause(err).(type) {
case libkbfs.NoSuchNameError:
return nil, NullID, errors.WithStack(NoSuchRepoError{repoName})
case nil:
default:
return nil, NullID, err
}
} else {
_, err = lookupOrCreateDir(ctx, config, repoDir, normalizedRepoName)
if err != nil {
return nil, NullID, err
}
}
repoExists = true
fs, err = libfs.NewFS(
ctx, config, tlfHandle, path.Join(kbfsRepoDir, normalizedRepoName),
uniqID)
if err != nil {
return nil, NullID, err
}
f, err := fs.Open(kbfsConfigName)
if err != nil && !os.IsNotExist(err) {
return nil, NullID, err
} else if os.IsNotExist(err) {
if op == getOnly {
return nil, NullID, errors.WithStack(NoSuchRepoError{repoName})
}
// Create a new repo ID.
repoID, err := createNewRepoAndID(ctx, config, tlfHandle, repoName, fs)
if err != nil {
return nil, NullID, err
}
return fs, repoID, nil
}
defer f.Close()
buf, err := ioutil.ReadAll(f)
if err != nil {
return nil, NullID, err
}
c, err := configFromBytes(buf)
if err != nil {
return nil, NullID, err
}
if op == createOnly {
// If this was already created, but we were expected to create
// it, then send back an error.
return nil, NullID, libkb.RepoAlreadyExistsError{
DesiredName: repoName,
ExistingName: c.Name,
ExistingID: c.ID.String(),
}
}
fs.SetLockNamespace(c.ID.Bytes())
return fs, c.ID, nil
}
// GetOrCreateRepoAndID returns a filesystem object rooted at the
// specified repo, along with the stable repo ID. If the repo hasn't
// been created yet, it generates a new ID and creates the repo. The
// caller is responsible for syncing the FS and flushing the journal,
// if desired.
func GetOrCreateRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string, uniqID string) (*libfs.FS, ID, error) {
return getOrCreateRepoAndID(
ctx, config, tlfHandle, repoName, uniqID, getOrCreate)
}
// GetRepoAndID returns a filesystem object rooted at the
// specified repo, along with the stable repo ID, if it already
// exists.
func GetRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string, uniqID string) (*libfs.FS, ID, error) {
return getOrCreateRepoAndID(
ctx, config, tlfHandle, repoName, uniqID, getOnly)
}
// CreateRepoAndID returns a new stable repo ID for the provided
// repoName in the given TLF. If the repo has already been created,
// it returns a `RepoAlreadyExistsError`. The caller is responsible
// for syncing the FS and flushing the journal, if desired. It
// expects the `config` object to be unique during the lifetime of
// this call.
func CreateRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string) (ID, error) {
// Create a unique ID using the verifying key and the `config`
// object, which should be unique to each call in practice.
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return NullID, err
}
uniqID := fmt.Sprintf("%s-%p", session.VerifyingKey.String(), config)
fs, id, err := getOrCreateRepoAndID(
ctx, config, tlfHandle, repoName, uniqID, createOnly)
if err != nil {
return NullID, err
}
err = fs.SyncAll()
if err != nil {
return NullID, err
}
return id, err
}
// DeleteRepo "deletes" the given repo in the given TLF. Right now it
// simply moves the repo out of the way to a special directory, to
// allow any concurrent writers to finish their pushes without
// triggering conflict resolution. The caller is responsible for
// syncing the FS and flushing the journal, if desired. It expects
// the `config` object to be unique during the lifetime of this call.
func DeleteRepo(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string) error {
// Create a unique ID using the verifying key and the `config`
// object, which should be unique to each call in practice.
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
kbfsOps := config.KBFSOps()
rootNode, _, err := kbfsOps.GetOrCreateRootNode(
ctx, tlfHandle, libkbfs.MasterBranch)
if err != nil {
return err
}
normalizedRepoName := normalizeRepoName(repoName)
repoNode, _, err := kbfsOps.Lookup(ctx, rootNode, kbfsRepoDir)
if err != nil {
return err
}
_, _, err = kbfsOps.Lookup(ctx, repoNode, normalizedRepoName)
if err != nil {
return err
}
deletedReposNode, err := lookupOrCreateDir(
ctx, config, repoNode, kbfsDeletedReposDir)
if err != nil {
return err
}
// For now, just rename the repo out of the way, using the device
// ID and the current time in nanoseconds to make uniqueness
// probable.
dirSuffix := fmt.Sprintf(
"%s-%d", session.VerifyingKey.String(), config.Clock().Now().UnixNano())
return kbfsOps.Rename(
ctx, repoNode, normalizedRepoName, deletedReposNode,
normalizedRepoName+dirSuffix)
}
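
// Illustrative sketch (not part of the original file; ctx, config, tlfHandle
// and uniqID are assumed to be supplied by the caller): a typical caller
// creates or opens a repo and then syncs, as the doc comments above require.
//
//	fs, id, err := GetOrCreateRepoAndID(ctx, config, tlfHandle, "myrepo", uniqID)
//	if err != nil {
//		// A getOnly lookup would instead surface NoSuchRepoError here.
//		return err
//	}
//	if err := fs.SyncAll(); err != nil {
//		return err
//	}
//	config.MakeLogger("").CDebugf(ctx, "repo %s ready", id)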
| 1 | 18,104 | Also, it might be worth returning this for the above lookup on `kbfsRepoDir` as well, which could happen if someone tries to delete a repo in a TLF that has never had any repos at all. | keybase-kbfs | go |
@@ -171,6 +171,12 @@ var (
Usage: "IP address to bind API to",
Value: "127.0.0.1",
}
+ // FlagTequilapiAllowedHostnames Restrict hostnames in requests' Host header to following domains.
+ FlagTequilapiAllowedHostnames = cli.StringFlag{
+ Name: "tequilapi.allowed-hostnames",
+ Usage: "Comma separated list of allowed domains. Prepend value with dot for wildcard mask",
+ Value: ".localhost, localhost, .localdomain",
+ }
// FlagTequilapiPort port for listening for incoming API requests.
FlagTequilapiPort = cli.IntFlag{
Name: "tequilapi.port", | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package config
import (
"fmt"
"strings"
"time"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/urfave/cli/v2"
"github.com/mysteriumnetwork/node/metadata"
)
var (
// Alphabetically sorted list of node flags
	// Some of the flags are located in separate source files: flags_*.go
// FlagDiscoveryType proposal discovery adapter.
FlagDiscoveryType = cli.StringSliceFlag{
Name: "discovery.type",
Usage: `Proposal discovery adapter(s) separated by comma. Options: { "api", "broker", "api,broker,dht" }`,
Value: cli.NewStringSlice("api"),
}
// FlagDiscoveryPingInterval proposal ping interval in seconds.
FlagDiscoveryPingInterval = cli.DurationFlag{
Name: "discovery.ping",
Usage: `Proposal update interval { "30s", "3m", "1h20m30s" }`,
Value: 180 * time.Second,
}
// FlagDiscoveryFetchInterval proposal fetch interval in seconds.
FlagDiscoveryFetchInterval = cli.DurationFlag{
Name: "discovery.fetch",
Usage: `Proposal fetch interval { "30s", "3m", "1h20m30s" }`,
Value: 180 * time.Second,
}
// FlagDHTAddress IP address of interface to listen for DHT connections.
FlagDHTAddress = cli.StringFlag{
Name: "discovery.dht.address",
Usage: "IP address to bind DHT to",
Value: "0.0.0.0",
}
// FlagDHTPort listens DHT connections on the specified port.
FlagDHTPort = cli.IntFlag{
Name: "discovery.dht.port",
	Usage: "The port to bind DHT to (by default, a random port will be used)",
Value: 0,
}
// FlagDHTProtocol protocol for DHT to use.
FlagDHTProtocol = cli.StringFlag{
Name: "discovery.dht.proto",
Usage: "Protocol to use with DHT. Options: { udp, tcp }",
Value: "tcp",
}
// FlagDHTBootstrapPeers DHT bootstrap peer nodes list.
FlagDHTBootstrapPeers = cli.StringSliceFlag{
Name: "discovery.dht.peers",
	Usage: `Peer URL(s) for DHT bootstrap (e.g. /ip4/127.0.0.1/tcp/1234/p2p/QmNUZRp1zrk8i8TpfyeDZ9Yg3C4PjZ5o61yao3YhyY1TE8) separated by comma. They will tell us about the other nodes in the network.`,
Value: cli.NewStringSlice(),
}
// FlagBindAddress IP address to bind to.
FlagBindAddress = cli.StringFlag{
Name: "bind.address",
Usage: "IP address to bind provided services to",
Value: "0.0.0.0",
}
// FlagFeedbackURL URL of Feedback API.
FlagFeedbackURL = cli.StringFlag{
Name: "feedback.url",
Usage: "URL of Feedback API",
Value: "https://feedback.mysterium.network",
}
// FlagFirewallKillSwitch always blocks non-tunneled outgoing consumer traffic.
FlagFirewallKillSwitch = cli.BoolFlag{
Name: "firewall.killSwitch.always",
Usage: "Always block non-tunneled outgoing consumer traffic",
}
// FlagFirewallProtectedNetworks protects provider's networks from access via VPN
FlagFirewallProtectedNetworks = cli.StringFlag{
Name: "firewall.protected.networks",
Usage: "List of comma separated (no spaces) subnets to be protected from access via VPN",
Value: "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,127.0.0.0/8",
}
// FlagShaperEnabled enables bandwidth limitation.
FlagShaperEnabled = cli.BoolFlag{
Name: "shaper.enabled",
Usage: "Limit service bandwidth",
}
// FlagShaperBandwidth set the bandwidth limit.
FlagShaperBandwidth = cli.Uint64Flag{
Name: "shaper.bandwidth",
Usage: "Set the bandwidth limit in Kbytes",
Value: 5000,
}
// FlagKeystoreLightweight determines the scrypt memory complexity.
FlagKeystoreLightweight = cli.BoolFlag{
Name: "keystore.lightweight",
Usage: "Determines the scrypt memory complexity. If set to true, will use 4MB blocks instead of the standard 256MB ones",
Value: true,
}
// FlagLogHTTP enables HTTP payload logging.
FlagLogHTTP = cli.BoolFlag{
Name: "log.http",
Usage: "Enable HTTP payload logging",
}
// FlagLogLevel logger level.
FlagLogLevel = cli.StringFlag{
Name: "log-level",
Usage: func() string {
allLevels := []string{
zerolog.TraceLevel.String(),
zerolog.DebugLevel.String(),
zerolog.InfoLevel.String(),
zerolog.WarnLevel.String(),
zerolog.FatalLevel.String(),
zerolog.PanicLevel.String(),
zerolog.Disabled.String(),
}
return fmt.Sprintf("Set the logging level (%s)", strings.Join(allLevels, "|"))
}(),
Value: zerolog.DebugLevel.String(),
}
// FlagVerbose enables verbose logging.
FlagVerbose = cli.BoolFlag{
Name: "verbose",
Usage: "Enable verbose logging",
Value: false,
}
// FlagOpenvpnBinary openvpn binary to use for OpenVPN connections.
FlagOpenvpnBinary = cli.StringFlag{
Name: "openvpn.binary",
Usage: "openvpn binary to use for OpenVPN connections",
Value: "openvpn",
}
// FlagQualityType quality oracle adapter.
FlagQualityType = cli.StringFlag{
Name: "quality.type",
Usage: "Quality Oracle adapter. Options: (elastic, morqa, none - opt-out from sending quality metrics)",
Value: "morqa",
}
// FlagQualityAddress quality oracle URL.
FlagQualityAddress = cli.StringFlag{
Name: "quality.address",
Usage: fmt.Sprintf(
"Address of specific Quality Oracle adapter given in '--%s'",
FlagQualityType.Name,
),
Value: "https://testnet3-quality.mysterium.network/api/v2",
}
// FlagTequilapiAddress IP address of interface to listen for incoming connections.
FlagTequilapiAddress = cli.StringFlag{
Name: "tequilapi.address",
Usage: "IP address to bind API to",
Value: "127.0.0.1",
}
// FlagTequilapiPort port for listening for incoming API requests.
FlagTequilapiPort = cli.IntFlag{
Name: "tequilapi.port",
	Usage: "Port to listen on for incoming API requests",
Value: 4050,
}
// FlagTequilapiDebugMode debug mode for tequilapi.
FlagTequilapiDebugMode = cli.BoolFlag{
Name: "tequilapi.debug",
Usage: "Starts tequilapi in debug mode",
Value: false,
}
// FlagTequilapiUsername username for API authentication.
FlagTequilapiUsername = cli.StringFlag{
Name: "tequilapi.auth.username",
Usage: "Default username for API authentication",
Value: "myst",
}
// FlagTequilapiPassword username for API authentication.
FlagTequilapiPassword = cli.StringFlag{
Name: "tequilapi.auth.password",
Usage: "Default password for API authentication",
Value: "mystberry",
}
// FlagPProfEnable enables pprof via TequilAPI.
FlagPProfEnable = cli.BoolFlag{
Name: "pprof.enable",
Usage: "Enables pprof",
Value: false,
}
// FlagUIEnable enables built-in web UI for node.
FlagUIEnable = cli.BoolFlag{
Name: "ui.enable",
Usage: "Enables the Web UI",
Value: true,
}
// FlagUIAddress IP address of interface to listen for incoming connections.
FlagUIAddress = cli.StringFlag{
Name: "ui.address",
Usage: "IP address to bind Web UI to. Address can be comma delimited: '192.168.1.10,192.168.1.20'. (default - 127.0.0.1 and local LAN IP)",
Value: "",
}
// FlagUIPort runs web UI on the specified port.
FlagUIPort = cli.IntFlag{
Name: "ui.port",
Usage: "The port to run Web UI on",
Value: 4449,
}
// FlagUserMode allows to run node under current user without sudo.
FlagUserMode = cli.BoolFlag{
Name: "usermode",
Usage: "Run as a regular user. Delegate elevated commands to the supervisor.",
Value: false,
}
// FlagVendorID identifies 3rd party vendor (distributor) of Mysterium node.
FlagVendorID = cli.StringFlag{
Name: "vendor.id",
Usage: "Marks vendor (distributor) of the node for collecting statistics. " +
"3rd party vendors may use their own identifier here.",
}
// FlagP2PListenPorts sets manual ports for p2p connections.
// TODO: remove the deprecated flag once all users stop to use it.
FlagP2PListenPorts = cli.StringFlag{
Name: "p2p.listen.ports",
Usage: "Deprecated flag, use --udp.ports to set range of listen ports",
Value: "0:0",
}
// FlagConsumer sets to run as consumer only which allows to skip bootstrap for some of the dependencies.
FlagConsumer = cli.BoolFlag{
Name: "consumer",
Usage: "Run in consumer mode only.",
Value: false,
}
// FlagDefaultCurrency sets the default currency used in node
FlagDefaultCurrency = cli.StringFlag{
Name: "default-currency",
Usage: "Default currency used in node and apps that depend on it",
Value: metadata.DefaultNetwork.DefaultCurrency,
Hidden: true, // Users are not meant to touch or see this.
}
// FlagDocsURL sets the URL which leads to node documentation.
FlagDocsURL = cli.StringFlag{
Name: "docs-url",
Usage: "URL leading to node documentation",
Value: "https://docs.mysterium.network",
Hidden: true,
}
// FlagDNSResolutionHeadstart sets the dns resolution head start for swarm dialer.
FlagDNSResolutionHeadstart = cli.DurationFlag{
Name: "dns-resolution-headstart",
Usage: "the headstart we give DNS lookups versus IP lookups",
Value: time.Millisecond * 1500,
Hidden: true,
}
// FlagResidentCountry sets the resident country
FlagResidentCountry = cli.StringFlag{
Name: "resident-country",
	Usage: "Set the resident country. If not set initially, a default country will be resolved.",
}
// FlagNATStatusPollInterval nat status poll interval in seconds
FlagNATStatusPollInterval = cli.DurationFlag{
Name: "nat-status.poll",
Usage: `NAT status poll interval. (lower than 1 minute will be set to 1 minute)`,
Value: 5 * time.Minute,
}
)
// RegisterFlagsNode function register node flags to flag list
func RegisterFlagsNode(flags *[]cli.Flag) error {
if err := RegisterFlagsDirectory(flags); err != nil {
return err
}
RegisterFlagsLocation(flags)
RegisterFlagsNetwork(flags)
RegisterFlagsTransactor(flags)
RegisterFlagsPayments(flags)
RegisterFlagsPolicy(flags)
RegisterFlagsMMN(flags)
RegisterFlagsPilvytis(flags)
RegisterFlagsChains(flags)
*flags = append(*flags,
&FlagBindAddress,
&FlagDiscoveryType,
&FlagDiscoveryPingInterval,
&FlagDiscoveryFetchInterval,
&FlagDHTAddress,
&FlagDHTPort,
&FlagDHTProtocol,
&FlagDHTBootstrapPeers,
&FlagFeedbackURL,
&FlagFirewallKillSwitch,
&FlagFirewallProtectedNetworks,
&FlagShaperEnabled,
&FlagShaperBandwidth,
&FlagKeystoreLightweight,
&FlagLogHTTP,
&FlagLogLevel,
&FlagVerbose,
&FlagOpenvpnBinary,
&FlagQualityType,
&FlagQualityAddress,
&FlagTequilapiAddress,
&FlagTequilapiPort,
&FlagTequilapiUsername,
&FlagTequilapiPassword,
&FlagPProfEnable,
&FlagUIEnable,
&FlagUIAddress,
&FlagUIPort,
&FlagUserMode,
&FlagVendorID,
&FlagP2PListenPorts,
&FlagConsumer,
&FlagDefaultCurrency,
&FlagDocsURL,
&FlagDNSResolutionHeadstart,
&FlagResidentCountry,
&FlagNATStatusPollInterval,
)
return nil
}
// ParseFlagsNode function fills in node options from CLI context
func ParseFlagsNode(ctx *cli.Context) {
ParseFlagsDirectory(ctx)
ParseFlagsLocation(ctx)
ParseFlagsNetwork(ctx)
ParseFlagsTransactor(ctx)
ParseFlagsPayments(ctx)
ParseFlagsPolicy(ctx)
ParseFlagsMMN(ctx)
ParseFlagPilvytis(ctx)
ParseFlagsChains(ctx)
Current.ParseStringFlag(ctx, FlagBindAddress)
Current.ParseStringSliceFlag(ctx, FlagDiscoveryType)
Current.ParseDurationFlag(ctx, FlagDiscoveryPingInterval)
Current.ParseDurationFlag(ctx, FlagDiscoveryFetchInterval)
Current.ParseStringFlag(ctx, FlagDHTAddress)
Current.ParseIntFlag(ctx, FlagDHTPort)
Current.ParseStringFlag(ctx, FlagDHTProtocol)
Current.ParseStringSliceFlag(ctx, FlagDHTBootstrapPeers)
Current.ParseStringFlag(ctx, FlagFeedbackURL)
Current.ParseBoolFlag(ctx, FlagFirewallKillSwitch)
Current.ParseStringFlag(ctx, FlagFirewallProtectedNetworks)
Current.ParseBoolFlag(ctx, FlagShaperEnabled)
Current.ParseUInt64Flag(ctx, FlagShaperBandwidth)
Current.ParseBoolFlag(ctx, FlagKeystoreLightweight)
Current.ParseBoolFlag(ctx, FlagLogHTTP)
Current.ParseBoolFlag(ctx, FlagVerbose)
Current.ParseStringFlag(ctx, FlagLogLevel)
Current.ParseStringFlag(ctx, FlagOpenvpnBinary)
Current.ParseStringFlag(ctx, FlagQualityAddress)
Current.ParseStringFlag(ctx, FlagQualityType)
Current.ParseStringFlag(ctx, FlagTequilapiAddress)
Current.ParseIntFlag(ctx, FlagTequilapiPort)
Current.ParseStringFlag(ctx, FlagTequilapiUsername)
Current.ParseStringFlag(ctx, FlagTequilapiPassword)
Current.ParseBoolFlag(ctx, FlagPProfEnable)
Current.ParseBoolFlag(ctx, FlagUIEnable)
Current.ParseStringFlag(ctx, FlagUIAddress)
Current.ParseIntFlag(ctx, FlagUIPort)
Current.ParseBoolFlag(ctx, FlagUserMode)
Current.ParseStringFlag(ctx, FlagVendorID)
Current.ParseStringFlag(ctx, FlagP2PListenPorts)
Current.ParseBoolFlag(ctx, FlagConsumer)
Current.ParseStringFlag(ctx, FlagDefaultCurrency)
Current.ParseStringFlag(ctx, FlagDocsURL)
Current.ParseDurationFlag(ctx, FlagDNSResolutionHeadstart)
Current.ParseDurationFlag(ctx, FlagNATStatusPollInterval)
ValidateAddressFlags(FlagTequilapiAddress)
}
// ValidateAddressFlags validates given address flags for public exposure
func ValidateAddressFlags(flags ...cli.StringFlag) {
for _, flag := range flags {
if flag.Value == "localhost" || flag.Value == "127.0.0.1" {
return
}
		log.Warn().Msgf("Possible security vulnerability by flag `%s`, `%s` might be reachable from outside! "+
			"Ensure it's set to localhost or protected by a firewall.", flag.Name, flag.Value)
}
}
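
// hostnameAllowed is an illustrative sketch only (an assumption, not part of
// this file and not referenced by it): one plausible reading of the "prepend
// value with dot for wildcard mask" rule used by --tequilapi.allowed-hostnames.
// A leading-dot entry such as ".localhost" matches the bare domain and any
// subdomain; a plain entry must match exactly.
func hostnameAllowed(host string, allowed []string) bool {
	for _, entry := range allowed {
		entry = strings.TrimSpace(entry)
		if strings.HasPrefix(entry, ".") {
			// Wildcard mask: accept "localhost" as well as "api.localhost".
			if host == strings.TrimPrefix(entry, ".") || strings.HasSuffix(host, entry) {
				return true
			}
		} else if host == entry {
			return true
		}
	}
	return false
}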
| 1 | 17,301 | Can this be `Hidden: true` as we already have a bunch of flags that are printed on `--help` and this seems like an average person should not care about it. | mysteriumnetwork-node | go |
@@ -488,11 +488,13 @@ class Controller
$useCache = !Config::get('cms.twigNoCache');
$isDebugMode = Config::get('app.debug', false);
+ $strictVariables = (Config::get('cms.enableTwigStrictVariables', null) !== null) ?: $isDebugMode;
$forceBytecode = Config::get('cms.forceBytecodeInvalidation', false);
$options = [
'auto_reload' => true,
'debug' => $isDebugMode,
+ 'strict_variables' => $strictVariables,
];
if ($useCache) { | 1 | <?php namespace Cms\Classes;
use Cms;
use Url;
use Str;
use App;
use File;
use View;
use Lang;
use Flash;
use Config;
use Session;
use Request;
use Response;
use Exception;
use BackendAuth;
use Twig_Environment;
use Twig_Cache_Filesystem;
use Cms\Twig\Loader as TwigLoader;
use Cms\Twig\DebugExtension;
use Cms\Twig\Extension as CmsTwigExtension;
use Cms\Models\MaintenanceSetting;
use System\Models\RequestLog;
use System\Helpers\View as ViewHelper;
use System\Classes\ErrorHandler;
use System\Classes\CombineAssets;
use System\Twig\Extension as SystemTwigExtension;
use October\Rain\Exception\AjaxException;
use October\Rain\Exception\SystemException;
use October\Rain\Exception\ValidationException;
use October\Rain\Exception\ApplicationException;
use October\Rain\Parse\Bracket as TextParser;
use Illuminate\Http\RedirectResponse;
/**
* The CMS controller class.
* The controller finds and serves requested pages.
*
* @package october\cms
* @author Alexey Bobkov, Samuel Georges
*/
class Controller
{
use \System\Traits\AssetMaker;
use \System\Traits\EventEmitter;
/**
* @var \Cms\Classes\Theme A reference to the CMS theme processed by the controller.
*/
protected $theme;
/**
* @var \Cms\Classes\Router A reference to the Router object.
*/
protected $router;
/**
* @var \Cms\Twig\Loader A reference to the Twig template loader.
*/
protected $loader;
/**
* @var \Cms\Classes\Page A reference to the CMS page template being processed.
*/
protected $page;
/**
* @var \Cms\Classes\CodeBase A reference to the CMS page code section object.
*/
protected $pageObj;
/**
* @var \Cms\Classes\Layout A reference to the CMS layout template used by the page.
*/
protected $layout;
/**
* @var \Cms\Classes\CodeBase A reference to the CMS layout code section object.
*/
protected $layoutObj;
/**
* @var \Twig_Environment Keeps the Twig environment object.
*/
protected $twig;
/**
* @var string Contains the rendered page contents string.
*/
protected $pageContents;
/**
* @var array A list of variables to pass to the page.
*/
public $vars = [];
/**
* @var int Response status code
*/
protected $statusCode = 200;
/**
* @var self Cache of self
*/
protected static $instance = null;
/**
* @var \Cms\Classes\ComponentBase Object of the active component, used internally.
*/
protected $componentContext = null;
/**
* @var array Component partial stack, used internally.
*/
protected $partialStack = [];
/**
* Creates the controller.
* @param \Cms\Classes\Theme $theme Specifies the CMS theme.
* If the theme is not specified, the current active theme used.
*/
public function __construct($theme = null)
{
$this->theme = $theme ? $theme : Theme::getActiveTheme();
if (!$this->theme) {
throw new CmsException(Lang::get('cms::lang.theme.active.not_found'));
}
$this->assetPath = Config::get('cms.themesPath', '/themes').'/'.$this->theme->getDirName();
$this->router = new Router($this->theme);
$this->partialStack = new PartialStack;
$this->initTwigEnvironment();
self::$instance = $this;
}
/**
* Finds and serves the requested page.
* If the page cannot be found, returns the page with the URL /404.
* If the /404 page doesn't exist, returns the system 404 page.
* @param string $url Specifies the requested page URL.
     * If the parameter is omitted, the current URL is used.
* @return string Returns the processed page content.
*/
public function run($url = '/')
{
if ($url === null) {
$url = Request::path();
}
if (!strlen($url)) {
$url = '/';
}
/*
* Hidden page
*/
$page = $this->router->findByUrl($url);
if ($page && $page->is_hidden) {
if (!BackendAuth::getUser()) {
$page = null;
}
}
/*
* Maintenance mode
*/
if (
MaintenanceSetting::isConfigured() &&
MaintenanceSetting::get('is_enabled', false) &&
!BackendAuth::getUser()
) {
if (!Request::ajax()) {
$this->setStatusCode(503);
}
$page = Page::loadCached($this->theme, MaintenanceSetting::get('cms_page'));
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeDisplay', [$url, $page])) {
if ($event instanceof Page) {
$page = $event;
}
else {
return $event;
}
}
/*
* If the page was not found, render the 404 page - either provided by the theme or the built-in one.
*/
if (!$page || $url === '404') {
if (!Request::ajax()) {
$this->setStatusCode(404);
}
// Log the 404 request
if (!App::runningUnitTests()) {
RequestLog::add();
}
if (!$page = $this->router->findByUrl('/404')) {
return Response::make(View::make('cms::404'), $this->statusCode);
}
}
/*
* Run the page
*/
$result = $this->runPage($page);
/*
* Post-processing
*/
$result = $this->postProcessResult($page, $url, $result);
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.display', [$url, $page, $result])) {
return $event;
}
if (!is_string($result)) {
return $result;
}
return Response::make($result, $this->statusCode);
}
/**
* Renders a page in its entirety, including component initialization.
* AJAX will be disabled for this process.
* @param string $pageFile Specifies the CMS page file name to run.
* @param array $parameters Routing parameters.
* @param \Cms\Classes\Theme $theme Theme object
*/
public static function render($pageFile, $parameters = [], $theme = null)
{
if (!$theme && (!$theme = Theme::getActiveTheme())) {
throw new CmsException(Lang::get('cms::lang.theme.active.not_found'));
}
$controller = new static($theme);
$controller->getRouter()->setParameters($parameters);
if (($page = Page::load($theme, $pageFile)) === null) {
throw new CmsException(Lang::get('cms::lang.page.not_found_name', ['name'=>$pageFile]));
}
return $controller->runPage($page, false);
}
/**
* Runs a page directly from its object and supplied parameters.
* @param \Cms\Classes\Page $page Specifies the CMS page to run.
* @return string
*/
public function runPage($page, $useAjax = true)
{
$this->page = $page;
/*
         * If the page doesn't refer to any layout, create the fallback layout.
* Otherwise load the layout specified in the page.
*/
if (!$page->layout) {
$layout = Layout::initFallback($this->theme);
}
elseif (($layout = Layout::loadCached($this->theme, $page->layout)) === null) {
throw new CmsException(Lang::get('cms::lang.layout.not_found_name', ['name'=>$page->layout]));
}
$this->layout = $layout;
/*
* The 'this' variable is reserved for default variables.
*/
$this->vars['this'] = [
'page' => $this->page,
'layout' => $this->layout,
'theme' => $this->theme,
'param' => $this->router->getParameters(),
'controller' => $this,
'environment' => App::environment(),
'session' => App::make('session'),
];
/*
* Check for the presence of validation errors in the session.
*/
$this->vars['errors'] = (Config::get('session.driver') && Session::has('errors'))
? Session::get('errors')
: new \Illuminate\Support\ViewErrorBag;
/*
* Handle AJAX requests and execute the life cycle functions
*/
$this->initCustomObjects();
$this->initComponents();
/*
* Give the layout and page an opportunity to participate
* after components are initialized and before AJAX is handled.
*/
if ($this->layoutObj) {
CmsException::mask($this->layout, 300);
$this->layoutObj->onInit();
CmsException::unmask();
}
CmsException::mask($this->page, 300);
$this->pageObj->onInit();
CmsException::unmask();
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.init', [$page])) {
return $event;
}
/*
* Execute AJAX event
*/
if ($useAjax && $ajaxResponse = $this->execAjaxHandlers()) {
return $ajaxResponse;
}
/*
* Execute postback handler
*/
if (
$useAjax &&
($handler = post('_handler')) &&
($this->verifyCsrfToken()) &&
($handlerResponse = $this->runAjaxHandler($handler)) &&
$handlerResponse !== true
) {
return $handlerResponse;
}
/*
* Execute page lifecycle
*/
if ($cycleResponse = $this->execPageCycle()) {
return $cycleResponse;
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeRenderPage', [$page])) {
$this->pageContents = $event;
}
else {
/*
* Render the page
*/
CmsException::mask($this->page, 400);
$this->loader->setObject($this->page);
$template = $this->twig->loadTemplate($this->page->getFilePath());
$this->pageContents = $template->render($this->vars);
CmsException::unmask();
}
/*
* Render the layout
*/
CmsException::mask($this->layout, 400);
$this->loader->setObject($this->layout);
$template = $this->twig->loadTemplate($this->layout->getFilePath());
$result = $template->render($this->vars);
CmsException::unmask();
return $result;
}
/**
* Invokes the current page cycle without rendering the page,
     * used by AJAX handlers that may rely on the logic inside the action.
*/
public function pageCycle()
{
return $this->execPageCycle();
}
/**
* Executes the page life cycle.
* Creates an object from the PHP sections of the page and
     * its layout, then executes their life cycle functions.
*/
protected function execPageCycle()
{
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.start')) {
return $event;
}
/*
* Run layout functions
*/
if ($this->layoutObj) {
CmsException::mask($this->layout, 300);
$response = (
($result = $this->layoutObj->onStart()) ||
($result = $this->layout->runComponents()) ||
($result = $this->layoutObj->onBeforePageStart())
) ? $result : null;
CmsException::unmask();
if ($response) {
return $response;
}
}
/*
* Run page functions
*/
CmsException::mask($this->page, 300);
$response = (
($result = $this->pageObj->onStart()) ||
($result = $this->page->runComponents()) ||
($result = $this->pageObj->onEnd())
) ? $result : null;
CmsException::unmask();
if ($response) {
return $response;
}
/*
* Run remaining layout functions
*/
if ($this->layoutObj) {
CmsException::mask($this->layout, 300);
$response = ($result = $this->layoutObj->onEnd()) ? $result : null;
CmsException::unmask();
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.end')) {
return $event;
}
return $response;
}
/**
* Post-processes page HTML code before it's sent to the client.
     * Note: for pre-processing, see the cms.template.processTwigContent event.
* @param \Cms\Classes\Page $page Specifies the current CMS page.
* @param string $url Specifies the current URL.
     * @param string $content The page markup to post-process.
* @return string Returns the updated result string.
*/
protected function postProcessResult($page, $url, $content)
{
$content = MediaViewHelper::instance()->processHtml($content);
$dataHolder = (object) ['content' => $content];
$this->fireSystemEvent('cms.page.postprocess', [$url, $page, $dataHolder]);
return $dataHolder->content;
}
//
// Initialization
//
/**
* Initializes the Twig environment and loader.
* Registers the \Cms\Twig\Extension object with Twig.
* @return void
*/
protected function initTwigEnvironment()
{
$this->loader = new TwigLoader;
$useCache = !Config::get('cms.twigNoCache');
$isDebugMode = Config::get('app.debug', false);
$forceBytecode = Config::get('cms.forceBytecodeInvalidation', false);
$options = [
'auto_reload' => true,
'debug' => $isDebugMode,
];
if ($useCache) {
$options['cache'] = new Twig_Cache_Filesystem(
storage_path().'/cms/twig',
$forceBytecode ? Twig_Cache_Filesystem::FORCE_BYTECODE_INVALIDATION : 0
);
}
$this->twig = new Twig_Environment($this->loader, $options);
$this->twig->addExtension(new CmsTwigExtension($this));
$this->twig->addExtension(new SystemTwigExtension);
if ($isDebugMode) {
$this->twig->addExtension(new DebugExtension($this));
}
}
/**
* Initializes the custom layout and page objects.
* @return void
*/
protected function initCustomObjects()
{
$this->layoutObj = null;
if (!$this->layout->isFallBack()) {
CmsException::mask($this->layout, 300);
$parser = new CodeParser($this->layout);
$this->layoutObj = $parser->source($this->page, $this->layout, $this);
CmsException::unmask();
}
CmsException::mask($this->page, 300);
$parser = new CodeParser($this->page);
$this->pageObj = $parser->source($this->page, $this->layout, $this);
CmsException::unmask();
}
/**
* Initializes the components for the layout and page.
* @return void
*/
protected function initComponents()
{
if (!$this->layout->isFallBack()) {
foreach ($this->layout->settings['components'] as $component => $properties) {
list($name, $alias) = strpos($component, ' ')
? explode(' ', $component)
: [$component, $component];
$this->addComponent($name, $alias, $properties, true);
}
}
foreach ($this->page->settings['components'] as $component => $properties) {
list($name, $alias) = strpos($component, ' ')
? explode(' ', $component)
: [$component, $component];
$this->addComponent($name, $alias, $properties);
}
/*
* Extensibility
*/
$this->fireSystemEvent('cms.page.initComponents', [$this->page, $this->layout]);
}
//
// AJAX
//
/**
* Returns the AJAX handler for the current request, if available.
* @return string
*/
public function getAjaxHandler()
{
if (!Request::ajax() || Request::method() != 'POST') {
return null;
}
if ($handler = Request::header('X_OCTOBER_REQUEST_HANDLER')) {
return trim($handler);
}
return null;
}
/**
* Executes the page, layout, component and plugin AJAX handlers.
* @return mixed Returns the AJAX Response object or null.
*/
protected function execAjaxHandlers()
{
if ($handler = $this->getAjaxHandler()) {
try {
/*
* Validate the handler name
*/
if (!preg_match('/^(?:\w+\:{2})?on[A-Z]{1}[\w+]*$/', $handler)) {
throw new CmsException(Lang::get('cms::lang.ajax_handler.invalid_name', ['name'=>$handler]));
}
/*
* Validate the handler partial list
*/
if ($partialList = trim(Request::header('X_OCTOBER_REQUEST_PARTIALS'))) {
$partialList = explode('&', $partialList);
foreach ($partialList as $partial) {
if (!preg_match('/^(?:\w+\:{2}|@)?[a-z0-9\_\-\.\/]+$/i', $partial)) {
throw new CmsException(Lang::get('cms::lang.partial.invalid_name', ['name'=>$partial]));
}
}
}
else {
$partialList = [];
}
$responseContents = [];
/*
* Execute the handler
*/
if (!$result = $this->runAjaxHandler($handler)) {
throw new CmsException(Lang::get('cms::lang.ajax_handler.not_found', ['name'=>$handler]));
}
/*
* Render partials and return the response as array that will be converted to JSON automatically.
*/
foreach ($partialList as $partial) {
$responseContents[$partial] = $this->renderPartial($partial);
}
/*
* If the handler returned a redirect, process the URL and dispose of it so
* framework.js knows to redirect the browser and not the request!
*/
if ($result instanceof RedirectResponse) {
$responseContents['X_OCTOBER_REDIRECT'] = $result->getTargetUrl();
$result = null;
}
/*
* No redirect is used, look for any flash messages
*/
elseif (Request::header('X_OCTOBER_REQUEST_FLASH') && Flash::check()) {
$responseContents['X_OCTOBER_FLASH_MESSAGES'] = Flash::all();
}
/*
* If the handler returned an array, we should add it to output for rendering.
* If it is a string, add it to the array with the key "result".
* If an object, pass it to Laravel as a response object.
*/
if (is_array($result)) {
$responseContents = array_merge($responseContents, $result);
}
elseif (is_string($result)) {
$responseContents['result'] = $result;
}
elseif (is_object($result)) {
return $result;
}
return Response::make($responseContents, $this->statusCode);
}
catch (ValidationException $ex) {
/*
* Handle validation errors
*/
$responseContents['X_OCTOBER_ERROR_FIELDS'] = $ex->getFields();
$responseContents['X_OCTOBER_ERROR_MESSAGE'] = $ex->getMessage();
throw new AjaxException($responseContents);
}
catch (Exception $ex) {
throw $ex;
}
}
return null;
}
/**
* Tries to find and run an AJAX handler in the page, layout, components and plugins.
* The method stops as soon as the handler is found.
* @param string $handler name of the ajax handler
* @return boolean Returns true if the handler was found. Returns false otherwise.
*/
protected function runAjaxHandler($handler)
{
/*
* Process Component handler
*/
if (strpos($handler, '::')) {
list($componentName, $handlerName) = explode('::', $handler);
$componentObj = $this->findComponentByName($componentName);
if ($componentObj && $componentObj->methodExists($handlerName)) {
$this->componentContext = $componentObj;
$result = $componentObj->runAjaxHandler($handlerName);
return ($result) ?: true;
}
}
/*
* Process code section handler
*/
else {
if (method_exists($this->pageObj, $handler)) {
$result = $this->pageObj->$handler();
return ($result) ?: true;
}
if (!$this->layout->isFallBack() && method_exists($this->layoutObj, $handler)) {
$result = $this->layoutObj->$handler();
return ($result) ?: true;
}
/*
* Cycle each component to locate a usable handler
*/
if (($componentObj = $this->findComponentByHandler($handler)) !== null) {
$this->componentContext = $componentObj;
$result = $componentObj->runAjaxHandler($handler);
return ($result) ?: true;
}
}
/*
* Generic handler that does nothing
*/
if ($handler == 'onAjax') {
return true;
}
return false;
}
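
    /*
     * Illustrative note (hypothetical handler names, not part of the original
     * file): a request for "myComponent::onLoad" is routed directly to that
     * component, while a bare "onLoad" is searched on the page object, then
     * the layout object, then each component, in that order, as implemented
     * above.
     */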
//
// Rendering
//
/**
* Renders a requested page.
* The framework uses this method internally.
*/
public function renderPage()
{
$contents = $this->pageContents;
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.render', [$contents])) {
return $event;
}
return $contents;
}
/**
* Renders a requested partial.
* The framework uses this method internally.
* @param string $name The view to load.
* @param array $parameters Parameter variables to pass to the view.
* @param bool $throwException Throw an exception if the partial is not found.
* @return mixed Partial contents or false if not throwing an exception.
*/
public function renderPartial($name, $parameters = [], $throwException = true)
{
$vars = $this->vars;
$this->vars = array_merge($this->vars, $parameters);
/*
* Alias @ symbol for ::
*/
if (substr($name, 0, 1) == '@') {
$name = '::' . substr($name, 1);
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeRenderPartial', [$name])) {
$partial = $event;
}
/*
* Process Component partial
*/
elseif (strpos($name, '::') !== false) {
list($componentAlias, $partialName) = explode('::', $name);
/*
* Component alias not supplied
*/
if (!strlen($componentAlias)) {
if ($this->componentContext !== null) {
$componentObj = $this->componentContext;
}
elseif (($componentObj = $this->findComponentByPartial($partialName)) === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.partial.not_found_name', ['name'=>$partialName]));
}
else {
return false;
}
}
}
/*
* Component alias is supplied
*/
else {
if (($componentObj = $this->findComponentByName($componentAlias)) === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$componentAlias]));
}
else {
return false;
}
}
}
$partial = null;
$this->componentContext = $componentObj;
/*
* Check if the theme has an override
*/
if (strpos($partialName, '/') === false) {
$partial = ComponentPartial::loadOverrideCached($this->theme, $componentObj, $partialName);
}
/*
* Check the component partial
*/
if ($partial === null) {
$partial = ComponentPartial::loadCached($componentObj, $partialName);
}
if ($partial === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.partial.not_found_name', ['name'=>$name]));
}
else {
return false;
}
}
/*
* Set context for self access
*/
$this->vars['__SELF__'] = $componentObj;
}
else {
/*
* Process theme partial
*/
if (($partial = Partial::loadCached($this->theme, $name)) === null) {
if ($throwException) {
throw new CmsException(Lang::get('cms::lang.partial.not_found_name', ['name'=>$name]));
}
else {
return false;
}
}
}
/*
* Run functions for CMS partials only (Cms\Classes\Partial)
*/
if ($partial instanceof Partial) {
$this->partialStack->stackPartial();
$manager = ComponentManager::instance();
foreach ($partial->settings['components'] as $component => $properties) {
                // Do not inject the viewBag component into the environment.
// Not sure if they're needed there by the requirements,
// but there were problems with array-typed properties used by Static Pages
// snippets and setComponentPropertiesFromParams(). --ab
if ($component == 'viewBag') {
continue;
}
list($name, $alias) = strpos($component, ' ')
? explode(' ', $component)
: [$component, $component];
if (!$componentObj = $manager->makeComponent($name, $this->pageObj, $properties)) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$name]));
}
$componentObj->alias = $alias;
$parameters[$alias] = $partial->components[$alias] = $componentObj;
$this->partialStack->addComponent($alias, $componentObj);
$this->setComponentPropertiesFromParams($componentObj, $parameters);
$componentObj->init();
}
CmsException::mask($this->page, 300);
$parser = new CodeParser($partial);
$partialObj = $parser->source($this->page, $this->layout, $this);
CmsException::unmask();
CmsException::mask($partial, 300);
$partialObj->onStart();
$partial->runComponents();
$partialObj->onEnd();
CmsException::unmask();
}
/*
* Render the partial
*/
CmsException::mask($partial, 400);
$this->loader->setObject($partial);
$template = $this->twig->loadTemplate($partial->getFilePath());
$partialContent = $template->render(array_merge($this->vars, $parameters));
CmsException::unmask();
if ($partial instanceof Partial) {
$this->partialStack->unstackPartial();
}
$this->vars = $vars;
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.renderPartial', [$name, &$partialContent])) {
return $event;
}
return $partialContent;
}
/**
* Renders a requested content file.
* The framework uses this method internally.
* @param string $name The content view to load.
* @param array $parameters Parameter variables to pass to the view.
* @return string
*/
public function renderContent($name, $parameters = [])
{
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.beforeRenderContent', [$name])) {
$content = $event;
}
/*
* Load content from theme
*/
elseif (($content = Content::loadCached($this->theme, $name)) === null) {
throw new CmsException(Lang::get('cms::lang.content.not_found_name', ['name'=>$name]));
}
$fileContent = $content->parsedMarkup;
/*
* Inject global view variables
*/
$globalVars = ViewHelper::getGlobalVars();
if (!empty($globalVars)) {
$parameters = (array) $parameters + $globalVars;
}
/*
* Parse basic template variables
*/
if (!empty($parameters)) {
$fileContent = TextParser::parse($fileContent, $parameters);
}
/*
* Extensibility
*/
if ($event = $this->fireSystemEvent('cms.page.renderContent', [$name, &$fileContent])) {
return $event;
}
return $fileContent;
}
/**
* Renders a component's default content, preserves the previous component context.
* @param $name
* @param array $parameters
* @return string Returns the component default contents.
*/
public function renderComponent($name, $parameters = [])
{
$result = null;
$previousContext = $this->componentContext;
if ($componentObj = $this->findComponentByName($name)) {
$componentObj->id = uniqid($name);
$componentObj->setProperties(array_merge($componentObj->getProperties(), $parameters));
$this->componentContext = $componentObj;
$result = $componentObj->onRender();
}
if (!$result) {
$result = $this->renderPartial($name.'::default', [], false);
}
$this->componentContext = $previousContext;
return $result;
}
//
// Setters
//
/**
* Sets the status code for the current web response.
* @param int $code Status code
* @return self
*/
public function setStatusCode($code)
{
$this->statusCode = (int) $code;
return $this;
}
//
// Getters
//
/**
* Returns the status code for the current web response.
* @return int Status code
*/
public function getStatusCode()
{
return $this->statusCode;
}
/**
* Returns an existing instance of the controller.
     * If the controller doesn't exist, returns null.
* @return mixed Returns the controller object or null.
*/
public static function getController()
{
return self::$instance;
}
/**
* Returns the current CMS theme.
* @return \Cms\Classes\Theme
*/
public function getTheme()
{
return $this->theme;
}
/**
* Returns the Twig environment.
* @return Twig_Environment
*/
public function getTwig()
{
return $this->twig;
}
/**
* Returns the Twig loader.
* @return \Cms\Twig\Loader
*/
public function getLoader()
{
return $this->loader;
}
/**
* Returns the routing object.
* @return \Cms\Classes\Router
*/
public function getRouter()
{
return $this->router;
}
/**
* Intended to be called from the layout, returns the page code base object.
* @return \Cms\Classes\CodeBase
*/
public function getPageObject()
{
return $this->pageObj;
}
/**
* Returns the CMS page object being processed by the controller.
     * The object is not available during the early stages of the controller
* initialization.
* @return \Cms\Classes\Page Returns the Page object or null.
*/
public function getPage()
{
return $this->page;
}
/**
* Intended to be called from the page, returns the layout code base object.
* @return \Cms\Classes\CodeBase
*/
public function getLayoutObject()
{
return $this->layoutObj;
}
//
// Page helpers
//
/**
* Looks up the URL for a supplied page and returns it relative to the website root.
*
* @param mixed $name Specifies the Cms Page file name.
* @param array $parameters Route parameters to consider in the URL.
* @param bool $routePersistence By default the existing routing parameters will be included
* @return string
*/
public function pageUrl($name, $parameters = [], $routePersistence = true)
{
if (!$name) {
return $this->currentPageUrl($parameters, $routePersistence);
}
/*
* Second parameter can act as third
*/
if (is_bool($parameters)) {
$routePersistence = $parameters;
}
if (!is_array($parameters)) {
$parameters = [];
}
if ($routePersistence) {
$parameters = array_merge($this->router->getParameters(), $parameters);
}
if (!$url = $this->router->findByFile($name, $parameters)) {
return null;
}
return Cms::url($url);
}
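
    /*
     * Illustrative usage (hypothetical page names and parameters, not part of
     * the original file):
     *
     *   $controller->pageUrl('blog/post', ['slug' => 'hello']); // merges route params
     *   $controller->pageUrl('blog/post', false); // bool second argument acts
     *                                             // as $routePersistence
     */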
/**
* Looks up the current page URL with supplied parameters and route persistence.
* @param array $parameters
* @param bool $routePersistence
* @return null|string
*/
public function currentPageUrl($parameters = [], $routePersistence = true)
{
if (!$currentFile = $this->page->getFileName()) {
return null;
}
return $this->pageUrl($currentFile, $parameters, $routePersistence);
}
/**
* Converts supplied URL to a theme URL relative to the website root. If the URL provided is an
* array then the files will be combined.
* @param mixed $url Specifies the theme-relative URL. If null, the theme path is returned.
* @return string
*/
public function themeUrl($url = null)
{
$themeDir = $this->getTheme()->getDirName();
if (is_array($url)) {
$_url = Url::to(CombineAssets::combine($url, themes_path().'/'.$themeDir));
}
else {
$_url = Config::get('cms.themesPath', '/themes').'/'.$themeDir;
if ($url !== null) {
$_url .= '/'.$url;
}
$_url = Url::asset($_url);
}
return $_url;
}
/**
* Returns a routing parameter.
* @param string $name Routing parameter name.
* @param string $default Default to use if none is found.
* @return string
*/
public function param($name, $default = null)
{
return $this->router->getParameter($name, $default);
}
//
// Component helpers
//
/**
* Adds a component to the page object
* @param mixed $name Component class name or short name
* @param string $alias Alias to give the component
* @param array $properties Component properties
* @param bool $addToLayout Add to layout, instead of page
* @return ComponentBase Component object
*/
public function addComponent($name, $alias, $properties, $addToLayout = false)
{
$manager = ComponentManager::instance();
if ($addToLayout) {
if (!$componentObj = $manager->makeComponent($name, $this->layoutObj, $properties)) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$name]));
}
$componentObj->alias = $alias;
$this->vars[$alias] = $this->layout->components[$alias] = $componentObj;
}
else {
if (!$componentObj = $manager->makeComponent($name, $this->pageObj, $properties)) {
throw new CmsException(Lang::get('cms::lang.component.not_found', ['name'=>$name]));
}
$componentObj->alias = $alias;
$this->vars[$alias] = $this->page->components[$alias] = $componentObj;
}
$this->setComponentPropertiesFromParams($componentObj);
$componentObj->init();
return $componentObj;
}
/**
* Searches the layout and page components by an alias
* @param $name
* @return ComponentBase The component object, if found
*/
public function findComponentByName($name)
{
if (isset($this->page->components[$name])) {
return $this->page->components[$name];
}
if (isset($this->layout->components[$name])) {
return $this->layout->components[$name];
}
$partialComponent = $this->partialStack->getComponent($name);
if ($partialComponent !== null) {
return $partialComponent;
}
return null;
}
/**
* Searches the layout and page components by an AJAX handler
* @param string $handler
* @return ComponentBase The component object, if found
*/
public function findComponentByHandler($handler)
{
foreach ($this->page->components as $component) {
if ($component->methodExists($handler)) {
return $component;
}
}
foreach ($this->layout->components as $component) {
if ($component->methodExists($handler)) {
return $component;
}
}
return null;
}
/**
* Searches the layout and page components by a partial file
* @param string $partial
* @return ComponentBase The component object, if found
*/
public function findComponentByPartial($partial)
{
foreach ($this->page->components as $component) {
if (ComponentPartial::check($component, $partial)) {
return $component;
}
}
foreach ($this->layout->components as $component) {
if (ComponentPartial::check($component, $partial)) {
return $component;
}
}
return null;
}
/**
* Set the component context manually, used by Components when calling renderPartial.
* @param ComponentBase $component
* @return void
*/
public function setComponentContext(ComponentBase $component = null)
{
$this->componentContext = $component;
}
/**
* Sets component property values from partial parameters.
* The property values should be defined as {{ param }}.
* @param ComponentBase $component The component object.
* @param array $parameters Specifies the partial parameters.
*/
protected function setComponentPropertiesFromParams($component, $parameters = [])
{
$properties = $component->getProperties();
$routerParameters = $this->router->getParameters();
foreach ($properties as $propertyName => $propertyValue) {
if (is_array($propertyValue)) {
continue;
}
$matches = [];
if (preg_match('/^\{\{([^\}]+)\}\}$/', $propertyValue, $matches)) {
$paramName = trim($matches[1]);
if (substr($paramName, 0, 1) == ':') {
$routeParamName = substr($paramName, 1);
$newPropertyValue = array_key_exists($routeParamName, $routerParameters)
? $routerParameters[$routeParamName]
: null;
}
else {
$newPropertyValue = array_key_exists($paramName, $parameters)
? $parameters[$paramName]
: null;
}
$component->setProperty($propertyName, $newPropertyValue);
$component->setExternalPropertyName($propertyName, $paramName);
}
}
}
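
    /*
     * Illustrative note (hypothetical property names, not part of the original
     * file): a component property defined as '{{ :slug }}' is resolved from
     * the router parameter "slug", while '{{ maxItems }}' is resolved from the
     * partial parameters passed to renderPartial(), matching the ':' prefix
     * check above.
     */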
//
// Security
//
/**
* Checks the request data / headers for a valid CSRF token.
* Returns false if a valid token is not found. Override this
* method to disable the check.
* @return bool
*/
protected function verifyCsrfToken()
{
if (!Config::get('cms.enableCsrfProtection')) {
return true;
}
if (in_array(Request::method(), ['HEAD', 'GET', 'OPTIONS'])) {
return true;
}
$token = Request::input('_token') ?: Request::header('X-CSRF-TOKEN');
if (!strlen($token)) {
return false;
}
return hash_equals(
Session::token(),
$token
);
}
}
| 1 | 13,026 | Wouldn't this set $strictVariables = true when Config::get returns false? | octobercms-october | php |
@@ -16,6 +16,7 @@ import javax.json.JsonReader;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import static junit.framework.Assert.assertEquals;
+import org.junit.Ignore;
import org.junit.Test;
import org.xml.sax.ErrorHandler;
import org.xml.sax.InputSource; | 1 | package edu.harvard.iq.dataverse.export;
import com.jayway.restassured.path.xml.XmlPath;
import edu.harvard.iq.dataverse.DatasetVersion;
import edu.harvard.iq.dataverse.util.xml.XmlPrinter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.InputStream;
import java.io.StringReader;
import java.nio.file.Files;
import java.nio.file.Paths;
import javax.json.Json;
import javax.json.JsonObject;
import javax.json.JsonReader;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import static junit.framework.Assert.assertEquals;
import org.junit.Test;
import org.xml.sax.ErrorHandler;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
public class OpenAireExporterTest {
private final OpenAireExporter openAireExporter;
public OpenAireExporterTest() {
openAireExporter = new OpenAireExporter();
}
/**
* Test of getProviderName method, of class OpenAireExporter.
*/
@Test
public void testGetProviderName() {
System.out.println("getProviderName");
OpenAireExporter instance = new OpenAireExporter();
String expResult = "oai_datacite";
String result = instance.getProviderName();
assertEquals(expResult, result);
}
/**
* Test of getDisplayName method, of class OpenAireExporter.
*/
@Test
public void testGetDisplayName() {
System.out.println("getDisplayName");
OpenAireExporter instance = new OpenAireExporter();
String expResult = "OpenAIRE";
String result = instance.getDisplayName();
assertEquals(expResult, result);
}
/**
* Test of exportDataset method, of class OpenAireExporter.
*/
@Test
public void testExportDataset() throws Exception {
System.out.println("exportDataset");
File datasetVersionJson = new File("src/test/java/edu/harvard/iq/dataverse/export/dataset-spruce1.json");
String datasetVersionAsJson = new String(Files.readAllBytes(Paths.get(datasetVersionJson.getAbsolutePath())));
JsonReader jsonReader = Json.createReader(new StringReader(datasetVersionAsJson));
JsonObject jsonObject = jsonReader.readObject();
DatasetVersion nullVersion = null;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
openAireExporter.exportDataset(nullVersion, jsonObject, byteArrayOutputStream);
String xmlOnOneLine = new String(byteArrayOutputStream.toByteArray());
String xmlAsString = XmlPrinter.prettyPrintXml(xmlOnOneLine);
System.out.println("XML: " + xmlAsString);
XmlPath xmlpath = XmlPath.from(xmlAsString);
assertEquals("Spruce Goose", xmlpath.getString("resource.titles.title"));
assertEquals("Spruce, Sabrina", xmlpath.getString("resource.creators.creator.creatorName"));
assertEquals("1.0", xmlpath.getString("resource.version"));
}
/**
     * Test that the output of exportDataset validates against the XML schema.
*/
@Test
public void testValidateExportDataset() throws Exception {
System.out.println("validateExportDataset");
File datasetVersionJson = new File("src/test/java/edu/harvard/iq/dataverse/export/dataset-all-defaults.txt");
String datasetVersionAsJson = new String(Files.readAllBytes(Paths.get(datasetVersionJson.getAbsolutePath())));
JsonReader jsonReader = Json.createReader(new StringReader(datasetVersionAsJson));
JsonObject jsonObject = jsonReader.readObject();
DatasetVersion nullVersion = null;
ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
openAireExporter.exportDataset(nullVersion, jsonObject, byteArrayOutputStream);
{
String xmlOnOneLine = new String(byteArrayOutputStream.toByteArray());
String xmlAsString = XmlPrinter.prettyPrintXml(xmlOnOneLine);
System.out.println("XML: " + xmlAsString);
}
InputStream xmlStream = new ByteArrayInputStream(byteArrayOutputStream.toByteArray());
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
factory.setValidating(true);
factory.setNamespaceAware(true);
factory.setAttribute("http://java.sun.com/xml/jaxp/properties/schemaLanguage",
"http://www.w3.org/2001/XMLSchema");
DocumentBuilder builder = factory.newDocumentBuilder();
builder.setErrorHandler(new ErrorHandler() {
public void warning(SAXParseException e) throws SAXException {
throw new RuntimeException(e);
}
public void error(SAXParseException e) throws SAXException {
throw new RuntimeException(e);
}
public void fatalError(SAXParseException e) throws SAXException {
throw new RuntimeException(e);
}
});
builder.parse(new InputSource(xmlStream));
xmlStream.close();
}
/**
* Test of isXMLFormat method, of class OpenAireExporter.
*/
@Test
public void testIsXMLFormat() {
System.out.println("isXMLFormat");
OpenAireExporter instance = new OpenAireExporter();
Boolean expResult = true;
Boolean result = instance.isXMLFormat();
assertEquals(expResult, result);
}
/**
* Test of isHarvestable method, of class OpenAireExporter.
*/
@Test
public void testIsHarvestable() {
System.out.println("isHarvestable");
OpenAireExporter instance = new OpenAireExporter();
Boolean expResult = true;
Boolean result = instance.isHarvestable();
assertEquals(expResult, result);
}
/**
* Test of isAvailableToUsers method, of class OpenAireExporter.
*/
@Test
public void testIsAvailableToUsers() {
System.out.println("isAvailableToUsers");
OpenAireExporter instance = new OpenAireExporter();
Boolean expResult = true;
Boolean result = instance.isAvailableToUsers();
assertEquals(expResult, result);
}
/**
* Test of getXMLNameSpace method, of class OpenAireExporter.
*/
@Test
public void testGetXMLNameSpace() throws Exception {
System.out.println("getXMLNameSpace");
OpenAireExporter instance = new OpenAireExporter();
String expResult = "http://datacite.org/schema/kernel-4";
String result = instance.getXMLNameSpace();
assertEquals(expResult, result);
}
/**
* Test of getXMLSchemaLocation method, of class OpenAireExporter.
*/
@Test
public void testGetXMLSchemaLocation() throws Exception {
System.out.println("getXMLSchemaLocation");
OpenAireExporter instance = new OpenAireExporter();
String expResult = "http://schema.datacite.org/meta/kernel-4.1/metadata.xsd";
String result = instance.getXMLSchemaLocation();
assertEquals(expResult, result);
}
/**
* Test of getXMLSchemaVersion method, of class OpenAireExporter.
*/
@Test
public void testGetXMLSchemaVersion() throws Exception {
System.out.println("getXMLSchemaVersion");
OpenAireExporter instance = new OpenAireExporter();
String expResult = "4.1";
String result = instance.getXMLSchemaVersion();
assertEquals(expResult, result);
}
}
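
The assertions above use rest-assured's XmlPath to pluck fields out of the pretty-printed DataCite XML. As a rough, language-agnostic analogue of those field checks — sketched in Go with encoding/xml, with struct shapes that are illustrative only, not the real DataCite schema:

package main

import (
	"encoding/xml"
	"fmt"
)

// Illustrative subset of a DataCite-style resource document.
type resource struct {
	Titles struct {
		Title string `xml:"title"`
	} `xml:"titles"`
	Version string `xml:"version"`
}

func main() {
	doc := `<resource><titles><title>Spruce Goose</title></titles><version>1.0</version></resource>`
	var r resource
	if err := xml.Unmarshal([]byte(doc), &r); err != nil {
		panic(err)
	}
	// The equivalent of the XmlPath assertions in the test above.
	fmt.Println(r.Titles.Title == "Spruce Goose") // true
	fmt.Println(r.Version == "1.0")               // true
}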
| 1 | 43,568 | was this meant to be checked in? | IQSS-dataverse | java |
@@ -156,4 +156,8 @@ const (
// attempt to reconnect a route, gateway or leaf node connection.
// The default is to report every attempt.
DEFAULT_RECONNECT_ERROR_REPORTS = 1
+
+ // DEFAULT_RTT_MEASUREMENT_INTERVAL is how often we want to measure RTT from
+ // this server to clients, routes, gateways or leafnode connections.
+ DEFAULT_RTT_MEASUREMENT_INTERVAL = time.Hour
 )
| 1 | // Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"time"
)
// Command is a signal used to control a running nats-server process.
type Command string
// Valid Command values.
const (
CommandStop = Command("stop")
CommandQuit = Command("quit")
CommandReopen = Command("reopen")
CommandReload = Command("reload")
// private for now
commandLDMode = Command("ldm")
)
var (
// gitCommit injected at build
gitCommit string
// trustedKeys is a whitespace separated array of trusted operator's public nkeys.
trustedKeys string
)
const (
// VERSION is the current version for the server.
VERSION = "2.0.1"
// PROTO is the currently supported protocol.
// 0 was the original
// 1 maintains proto 0, adds echo abilities for CONNECT from the client. Clients
// should not send echo unless proto in INFO is >= 1.
PROTO = 1
// DEFAULT_PORT is the default port for client connections.
DEFAULT_PORT = 4222
// RANDOM_PORT is the value for port that, when supplied, will cause the
// server to listen on a randomly-chosen available port. The resolved port
// is available via the Addr() method.
RANDOM_PORT = -1
// DEFAULT_HOST defaults to all interfaces.
DEFAULT_HOST = "0.0.0.0"
// MAX_CONTROL_LINE_SIZE is the maximum allowed protocol control line size.
// 4k should be plenty since payloads sans connect/info string are separate.
MAX_CONTROL_LINE_SIZE = 4096
// MAX_PAYLOAD_SIZE is the maximum allowed payload size. Should be using
// something different if > 1MB payloads are needed.
MAX_PAYLOAD_SIZE = (1024 * 1024)
// MAX_PENDING_SIZE is the maximum outbound pending bytes per client.
MAX_PENDING_SIZE = (64 * 1024 * 1024)
// DEFAULT_MAX_CONNECTIONS is the default maximum connections allowed.
DEFAULT_MAX_CONNECTIONS = (64 * 1024)
// TLS_TIMEOUT is the TLS wait time.
TLS_TIMEOUT = 500 * time.Millisecond
// AUTH_TIMEOUT is the authorization wait time.
AUTH_TIMEOUT = 2 * TLS_TIMEOUT
// DEFAULT_PING_INTERVAL is how often pings are sent to clients and routes.
DEFAULT_PING_INTERVAL = 2 * time.Minute
// DEFAULT_PING_MAX_OUT is maximum allowed pings outstanding before disconnect.
DEFAULT_PING_MAX_OUT = 2
// CR_LF string
CR_LF = "\r\n"
// LEN_CR_LF hold onto the computed size.
LEN_CR_LF = len(CR_LF)
// DEFAULT_FLUSH_DEADLINE is the write/flush deadlines.
DEFAULT_FLUSH_DEADLINE = 2 * time.Second
// DEFAULT_HTTP_PORT is the default monitoring port.
DEFAULT_HTTP_PORT = 8222
// ACCEPT_MIN_SLEEP is the minimum acceptable sleep times on temporary errors.
ACCEPT_MIN_SLEEP = 10 * time.Millisecond
// ACCEPT_MAX_SLEEP is the maximum acceptable sleep times on temporary errors
ACCEPT_MAX_SLEEP = 1 * time.Second
// DEFAULT_ROUTE_CONNECT Route solicitation intervals.
DEFAULT_ROUTE_CONNECT = 1 * time.Second
// DEFAULT_ROUTE_RECONNECT Route reconnect intervals.
DEFAULT_ROUTE_RECONNECT = 1 * time.Second
// DEFAULT_ROUTE_DIAL Route dial timeout.
DEFAULT_ROUTE_DIAL = 1 * time.Second
// DEFAULT_LEAF_NODE_RECONNECT LeafNode reconnect interval.
DEFAULT_LEAF_NODE_RECONNECT = time.Second
// DEFAULT_LEAF_TLS_TIMEOUT TLS timeout for LeafNodes
DEFAULT_LEAF_TLS_TIMEOUT = 2 * time.Second
// PROTO_SNIPPET_SIZE is the default size of proto to print on parse errors.
PROTO_SNIPPET_SIZE = 32
// MAX_MSG_ARGS Maximum possible number of arguments from MSG proto.
MAX_MSG_ARGS = 4
// MAX_PUB_ARGS Maximum possible number of arguments from PUB proto.
MAX_PUB_ARGS = 3
// DEFAULT_MAX_CLOSED_CLIENTS is the maximum number of closed connections we hold onto.
DEFAULT_MAX_CLOSED_CLIENTS = 10000
// DEFAULT_MAX_ACCOUNT_AE_RESPONSE_MAPS is for auto-expire response maps for imports.
DEFAULT_MAX_ACCOUNT_AE_RESPONSE_MAPS = 100000
// DEFAULT_TTL_AE_RESPONSE_MAP is the default time to expire auto-response map entries.
DEFAULT_TTL_AE_RESPONSE_MAP = 10 * time.Minute
// DEFAULT_LAME_DUCK_DURATION is the time in which the server spreads
// the closing of clients when signaled to go in lame duck mode.
DEFAULT_LAME_DUCK_DURATION = 2 * time.Minute
// DEFAULT_LEAFNODE_INFO_WAIT Route dial timeout.
DEFAULT_LEAFNODE_INFO_WAIT = 1 * time.Second
// DEFAULT_CONNECT_ERROR_REPORTS is the number of attempts at which a
// repeated failed route, gateway or leaf node connection is reported.
// This is used for initial connection, that is, when the server has
// never had a connection to the given endpoint. Once connected, and
// if a disconnect occurs, DEFAULT_RECONNECT_ERROR_REPORTS is used
// instead.
// The default is to report every 3600 attempts (roughly every hour).
DEFAULT_CONNECT_ERROR_REPORTS = 3600
// DEFAULT_RECONNECT_ERROR_REPORTS is the default number of failed
// attempt to reconnect a route, gateway or leaf node connection.
// The default is to report every attempt.
DEFAULT_RECONNECT_ERROR_REPORTS = 1
)
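
The diff above introduces DEFAULT_RTT_MEASUREMENT_INTERVAL = time.Hour. A minimal sketch of how such an interval is typically consumed — a ticker loop that samples RTT until stopped; the measureRTT callback and stop channel are assumptions for illustration, not the actual nats-server wiring:

package main

import (
	"fmt"
	"time"
)

// startRTTLoop samples RTT on every tick until stop is closed.
func startRTTLoop(interval time.Duration, measureRTT func() time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("rtt:", measureRTT())
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	// A short interval for the demo; production code would pass the hour-long default.
	go startRTTLoop(10*time.Millisecond, func() time.Duration { return 3 * time.Millisecond }, stop)
	time.Sleep(35 * time.Millisecond)
	close(stop)
}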
| 1 | 9,127 | check spaces after const name... | nats-io-nats-server | go |
@@ -67,7 +67,10 @@ func (agent *ecsAgent) appendVolumeDriverCapabilities(capabilities []*ecs.Attrib
func (agent *ecsAgent) appendNvidiaDriverVersionAttribute(capabilities []*ecs.Attribute) []*ecs.Attribute {
if agent.resourceFields != nil && agent.resourceFields.NvidiaGPUManager != nil {
- capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityNvidiaDriverVersionInfix+agent.resourceFields.NvidiaGPUManager.GetDriverVersion())
+ driverVersion := agent.resourceFields.NvidiaGPUManager.GetDriverVersion()
+ if driverVersion != "" {
+ capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityNvidiaDriverVersionInfix+driverVersion)
+ }
}
return capabilities
 }
| 1 | // +build linux
// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package app
import (
"strings"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi"
"github.com/aws/amazon-ecs-agent/agent/ecs_client/model/ecs"
"github.com/aws/amazon-ecs-agent/agent/taskresource/volume"
"github.com/cihub/seelog"
)
func (agent *ecsAgent) appendVolumeDriverCapabilities(capabilities []*ecs.Attribute) []*ecs.Attribute {
// "local" is default docker driver
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityDockerPluginInfix+volume.DockerLocalVolumeDriver)
// for non-standardized plugins, call docker pkg's plugins.Scan()
nonStandardizedPlugins, err := agent.mobyPlugins.Scan()
if err != nil {
seelog.Warnf("Scanning plugins failed: %v", err)
// do not return yet, we need the list of plugins below. range handles nil slice.
}
for _, pluginName := range nonStandardizedPlugins {
// Replace the ':' to '.' in the plugin name for attributes
capabilities = appendNameOnlyAttribute(capabilities,
attributePrefix+capabilityDockerPluginInfix+strings.Replace(pluginName, config.DockerTagSeparator, attributeSeparator, -1))
}
// for standardized plugins, call docker's plugin ls API
pluginEnabled := true
volumeDriverType := []string{dockerapi.VolumeDriverType}
standardizedPlugins, err := agent.dockerClient.ListPluginsWithFilters(agent.ctx, pluginEnabled, volumeDriverType, dockerapi.ListPluginsTimeout)
if err != nil {
seelog.Warnf("Listing plugins with filters enabled=%t, capabilities=%v failed: %v", pluginEnabled, volumeDriverType, err)
return capabilities
}
// For plugin with default tag latest, register two attributes with and without the latest tag
// as the tag is optional and can be added by docker or customer
for _, pluginName := range standardizedPlugins {
names := strings.Split(pluginName, config.DockerTagSeparator)
if len(names) > 1 && names[len(names)-1] == config.DefaultDockerTag {
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityDockerPluginInfix+strings.Join(names[:len(names)-1], attributeSeparator))
}
capabilities = appendNameOnlyAttribute(capabilities,
attributePrefix+capabilityDockerPluginInfix+strings.Replace(pluginName, config.DockerTagSeparator, attributeSeparator, -1))
}
return capabilities
}
func (agent *ecsAgent) appendNvidiaDriverVersionAttribute(capabilities []*ecs.Attribute) []*ecs.Attribute {
if agent.resourceFields != nil && agent.resourceFields.NvidiaGPUManager != nil {
capabilities = appendNameOnlyAttribute(capabilities, attributePrefix+capabilityNvidiaDriverVersionInfix+agent.resourceFields.NvidiaGPUManager.GetDriverVersion())
}
return capabilities
}
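
The fix above only registers the driver-version capability when a version was actually detected, avoiding an attribute that ends in a bare prefix. The guard generalizes to any "append only if the dynamic suffix is non-empty" situation; a stripped-down sketch with simplified stand-in types:

package main

import "fmt"

type attribute struct{ Name string }

// appendDriverVersion adds the capability only when a version was detected,
// mirroring the guarded append in the diff above.
func appendDriverVersion(caps []attribute, prefix, version string) []attribute {
	if version == "" {
		return caps // no driver found; don't emit a dangling prefix-only attribute
	}
	return append(caps, attribute{Name: prefix + version})
}

func main() {
	caps := appendDriverVersion(nil, "ecs.capability.gpu-driver-version.", "418.87.00")
	caps = appendDriverVersion(caps, "ecs.capability.gpu-driver-version.", "")
	fmt.Println(caps) // [{ecs.capability.gpu-driver-version.418.87.00}]
}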
| 1 | 21,771 | If the GPU setting is enabled, resourceFields.NvidiaGPUManager will be nil, right? | aws-amazon-ecs-agent | go |
@@ -363,7 +363,11 @@ HELP
when 'command'
executor.run_command(nodes, options[:object])
when 'script'
- executor.run_script(nodes, options[:object], options[:leftovers])
+ script = options[:object]
+ unless file_exist?(script)
+ raise Bolt::CLIError, "The script '#{script}' does not exist"
+ end
+ executor.run_script(nodes, script, options[:leftovers])
when 'task'
task_name = options[:object]
| 1 | require 'uri'
require 'optparse'
require 'benchmark'
require 'logger'
require 'json'
require 'bolt/node'
require 'bolt/version'
require 'bolt/executor'
require 'bolt/config'
require 'io/console'
module Bolt
class CLIError < RuntimeError
attr_reader :error_code
def initialize(msg, error_code: 1)
super(msg)
@error_code = error_code
end
end
class CLIExit < StandardError; end
class CLI
BANNER = <<-HELP.freeze
Usage: bolt <subcommand> <action> [options]
Available subcommands:
bolt command run <command> Run a command remotely
bolt script run <script> Upload a local script and run it remotely
bolt task run <task> [params] Run a Puppet task
bolt plan run <plan> [params] Run a Puppet task plan
bolt file upload <src> <dest> Upload a local file
where [options] are:
HELP
TASK_HELP = <<-HELP.freeze
Usage: bolt task <action> <task> [options] [parameters]
Available actions are:
run Run a Puppet task
Parameters are of the form <parameter>=<value>.
Available options are:
HELP
COMMAND_HELP = <<-HELP.freeze
Usage: bolt command <action> <command> [options]
Available actions are:
run Run a command remotely
Available options are:
HELP
SCRIPT_HELP = <<-HELP.freeze
Usage: bolt script <action> <script> [[arg1] ... [argN]] [options]
Available actions are:
run Upload a local script and run it remotely
Available options are:
HELP
PLAN_HELP = <<-HELP.freeze
Usage: bolt plan <action> <plan> [options] [parameters]
Available actions are:
run Run a Puppet task plan
Parameters are of the form <parameter>=<value>.
Available options are:
HELP
FILE_HELP = <<-HELP.freeze
Usage: bolt file <action> [options]
Available actions are:
upload <src> <dest> Upload local file <src> to <dest> on each node
Available options are:
HELP
MODES = %w[command script task plan file].freeze
ACTIONS = %w[run upload download].freeze
TRANSPORTS = %w[ssh winrm pcp].freeze
attr_reader :parser
attr_accessor :options
def initialize(argv)
@argv = argv
@options = {
nodes: [],
transport: 'ssh',
insecure: false
}
@parser = create_option_parser(@options)
end
def create_option_parser(results)
OptionParser.new('') do |opts|
opts.on(
'-n', '--nodes NODES',
'Node(s) to connect to in URI format [protocol://]host[:port]',
'Eg. --nodes bolt.puppet.com',
'Eg. --nodes localhost,ssh://nix.com:2222,winrm://windows.puppet.com',
"\n",
'* NODES can either be comma-separated, \'@<file>\' to read',
'* nodes from a file, or \'-\' to read from stdin',
'* Windows nodes must specify protocol with winrm://',
'* protocol is `ssh` by default, may be `ssh` or `winrm`',
'* port is `22` by default for SSH, `5985` for winrm (Optional)'
) do |nodes|
results[:nodes] += parse_nodes(nodes)
results[:nodes].uniq!
end
opts.on('-u', '--user USER',
"User to authenticate as (Optional)") do |user|
results[:user] = user
end
opts.on('-p', '--password [PASSWORD]',
'Password to authenticate with (Optional).',
'Omit the value to prompt for the password.') do |password|
if password.nil?
STDOUT.print "Please enter your password: "
results[:password] = STDIN.noecho(&:gets).chomp
STDOUT.puts
else
results[:password] = password
end
end
results[:concurrency] = 100
opts.on('-c', '--concurrency CONCURRENCY', Integer,
"Maximum number of simultaneous connections " \
"(Optional, defaults to 100)") do |concurrency|
results[:concurrency] = concurrency
end
opts.on('--modulepath MODULES',
"List of directories containing modules, " \
"separated by #{File::PATH_SEPARATOR}") do |modulepath|
results[:modulepath] = modulepath.split(File::PATH_SEPARATOR)
end
opts.on('--params PARAMETERS',
"Parameters to a task or plan") do |params|
results[:task_options] = parse_params(params)
end
opts.on('-k', '--insecure',
"Whether to connect insecurely ") do |insecure|
results[:insecure] = insecure
end
opts.on('--transport TRANSPORT', TRANSPORTS,
"Specify a default transport: #{TRANSPORTS.join(', ')}") do |t|
options[:transport] = t
end
opts.on_tail('--[no-]tty',
"Request a pseudo TTY on nodes that support it") do |tty|
results[:tty] = tty
end
opts.on_tail('-h', '--help', 'Display help') do |_|
results[:help] = true
end
opts.on_tail('--verbose', 'Display verbose logging') do |_|
results[:verbose] = true
end
opts.on_tail('--debug', 'Display debug logging') do |_|
results[:debug] = true
end
opts.on_tail('--version', 'Display the version') do |_|
puts Bolt::VERSION
raise Bolt::CLIExit
end
end
end
def parse
Bolt.log_level = Logger::WARN
if @argv.empty?
options[:help] = true
end
remaining = handle_parser_errors do
parser.permute(@argv)
end
options[:mode] = remaining.shift
if options[:mode] == 'help'
options[:help] = true
options[:mode] = remaining.shift
end
options[:action] = remaining.shift
options[:object] = remaining.shift
if options[:debug]
Bolt.log_level = Logger::DEBUG
elsif options[:verbose]
Bolt.log_level = Logger::INFO
end
if options[:help]
print_help(options[:mode])
raise Bolt::CLIExit
end
task_options, remaining = remaining.partition { |s| s =~ /.+=/ }
if options[:task_options]
unless task_options.empty?
raise Bolt::CLIError,
"Parameters must be specified through either the --params " \
"option or param=value pairs, not both"
end
else
options[:task_options] = Hash[task_options.map { |a| a.split('=', 2) }]
end
options[:leftovers] = remaining
validate(options)
options
end
def print_help(mode)
parser.banner = case mode
when 'task'
TASK_HELP
when 'command'
COMMAND_HELP
when 'script'
SCRIPT_HELP
when 'file'
FILE_HELP
when 'plan'
PLAN_HELP
else
BANNER
end
puts parser.help
end
def parse_nodes(nodes)
list = get_arg_input(nodes)
list.split(/[[:space:],]+/).reject(&:empty?).uniq
end
def parse_params(params)
json = get_arg_input(params)
JSON.parse(json)
rescue JSON::ParserError => err
raise Bolt::CLIError, "Unable to parse --params value as JSON: #{err}"
end
def get_arg_input(value)
if value.start_with?('@')
file = value.sub(/^@/, '')
read_arg_file(file)
elsif value == '-'
STDIN.read
else
value
end
end
def read_arg_file(file)
File.read(file)
rescue StandardError => err
raise Bolt::CLIError, "Error attempting to read #{file}: #{err}"
end
def validate(options)
unless MODES.include?(options[:mode])
raise Bolt::CLIError,
"Expected subcommand '#{options[:mode]}' to be one of " \
"#{MODES.join(', ')}"
end
if options[:action].nil?
raise Bolt::CLIError,
"Expected an action of the form 'bolt #{options[:mode]} <action>'"
end
unless ACTIONS.include?(options[:action])
raise Bolt::CLIError,
"Expected action '#{options[:action]}' to be one of " \
"#{ACTIONS.join(', ')}"
end
if options[:mode] != 'file' && options[:mode] != 'script' &&
!options[:leftovers].empty?
raise Bolt::CLIError,
"Unknown argument(s) #{options[:leftovers].join(', ')}"
end
if %w[task plan].include?(options[:mode])
if options[:object].nil?
raise Bolt::CLIError, "Must specify a #{options[:mode]} to run"
end
# This may mean that we parsed a parameter as the object
unless options[:object] =~ /\A([a-z][a-z0-9_]*)?(::[a-z][a-z0-9_]*)*\Z/
raise Bolt::CLIError,
"Invalid #{options[:mode]} '#{options[:object]}'"
end
end
unless !options[:nodes].empty? || options[:mode] == 'plan'
raise Bolt::CLIError, "Option '--nodes' must be specified"
end
if %w[task plan].include?(options[:mode]) && options[:modulepath].nil?
raise Bolt::CLIError,
"Option '--modulepath' must be specified when running" \
" a task or plan"
end
end
def handle_parser_errors
yield
rescue OptionParser::MissingArgument => e
raise Bolt::CLIError, "Option '#{e.args.first}' needs a parameter"
rescue OptionParser::InvalidOption => e
raise Bolt::CLIError, "Unknown argument '#{e.args.first}'"
end
def execute(options)
if options[:mode] == 'plan' || options[:mode] == 'task'
begin
require_relative '../../vendored/require_vendored'
rescue LoadError
raise Bolt::CLIError, "Puppet must be installed to execute tasks"
end
Puppet::Util::Log.newdestination(:console)
Puppet[:log_level] = if Bolt.log_level == Logger::DEBUG
'debug'
else
'notice'
end
end
config = Bolt::Config.new(concurrency: options[:concurrency],
user: options[:user],
password: options[:password],
tty: options[:tty],
insecure: options[:insecure],
transport: options[:transport])
executor = Bolt::Executor.new(config)
if options[:mode] == 'plan'
execute_plan(executor, options)
else
nodes = executor.from_uris(options[:nodes])
results = nil
elapsed_time = Benchmark.realtime do
results =
case options[:mode]
when 'command'
executor.run_command(nodes, options[:object])
when 'script'
executor.run_script(nodes, options[:object], options[:leftovers])
when 'task'
task_name = options[:object]
path, metadata = load_task_data(task_name, options[:modulepath])
input_method = metadata['input_method']
input_method ||= 'both'
executor.run_task(
nodes, path, input_method, options[:task_options]
)
when 'file'
src = options[:object]
dest = options[:leftovers].first
if dest.nil?
raise Bolt::CLIError, "A destination path must be specified"
elsif !file_exist?(src)
raise Bolt::CLIError, "The source file '#{src}' does not exist"
end
executor.file_upload(nodes, src, dest)
end
end
print_results(results, elapsed_time)
end
end
def execute_plan(executor, options)
result = Puppet.override(bolt_executor: executor) do
run_plan(options[:object],
options[:task_options],
options[:modulepath])
end
puts result
rescue Puppet::Error
raise Bolt::CLIError, "Exiting because of an error in Puppet code"
end
def colorize(result, stream)
color = result.success? ? "\033[32m" : "\033[31m"
stream.print color if stream.isatty
yield
stream.print "\033[0m" if stream.isatty
end
def print_results(results, elapsed_time)
results.each_pair do |node, result|
colorize(result, $stdout) { $stdout.puts "#{node.host}:" }
$stdout.puts
$stdout.puts result.message
$stdout.puts
end
$stdout.puts format("Ran on %d node%s in %.2f seconds",
results.size,
results.size > 1 ? 's' : '',
elapsed_time)
end
def file_exist?(path)
File.exist?(path)
end
def load_task_data(name, modulepath)
module_name, file_name = name.split('::', 2)
file_name ||= 'init'
begin
env = Puppet::Node::Environment.create('bolt', modulepath)
Puppet.override(environments: Puppet::Environments::Static.new(env)) do
data = Puppet::InfoService::TaskInformationService.task_data(
env.name, module_name, name
)
file = data[:files].find { |f| File.basename(f, '.*') == file_name }
if file.nil?
raise Bolt::CLIError, "Failed to load task file for '#{name}'"
end
metadata =
if data[:metadata_file]
JSON.parse(File.read(data[:metadata_file]))
else
{}
end
[file, metadata]
end
rescue Puppet::Module::Task::TaskNotFound
raise Bolt::CLIError,
"Could not find task '#{name}' in module '#{module_name}'"
rescue Puppet::Module::MissingModule
# Generate message so we don't expose "bolt environment"
raise Bolt::CLIError, "Could not find module '#{module_name}'"
end
end
def run_plan(plan, args, modulepath)
Dir.mktmpdir('bolt') do |dir|
cli = []
Puppet::Settings::REQUIRED_APP_SETTINGS.each do |setting|
cli << "--#{setting}" << dir
end
Puppet.initialize_settings(cli)
Puppet::Pal.in_tmp_environment('bolt', modulepath: modulepath) do |pal|
puts pal.run_plan(plan, plan_args: args)
end
end
end
end
end
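
The diff checks only that the script exists; in Ruby the natural companion check is File.readable?. A language-agnostic version of the combined test, sketched here in Go — the most reliable readability probe is simply attempting to open the file:

package main

import (
	"fmt"
	"os"
)

// checkScript reports whether path is missing or unreadable, roughly what an
// exists-plus-readable guard in the CLI above would cover.
func checkScript(path string) error {
	f, err := os.Open(path)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("the script '%s' does not exist", path)
		}
		return fmt.Errorf("the script '%s' is not readable: %w", path, err)
	}
	return f.Close()
}

func main() {
	fmt.Println(checkScript("/no/such/script.sh"))
}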
| 1 | 6,831 | We should probably verify that it's readable too | puppetlabs-bolt | rb |
@@ -355,7 +355,7 @@ namespace Microsoft.CodeAnalysis.Sarif.Converters
throw new InvalidDataException("Expected key value before dictionary data.");
}
- string value = xmlReader.ReadElementContentAsString();
+ xmlReader.ReadElementContentAsString();
readerMoved = true;
keyName = string.Empty;
                            break;
| 1 | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Xml;
using Microsoft.CodeAnalysis.Sarif.Writers;
namespace Microsoft.CodeAnalysis.Sarif.Converters
{
internal class ClangAnalyzerConverter : IToolFileConverter
{
private IList<object> _files = null;
/// <summary>Convert a Clang plist report into the SARIF format.</summary>
/// <exception cref="ArgumentNullException">Thrown when one or more required arguments are null.</exception>
/// <param name="input">CLang log file stream.</param>
/// <param name="output">Result log writer.</param>
public void Convert(Stream input, IResultLogWriter output)
{
// ToDo remove this comment after all issues are resolved.
// Rodney is tasked with bringing Clang analyzer results into the SARIF fold.
// Once this work is complete, he can close the following task:
// http://twcsec-tfs01:8080/tfs/DefaultCollection/SecDevTools/_workitems#_a=edit&id=13409
if (input == null)
{
throw new ArgumentNullException(nameof(input));
}
if (output == null)
{
throw new ArgumentNullException(nameof(output));
}
try
{
XmlReaderSettings settings = new XmlReaderSettings();
settings.IgnoreWhitespace = true;
settings.DtdProcessing = DtdProcessing.Ignore;
var results = new List<Result>();
using (XmlReader xmlReader = XmlReader.Create(input, settings))
{
XmlNodeType nodeType = xmlReader.MoveToContent();
xmlReader.ReadStartElement(ClangSchemaStrings.PlistName);
if (xmlReader.NodeType == XmlNodeType.Element)
{
using (var pListReader = xmlReader.ReadSubtree())
{
this.ReadPlist(pListReader, results);
}
}
}
var tool = new Tool
{
Name = "Clang"
};
var fileInfoFactory = new FileInfoFactory(MimeType.DetermineFromFileExtension);
Dictionary<string, FileData> fileDictionary = fileInfoFactory.Create(results);
output.Initialize(id: null, correlationId: null);
output.WriteTool(tool);
if (fileDictionary != null && fileDictionary.Count > 0) { output.WriteFiles(fileDictionary); }
output.OpenResults();
output.WriteResults(results);
output.CloseResults();
}
finally
{
_files = null;
}
}
private static IDictionary<string, object> FindDictionary(IDictionary<string, object> dictionary, string key)
{
object getObject;
Dictionary<string, object> value = null;
if (dictionary.TryGetValue(key, out getObject))
{
value = getObject as Dictionary<string, object>;
}
return value ?? new Dictionary<string, object>();
}
private static int FindInt(IDictionary<string, object> dictionary, string key)
{
object getObject;
string value = null;
int returnValue = 0;
if (dictionary.TryGetValue(key, out getObject))
{
value = getObject as string;
if (!int.TryParse(value, out returnValue))
{
throw new InvalidDataException("Expected an int value for " + key + " found : " + value);
}
}
return returnValue;
}
private static string FindString(IDictionary<string, object> dictionary, string key)
{
object getObject;
string value = null;
if (dictionary.TryGetValue(key, out getObject))
{
value = getObject as string;
}
return value ?? string.Empty;
}
private Result CreateResult(IDictionary<string, object> issueData)
{
if (issueData != null)
{
// Used for Result.FullMessage
string description = FindString(issueData, "description");
// Used as rule id.
string issueType = FindString(issueData, "type");
// This data persisted to result property bag
string category = FindString(issueData, "category");
string issueContextKind = FindString(issueData, "issue_context_kind");
string issueContext = FindString(issueData, "issue_context");
string issueHash = FindString(issueData, "issue_hash");
int issueLine = 0;
int issueColumn = 0;
string fileName = null;
IDictionary<string, object> location = FindDictionary(issueData, "location");
if (location != null)
{
issueLine = FindInt(location, "line");
issueColumn = FindInt(location, "col");
int fileNumber = FindInt(location, "file");
if (_files != null && fileNumber < _files.Count)
{
fileName = _files[fileNumber] as string;
}
}
var result = new Result
{
RuleId = issueType,
Message = description,
Locations = new List<Location>
{
new Location
{
AnalysisTarget = new PhysicalLocation
{
Uri = new Uri(fileName, UriKind.RelativeOrAbsolute),
Region = new Region()
{
StartLine = issueLine,
StartColumn = issueColumn
}
}
}
},
};
result.SetProperty("category", category);
result.SetProperty("issue_context_kind", issueContextKind);
result.SetProperty("issueContext", issueContext);
result.SetProperty("issueHash", issueHash);
return result;
}
else
{
return null;
}
}
private static IList<object> ReadArray(XmlReader xmlReader)
{
List<object> list = new List<object>();
bool readerMoved = false; // ReadElementContentAsString moves the reader so prevent double moves.
xmlReader.Read(); // Read past the "array" element start.
while (readerMoved || xmlReader.Read())
{
readerMoved = false;
if (xmlReader.NodeType == XmlNodeType.Element)
{
switch (xmlReader.Name)
{
case ClangSchemaStrings.StringName:
case ClangSchemaStrings.IntegerName:
case ClangSchemaStrings.RealName:
case ClangSchemaStrings.DataName:
case ClangSchemaStrings.DateName:
{
string value = xmlReader.ReadElementContentAsString();
readerMoved = true;
list.Add(value);
break;
}
case ClangSchemaStrings.ArrayName:
{
using (var subTreeReader = xmlReader.ReadSubtree())
{
IList<object> array = ReadArray(subTreeReader);
list.Add(array);
}
break;
}
case ClangSchemaStrings.DictionaryName:
{
using (var subTreeReader = xmlReader.ReadSubtree())
{
IDictionary<string, object> dictionary = ReadDictionary(subTreeReader);
list.Add(dictionary);
}
break;
}
}
}
if (xmlReader.NodeType == XmlNodeType.EndElement && (xmlReader.Name == ClangSchemaStrings.ArrayName))
{
break;
}
}
return list;
}
private static IDictionary<string, object> ReadDictionary(XmlReader xmlReader)
{
IDictionary<string, object> dictionary = new Dictionary<string, object>();
string keyName = string.Empty;
bool readerMoved = false; // ReadElementContentAsString reads to next element
xmlReader.Read(); // read past the dictionary element;
while (readerMoved || xmlReader.Read())
{
readerMoved = false;
if (xmlReader.NodeType == XmlNodeType.Element)
{
switch (xmlReader.Name)
{
case ClangSchemaStrings.KeyName:
{
keyName = xmlReader.ReadElementContentAsString();
readerMoved = true;
break;
}
case ClangSchemaStrings.StringName:
case ClangSchemaStrings.IntegerName:
case ClangSchemaStrings.RealName:
case ClangSchemaStrings.DataName:
case ClangSchemaStrings.DateName:
{
if (string.IsNullOrEmpty(keyName))
{
throw new InvalidDataException("Expected key value before dictionary data.");
}
string value = xmlReader.ReadElementContentAsString();
readerMoved = true;
dictionary.Add(keyName, value);
keyName = string.Empty;
break;
}
case ClangSchemaStrings.ArrayName:
{
if (string.IsNullOrEmpty(keyName))
{
throw new InvalidDataException("Expected key value before dictionary data.");
}
using (var subTreeReader = xmlReader.ReadSubtree())
{
IList<object> array = ReadArray(subTreeReader);
dictionary.Add(keyName, array);
keyName = string.Empty;
}
break;
}
case ClangSchemaStrings.DictionaryName:
{
if (string.IsNullOrEmpty(keyName))
{
throw new InvalidDataException("Expected key value before dictionary data.");
}
using (var subTreeReader = xmlReader.ReadSubtree())
{
IDictionary<string, object> child = ReadDictionary(subTreeReader);
dictionary.Add(keyName, child);
keyName = string.Empty;
}
break;
}
}
}
else if (xmlReader.NodeType == XmlNodeType.EndElement && xmlReader.Name == ClangSchemaStrings.DictionaryName)
{
break;
}
}
return dictionary;
}
private void ReadPlistDictionary(XmlReader xmlReader, IList<Result> results)
{
string keyName = string.Empty;
bool readerMoved = false; // ReadElementContentAsString reads to next element
xmlReader.Read(); // read past the dictionary element;
while (readerMoved || xmlReader.Read())
{
readerMoved = false;
if (xmlReader.NodeType == XmlNodeType.Element)
{
switch (xmlReader.Name)
{
case ClangSchemaStrings.KeyName:
{
keyName = xmlReader.ReadElementContentAsString();
readerMoved = true;
break;
}
case ClangSchemaStrings.StringName:
{
if (string.IsNullOrEmpty(keyName))
{
throw new InvalidDataException("Expected key value before dictionary data.");
}
string value = xmlReader.ReadElementContentAsString();
readerMoved = true;
keyName = string.Empty;
break;
}
case ClangSchemaStrings.ArrayName:
{
if (string.IsNullOrEmpty(keyName))
{
throw new InvalidDataException("Expected key value before dictionary data.");
}
using (var subTreeReader = xmlReader.ReadSubtree())
{
if (keyName.Equals("files"))
{
_files = ReadArray(subTreeReader);
}
if (keyName.Equals("diagnostics"))
{
ReadDiagnostics(subTreeReader, results);
}
keyName = string.Empty;
}
break;
}
}
}
else if (xmlReader.NodeType == XmlNodeType.EndElement && xmlReader.Name == ClangSchemaStrings.DictionaryName)
{
break;
}
}
}
private void ReadDiagnostics(XmlReader xmlReader, IList<Result> results)
{
xmlReader.Read(); // Read past the "array" element start.
while (xmlReader.Read())
{
if (xmlReader.NodeType == XmlNodeType.Element)
{
if (xmlReader.Name.Equals(ClangSchemaStrings.DictionaryName))
{
using (var subTreeReader = xmlReader.ReadSubtree())
{
IDictionary<string, object> dictionary = ReadDictionary(subTreeReader);
Result result = this.CreateResult(dictionary);
if (result != null)
{
results.Add(result);
}
}
}
}
if (xmlReader.NodeType == XmlNodeType.EndElement && xmlReader.Name == ClangSchemaStrings.ArrayName)
{
break;
}
}
}
private void ReadPlist(XmlReader xmlReader, IList<Result> results)
{
while (xmlReader.Read())
{
if (xmlReader.NodeType == XmlNodeType.Element)
{
if (xmlReader.Name.Equals(ClangSchemaStrings.DictionaryName))
{
using (var subTreeReader = xmlReader.ReadSubtree())
{
this.ReadPlistDictionary(subTreeReader, results);
}
}
}
}
}
private static class ClangSchemaStrings
{
public const string ArrayName = "array";
public const string DataName = "data";
public const string DateName = "date";
public const string DictionaryName = "dict";
public const string IntegerName = "integer";
public const string KeyName = "key";
public const string PlistName = "plist";
public const string RealName = "real";
public const string StringName = "string";
public const string VersionName = "version";
}
}
}
| 1 | 10,967 | See, here's an example where you removed the variable but kept the call, which makes sense. | microsoft-sarif-sdk | .cs
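
The point of this change: ReadElementContentAsString() both returns the element text and advances the reader, so the call has to survive even when its return value is discarded. Go's streaming XML decoder has the same consume-but-discard shape — you must still pull past content you don't care about. A minimal sketch (not the SARIF converter itself):

package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

func main() {
	dec := xml.NewDecoder(strings.NewReader(`<dict><key>k</key><string>ignored</string></dict>`))
	for {
		tok, err := dec.Token()
		if err != nil {
			break // io.EOF ends the stream
		}
		if se, ok := tok.(xml.StartElement); ok && se.Name.Local == "string" {
			// We don't need the element's text, but we must still consume it to
			// advance the decoder -- the analogue of keeping the
			// ReadElementContentAsString() call while dropping its result.
			if err := dec.Skip(); err != nil {
				panic(err)
			}
			fmt.Println("skipped a <string> element")
		}
	}
}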
@@ -78,12 +78,12 @@ public class FeedParserTask implements Callable<FeedHandlerResult> {
}
if (successful) {
- downloadStatus = new DownloadStatus(feed, feed.getHumanReadableIdentifier(),
- DownloadError.SUCCESS, successful, reasonDetailed);
+ downloadStatus = new DownloadStatus(feed, feed.getHumanReadableIdentifier(), DownloadError.SUCCESS,
+ successful, reasonDetailed, request.isInitiatedByUser());
return result;
} else {
- downloadStatus = new DownloadStatus(feed, request.getTitle(),
- reason, successful, reasonDetailed);
+ downloadStatus = new DownloadStatus(feed, feed.getHumanReadableIdentifier(), reason, successful,
+ reasonDetailed, request.isInitiatedByUser());
return null;
}
        }
| 1 | package de.danoeh.antennapod.core.service.download.handler;
import android.util.Log;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedPreferences;
import de.danoeh.antennapod.core.feed.VolumeAdaptionSetting;
import de.danoeh.antennapod.core.service.download.DownloadRequest;
import de.danoeh.antennapod.core.service.download.DownloadStatus;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.syndication.handler.FeedHandler;
import de.danoeh.antennapod.core.syndication.handler.FeedHandlerResult;
import de.danoeh.antennapod.core.syndication.handler.UnsupportedFeedtypeException;
import de.danoeh.antennapod.core.util.DownloadError;
import de.danoeh.antennapod.core.util.InvalidFeedException;
import org.xml.sax.SAXException;
import javax.xml.parsers.ParserConfigurationException;
import java.io.File;
import java.io.IOException;
import java.util.Date;
import java.util.concurrent.Callable;
public class FeedParserTask implements Callable<FeedHandlerResult> {
private static final String TAG = "FeedParserTask";
private final DownloadRequest request;
private DownloadStatus downloadStatus;
private boolean successful = true;
public FeedParserTask(DownloadRequest request) {
this.request = request;
}
@Override
public FeedHandlerResult call() {
Feed feed = new Feed(request.getSource(), request.getLastModified());
feed.setFile_url(request.getDestination());
feed.setId(request.getFeedfileId());
feed.setDownloaded(true);
feed.setPreferences(new FeedPreferences(0, true, FeedPreferences.AutoDeleteAction.GLOBAL,
VolumeAdaptionSetting.OFF, request.getUsername(), request.getPassword()));
feed.setPageNr(request.getArguments().getInt(DownloadRequester.REQUEST_ARG_PAGE_NR, 0));
DownloadError reason = null;
String reasonDetailed = null;
FeedHandler feedHandler = new FeedHandler();
FeedHandlerResult result = null;
try {
result = feedHandler.parseFeed(feed);
Log.d(TAG, feed.getTitle() + " parsed");
if (!checkFeedData(feed)) {
throw new InvalidFeedException();
}
} catch (SAXException | IOException | ParserConfigurationException e) {
successful = false;
e.printStackTrace();
reason = DownloadError.ERROR_PARSER_EXCEPTION;
reasonDetailed = e.getMessage();
} catch (UnsupportedFeedtypeException e) {
e.printStackTrace();
successful = false;
reason = DownloadError.ERROR_UNSUPPORTED_TYPE;
reasonDetailed = e.getMessage();
} catch (InvalidFeedException e) {
e.printStackTrace();
successful = false;
reason = DownloadError.ERROR_PARSER_EXCEPTION;
reasonDetailed = e.getMessage();
} finally {
File feedFile = new File(request.getDestination());
if (feedFile.exists()) {
boolean deleted = feedFile.delete();
Log.d(TAG, "Deletion of file '" + feedFile.getAbsolutePath() + "' "
+ (deleted ? "successful" : "FAILED"));
}
}
if (successful) {
downloadStatus = new DownloadStatus(feed, feed.getHumanReadableIdentifier(),
DownloadError.SUCCESS, successful, reasonDetailed);
return result;
} else {
downloadStatus = new DownloadStatus(feed, request.getTitle(),
reason, successful, reasonDetailed);
return null;
}
}
public boolean isSuccessful() {
return successful;
}
/**
* Checks if the feed was parsed correctly.
*/
private boolean checkFeedData(Feed feed) {
if (feed.getTitle() == null) {
Log.e(TAG, "Feed has no title.");
return false;
}
if (!hasValidFeedItems(feed)) {
Log.e(TAG, "Feed has invalid items");
return false;
}
return true;
}
private boolean hasValidFeedItems(Feed feed) {
for (FeedItem item : feed.getItems()) {
if (item.getTitle() == null) {
Log.e(TAG, "Item has no title");
return false;
}
if (item.getPubDate() == null) {
Log.e(TAG, "Item has no pubDate. Using current time as pubDate");
if (item.getTitle() != null) {
Log.e(TAG, "Title of invalid item: " + item.getTitle());
}
item.setPubDate(new Date());
}
}
return true;
}
public DownloadStatus getDownloadStatus() {
return downloadStatus;
}
}
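
Both branches above now build the DownloadStatus the same way; the subtle question is which title source is valid in each branch, since a feed that failed to parse may not have a human-readable title yet. A small Go sketch of that fallback decision (the types are illustrative stand-ins for Feed and DownloadRequest):

package main

import "fmt"

type feed struct{ title string }

func (f feed) humanReadableIdentifier() string { return f.title }

type request struct{ title string }

// statusTitle prefers the parsed feed's title and falls back to the request's
// title when parsing failed and the feed has no usable name yet.
func statusTitle(f feed, r request, successful bool) string {
	if successful && f.humanReadableIdentifier() != "" {
		return f.humanReadableIdentifier()
	}
	return r.title
}

func main() {
	fmt.Println(statusTitle(feed{title: "Podcast"}, request{title: "https://example.com/feed"}, true))
	fmt.Println(statusTitle(feed{}, request{title: "https://example.com/feed"}, false))
}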
| 1 | 15,788 | Please use `request.getTitle()` instead of `feed.getHumanReadableIdentifier()`: In this case, `feed` does not have a human readable title yet | AntennaPod-AntennaPod | java |
@@ -254,7 +254,7 @@ func waitForClef(logger logging.Logger, maxRetries uint64, endpoint string) (ext
return nil, err
}
maxRetries--
- logger.Errorf("cannot connect to clef signer: %v", err)
+ logger.Warningf("failing to connect to clef signer: %v", err)
time.Sleep(5 * time.Second)
 }
| 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmd
import (
"bytes"
"context"
"crypto/ecdsa"
"fmt"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/accounts/external"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethersphere/bee"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/crypto/clef"
"github.com/ethersphere/bee/pkg/keystore"
filekeystore "github.com/ethersphere/bee/pkg/keystore/file"
memkeystore "github.com/ethersphere/bee/pkg/keystore/mem"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/node"
"github.com/ethersphere/bee/pkg/resolver/multiresolver"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/kardianos/service"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
const (
serviceName = "SwarmBeeSvc"
)
func (c *command) initStartCmd() (err error) {
cmd := &cobra.Command{
Use: "start",
Short: "Start a Swarm node",
RunE: func(cmd *cobra.Command, args []string) (err error) {
if len(args) > 0 {
return cmd.Help()
}
var logger logging.Logger
switch v := strings.ToLower(c.config.GetString(optionNameVerbosity)); v {
case "0", "silent":
logger = logging.New(ioutil.Discard, 0)
case "1", "error":
logger = logging.New(cmd.OutOrStdout(), logrus.ErrorLevel)
case "2", "warn":
logger = logging.New(cmd.OutOrStdout(), logrus.WarnLevel)
case "3", "info":
logger = logging.New(cmd.OutOrStdout(), logrus.InfoLevel)
case "4", "debug":
logger = logging.New(cmd.OutOrStdout(), logrus.DebugLevel)
case "5", "trace":
logger = logging.New(cmd.OutOrStdout(), logrus.TraceLevel)
default:
return fmt.Errorf("unknown verbosity level %q", v)
}
isWindowsService, err := isWindowsService()
if err != nil {
return fmt.Errorf("failed to determine if we are running in service: %w", err)
}
if isWindowsService {
var err error
logger, err = createWindowsEventLogger(serviceName, logger)
if err != nil {
return fmt.Errorf("failed to create windows logger %w", err)
}
}
// If the resolver is specified, resolve all connection strings
// and fail on any errors.
var resolverCfgs []multiresolver.ConnectionConfig
resolverEndpoints := c.config.GetStringSlice(optionNameResolverEndpoints)
if len(resolverEndpoints) > 0 {
resolverCfgs, err = multiresolver.ParseConnectionStrings(resolverEndpoints)
if err != nil {
return err
}
}
beeASCII := `
Welcome to the Swarm.... Bzzz Bzzzz Bzzzz
\ /
\ o ^ o /
\ ( ) /
____________(%%%%%%%)____________
( / / )%%%%%%%( \ \ )
(___/___/__/ \__\___\___)
( / /(%%%%%%%)\ \ )
(__/___/ (%%%%%%%) \___\__)
/( )\
/ (%%%%%) \
(%%%)
! `
fmt.Println(beeASCII)
logger.Infof("version: %v", bee.Version)
debugAPIAddr := c.config.GetString(optionNameDebugAPIAddr)
if !c.config.GetBool(optionNameDebugAPIEnable) {
debugAPIAddr = ""
}
signerConfig, err := c.configureSigner(cmd, logger)
if err != nil {
return err
}
b, err := node.NewBee(c.config.GetString(optionNameP2PAddr), signerConfig.address, *signerConfig.publicKey, signerConfig.signer, c.config.GetUint64(optionNameNetworkID), logger, signerConfig.libp2pPrivateKey, signerConfig.pssPrivateKey, node.Options{
DataDir: c.config.GetString(optionNameDataDir),
DBCapacity: c.config.GetUint64(optionNameDBCapacity),
APIAddr: c.config.GetString(optionNameAPIAddr),
DebugAPIAddr: debugAPIAddr,
Addr: c.config.GetString(optionNameP2PAddr),
NATAddr: c.config.GetString(optionNameNATAddr),
EnableWS: c.config.GetBool(optionNameP2PWSEnable),
EnableQUIC: c.config.GetBool(optionNameP2PQUICEnable),
WelcomeMessage: c.config.GetString(optionWelcomeMessage),
Bootnodes: c.config.GetStringSlice(optionNameBootnodes),
CORSAllowedOrigins: c.config.GetStringSlice(optionCORSAllowedOrigins),
Standalone: c.config.GetBool(optionNameStandalone),
TracingEnabled: c.config.GetBool(optionNameTracingEnabled),
TracingEndpoint: c.config.GetString(optionNameTracingEndpoint),
TracingServiceName: c.config.GetString(optionNameTracingServiceName),
Logger: logger,
GlobalPinningEnabled: c.config.GetBool(optionNameGlobalPinningEnabled),
PaymentThreshold: c.config.GetString(optionNamePaymentThreshold),
PaymentTolerance: c.config.GetString(optionNamePaymentTolerance),
PaymentEarly: c.config.GetString(optionNamePaymentEarly),
ResolverConnectionCfgs: resolverCfgs,
GatewayMode: c.config.GetBool(optionNameGatewayMode),
SwapEndpoint: c.config.GetString(optionNameSwapEndpoint),
SwapFactoryAddress: c.config.GetString(optionNameSwapFactoryAddress),
SwapInitialDeposit: c.config.GetString(optionNameSwapInitialDeposit),
SwapEnable: c.config.GetBool(optionNameSwapEnable),
})
if err != nil {
return err
}
// Wait for termination or interrupt signals.
// We want to clean up things at the end.
interruptChannel := make(chan os.Signal, 1)
signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM)
p := &program{
start: func() {
// Block main goroutine until it is interrupted
sig := <-interruptChannel
logger.Debugf("received signal: %v", sig)
logger.Info("shutting down")
},
stop: func() {
// Shutdown
done := make(chan struct{})
go func() {
defer close(done)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
if err := b.Shutdown(ctx); err != nil {
logger.Errorf("shutdown: %v", err)
}
}()
// If shutdown function is blocking too long,
// allow process termination by receiving another signal.
select {
case sig := <-interruptChannel:
logger.Debugf("received signal: %v", sig)
case <-done:
}
},
}
if isWindowsService {
s, err := service.New(p, &service.Config{
Name: serviceName,
DisplayName: "Bee",
Description: "Bee, Swarm client.",
})
if err != nil {
return err
}
if err = s.Run(); err != nil {
return err
}
} else {
// start blocks until some interrupt is received
p.start()
p.stop()
}
return nil
},
PreRunE: func(cmd *cobra.Command, args []string) error {
return c.config.BindPFlags(cmd.Flags())
},
}
c.setAllFlags(cmd)
c.root.AddCommand(cmd)
return nil
}
type program struct {
start func()
stop func()
}
func (p *program) Start(s service.Service) error {
// Start should not block. Do the actual work async.
go p.start()
return nil
}
func (p *program) Stop(s service.Service) error {
p.stop()
return nil
}
type signerConfig struct {
signer crypto.Signer
address swarm.Address
publicKey *ecdsa.PublicKey
libp2pPrivateKey *ecdsa.PrivateKey
pssPrivateKey *ecdsa.PrivateKey
}
func waitForClef(logger logging.Logger, maxRetries uint64, endpoint string) (externalSigner *external.ExternalSigner, err error) {
for {
externalSigner, err = external.NewExternalSigner(endpoint)
if err == nil {
return externalSigner, nil
}
if maxRetries == 0 {
return nil, err
}
maxRetries--
logger.Errorf("cannot connect to clef signer: %v", err)
time.Sleep(5 * time.Second)
}
}
func (c *command) configureSigner(cmd *cobra.Command, logger logging.Logger) (config *signerConfig, err error) {
var keystore keystore.Service
if c.config.GetString(optionNameDataDir) == "" {
keystore = memkeystore.New()
logger.Warning("data directory not provided, keys are not persisted")
} else {
keystore = filekeystore.New(filepath.Join(c.config.GetString(optionNameDataDir), "keys"))
}
var signer crypto.Signer
var address swarm.Address
var password string
var publicKey *ecdsa.PublicKey
if p := c.config.GetString(optionNamePassword); p != "" {
password = p
} else if pf := c.config.GetString(optionNamePasswordFile); pf != "" {
b, err := ioutil.ReadFile(pf)
if err != nil {
return nil, err
}
password = string(bytes.Trim(b, "\n"))
} else {
// if libp2p key exists we can assume all required keys exist
// so prompt for a password to unlock them
// otherwise prompt for new password with confirmation to create them
exists, err := keystore.Exists("libp2p")
if err != nil {
return nil, err
}
if exists {
password, err = terminalPromptPassword(cmd, c.passwordReader, "Password")
if err != nil {
return nil, err
}
} else {
password, err = terminalPromptCreatePassword(cmd, c.passwordReader)
if err != nil {
return nil, err
}
}
}
if c.config.GetBool(optionNameClefSignerEnable) {
endpoint := c.config.GetString(optionNameClefSignerEndpoint)
if endpoint == "" {
endpoint, err = clef.DefaultIpcPath()
if err != nil {
return nil, err
}
}
externalSigner, err := waitForClef(logger, 5, endpoint)
if err != nil {
return nil, err
}
clefRPC, err := rpc.Dial(endpoint)
if err != nil {
return nil, err
}
wantedAddress := c.config.GetString(optionNameClefSignerEthereumAddress)
var overlayEthAddress *common.Address = nil
// if wantedAddress was specified use that, otherwise clef account 0 will be selected.
if wantedAddress != "" {
ethAddress := common.HexToAddress(wantedAddress)
				overlayEthAddress = &ethAddress
}
signer, err = clef.NewSigner(externalSigner, clefRPC, crypto.Recover, overlayEthAddress)
if err != nil {
return nil, err
}
publicKey, err = signer.PublicKey()
if err != nil {
return nil, err
}
address, err = crypto.NewOverlayAddress(*publicKey, c.config.GetUint64(optionNameNetworkID))
if err != nil {
return nil, err
}
logger.Infof("using swarm network address through clef: %s", address)
} else {
logger.Warning("clef is not enabled; portability and security of your keys is sub optimal")
swarmPrivateKey, created, err := keystore.Key("swarm", password)
if err != nil {
return nil, fmt.Errorf("swarm key: %w", err)
}
signer = crypto.NewDefaultSigner(swarmPrivateKey)
publicKey = &swarmPrivateKey.PublicKey
address, err = crypto.NewOverlayAddress(*publicKey, c.config.GetUint64(optionNameNetworkID))
if err != nil {
return nil, err
}
if created {
logger.Infof("new swarm network address created: %s", address)
} else {
logger.Infof("using existing swarm network address: %s", address)
}
}
logger.Infof("swarm public key %x", crypto.EncodeSecp256k1PublicKey(publicKey))
libp2pPrivateKey, created, err := keystore.Key("libp2p", password)
if err != nil {
return nil, fmt.Errorf("libp2p key: %w", err)
}
if created {
logger.Debugf("new libp2p key created")
} else {
logger.Debugf("using existing libp2p key")
}
pssPrivateKey, created, err := keystore.Key("pss", password)
if err != nil {
return nil, fmt.Errorf("pss key: %w", err)
}
if created {
logger.Debugf("new pss key created")
} else {
logger.Debugf("using existing pss key")
}
logger.Infof("pss public key %x", crypto.EncodeSecp256k1PublicKey(&pssPrivateKey.PublicKey))
// postinst and post scripts inside packaging/{deb,rpm} depend and parse on this log output
overlayEthAddress, err := signer.EthereumAddress()
if err != nil {
return nil, err
}
logger.Infof("using ethereum address %x", overlayEthAddress)
return &signerConfig{
signer: signer,
address: address,
publicKey: publicKey,
libp2pPrivateKey: libp2pPrivateKey,
pssPrivateKey: pssPrivateKey,
}, nil
}
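
waitForClef retries with a fixed five-second sleep, and the diff demotes the per-attempt log from error to warning, since a failed attempt is expected while clef is still starting. The retry shape, distilled — connect here is a stand-in for external.NewExternalSigner:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retry keeps calling connect until it succeeds or maxRetries is exhausted,
// logging each expected failure at warning level rather than error level.
func retry(maxRetries uint64, wait time.Duration, connect func() error) error {
	for {
		err := connect()
		if err == nil {
			return nil
		}
		if maxRetries == 0 {
			return err // out of attempts: now it is a real error
		}
		maxRetries--
		fmt.Println("warning: failing to connect:", err)
		time.Sleep(wait)
	}
}

func main() {
	attempts := 0
	err := retry(5, time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not up yet")
		}
		return nil
	})
	fmt.Println("connected after", attempts, "attempts, err =", err)
}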
| 1 | 14,161 | I think the wording on the left is better | ethersphere-bee | go |
@@ -69,7 +69,7 @@ func Mux(pattern string, mux *http.ServeMux) InboundOption {
// YARPC.
func Interceptor(interceptor func(yarpcHandler http.Handler) http.Handler) InboundOption {
return func(i *Inbound) {
- i.interceptor = interceptor
+ i.interceptors = append(i.interceptors, interceptor)
}
}
| 1 | // Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"net"
"net/http"
"strings"
"time"
"github.com/opentracing/opentracing-go"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/api/x/introspection"
intnet "go.uber.org/yarpc/internal/net"
"go.uber.org/yarpc/pkg/lifecycle"
"go.uber.org/yarpc/yarpcerrors"
"go.uber.org/zap"
)
// We want a value that's around 5 seconds, but slightly higher than how
// long a successful HTTP shutdown can take.
// There's a specific path in the HTTP shutdown path that can take 5 seconds:
// https://golang.org/src/net/http/server.go?s=83923:83977#L2710
// This avoids timeouts in shutdown caused by new idle connections, without
// making the timeout too large.
const defaultShutdownTimeout = 6 * time.Second
// InboundOption customizes the behavior of an HTTP Inbound constructed with
// NewInbound.
type InboundOption func(*Inbound)
func (InboundOption) httpOption() {}
// Mux specifies that the HTTP server should make the YARPC endpoint available
// under the given pattern on the given ServeMux. By default, the YARPC
// service is made available on all paths of the HTTP server. By specifying a
// ServeMux, users can narrow the endpoints under which the YARPC service is
// available and offer their own non-YARPC endpoints.
func Mux(pattern string, mux *http.ServeMux) InboundOption {
return func(i *Inbound) {
i.mux = mux
i.muxPattern = pattern
}
}
// Interceptor specifies a function which can wrap the YARPC handler. If
// provided, this function will be called with an http.Handler which will
// route requests through YARPC. The http.Handler returned by this function
// may delegate requests to the provided YARPC handler to route them through
// YARPC.
func Interceptor(interceptor func(yarpcHandler http.Handler) http.Handler) InboundOption {
return func(i *Inbound) {
i.interceptor = interceptor
}
}
// GrabHeaders specifies additional headers that are not prefixed with
// ApplicationHeaderPrefix that should be propagated to the caller.
//
// All headers given must begin with x- or X- or the Inbound that the
// returned option is passed to will return an error when Start is called.
//
// Headers specified with GrabHeaders are case-insensitive.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
func GrabHeaders(headers ...string) InboundOption {
return func(i *Inbound) {
for _, header := range headers {
i.grabHeaders[strings.ToLower(header)] = struct{}{}
}
}
}
// ShutdownTimeout specifies the maximum duration the inbound should wait for
// closing idle connections, and pending calls to complete.
//
// Set to 0 to wait for a complete drain.
//
// Defaults to 5 seconds.
func ShutdownTimeout(timeout time.Duration) InboundOption {
return func(i *Inbound) {
i.shutdownTimeout = timeout
}
}
// NewInbound builds a new HTTP inbound that listens on the given address and
// sharing this transport.
func (t *Transport) NewInbound(addr string, opts ...InboundOption) *Inbound {
i := &Inbound{
once: lifecycle.NewOnce(),
addr: addr,
shutdownTimeout: defaultShutdownTimeout,
tracer: t.tracer,
logger: t.logger,
transport: t,
grabHeaders: make(map[string]struct{}),
bothResponseError: true,
}
for _, opt := range opts {
opt(i)
}
return i
}
// Inbound receives YARPC requests using an HTTP server. It may be constructed
// using the NewInbound method on the Transport.
type Inbound struct {
addr string
mux *http.ServeMux
muxPattern string
server *intnet.HTTPServer
shutdownTimeout time.Duration
router transport.Router
tracer opentracing.Tracer
logger *zap.Logger
transport *Transport
grabHeaders map[string]struct{}
interceptor func(http.Handler) http.Handler
once *lifecycle.Once
// should only be false in testing
bothResponseError bool
}
// Tracer configures a tracer on this inbound.
func (i *Inbound) Tracer(tracer opentracing.Tracer) *Inbound {
i.tracer = tracer
return i
}
// SetRouter configures a router to handle incoming requests.
// This satisfies the transport.Inbound interface, and would be called
// by a dispatcher when it starts.
func (i *Inbound) SetRouter(router transport.Router) {
i.router = router
}
// Transports returns the inbound's HTTP transport.
func (i *Inbound) Transports() []transport.Transport {
return []transport.Transport{i.transport}
}
// Start starts the inbound with a given service detail, opening a listening
// socket.
func (i *Inbound) Start() error {
return i.once.Start(i.start)
}
func (i *Inbound) start() error {
if i.router == nil {
return yarpcerrors.Newf(yarpcerrors.CodeInternal, "no router configured for transport inbound")
}
for header := range i.grabHeaders {
if !strings.HasPrefix(header, "x-") {
return yarpcerrors.Newf(yarpcerrors.CodeInvalidArgument, "header %s does not begin with 'x-'", header)
}
}
var httpHandler http.Handler = handler{
router: i.router,
tracer: i.tracer,
grabHeaders: i.grabHeaders,
bothResponseError: i.bothResponseError,
logger: i.logger,
}
if i.interceptor != nil {
httpHandler = i.interceptor(httpHandler)
}
if i.mux != nil {
i.mux.Handle(i.muxPattern, httpHandler)
httpHandler = i.mux
}
i.server = intnet.NewHTTPServer(&http.Server{
Addr: i.addr,
Handler: httpHandler,
})
if err := i.server.ListenAndServe(); err != nil {
return err
}
i.addr = i.server.Listener().Addr().String() // in case it changed
i.logger.Info("started HTTP inbound", zap.String("address", i.addr))
if len(i.router.Procedures()) == 0 {
i.logger.Warn("no procedures specified for HTTP inbound")
}
return nil
}
// Stop the inbound using Shutdown.
func (i *Inbound) Stop() error {
ctx, cancel := context.WithTimeout(context.Background(), i.shutdownTimeout)
defer cancel()
return i.shutdown(ctx)
}
// shutdown the inbound, closing the listening socket, closing idle
// connections, and waiting for all pending calls to complete.
func (i *Inbound) shutdown(ctx context.Context) error {
return i.once.Stop(func() error {
if i.server == nil {
return nil
}
return i.server.Shutdown(ctx)
})
}
// IsRunning returns whether the inbound is currently running
func (i *Inbound) IsRunning() bool {
return i.once.IsRunning()
}
// Addr returns the address on which the server is listening. Returns nil if
// Start has not been called yet.
func (i *Inbound) Addr() net.Addr {
if i.server == nil {
return nil
}
listener := i.server.Listener()
if listener == nil {
return nil
}
return listener.Addr()
}
// Introspect returns the state of the inbound for introspection purposes.
func (i *Inbound) Introspect() introspection.InboundStatus {
state := "Stopped"
if i.IsRunning() {
state = "Started"
}
var addrString string
if addr := i.Addr(); addr != nil {
addrString = addr.String()
}
return introspection.InboundStatus{
Transport: "http",
Endpoint: addrString,
State: state,
}
}
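
A note on the interceptor field above: the review feedback for this entry asks that the docs call out interceptor chaining order. As a point of reference, here is a minimal, self-contained Go sketch of order-preserving chaining. The names (chain, tag) are illustrative assumptions, not the actual yarpc option API.

package main

import (
	"fmt"
	"net/http"
)

// interceptor mirrors the wrapper shape used by the inbound above.
type interceptor func(http.Handler) http.Handler

// chain wraps base so that the first interceptor passed becomes the
// outermost layer, i.e. it observes every request first.
func chain(base http.Handler, interceptors ...interceptor) http.Handler {
	h := base
	for i := len(interceptors) - 1; i >= 0; i-- {
		h = interceptors[i](h)
	}
	return h
}

// tag returns an interceptor that logs its name before delegating.
func tag(name string) interceptor {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Println("enter", name)
			next.ServeHTTP(w, r)
		})
	}
}

func main() {
	base := http.HandlerFunc(func(http.ResponseWriter, *http.Request) {})
	// Prints "enter a" then "enter b": the same order the interceptors were passed.
	chain(base, tag("a"), tag("b")).ServeHTTP(nil, nil)
}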
| 1 | 19,757 | Please update the method described to point out that it maintains a chain of interceptors and they will be called in the same order passed in the options. | yarpc-yarpc-go | go |
@@ -37,9 +37,16 @@ namespace Datadog.Trace.ClrProfiler.Managed.Loader
}
var path = Path.Combine(ManagedProfilerDirectory, $"{assemblyName}.dll");
+
if (File.Exists(path))
{
- StartupLogger.Debug("Loading {0}", path);
+ if (args.Name.StartsWith("Datadog.Trace, Version=") && args.Name != AssemblyName)
+ {
+ StartupLogger.Debug("Trying to load {0} which does not match the expected version ({1})", args.Name, AssemblyName);
+ return null;
+ }
+
+ StartupLogger.Debug("Resolving {0}, loading {1}", args.Name, path);
return Assembly.LoadFrom(path);
}
| 1 | // <copyright file="Startup.NetFramework.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
#if NETFRAMEWORK
using System;
using System.IO;
using System.Reflection;
namespace Datadog.Trace.ClrProfiler.Managed.Loader
{
/// <summary>
/// A class that attempts to load the Datadog.Trace .NET assembly.
/// </summary>
public partial class Startup
{
private static string ResolveManagedProfilerDirectory()
{
var tracerHomeDirectory = ReadEnvironmentVariable("DD_DOTNET_TRACER_HOME") ?? string.Empty;
return Path.Combine(tracerHomeDirectory, "net461");
}
private static Assembly AssemblyResolve_ManagedProfilerDependencies(object sender, ResolveEventArgs args)
{
var assemblyName = new AssemblyName(args.Name).Name;
// On .NET Framework, having a non-US locale can cause mscorlib
// to enter the AssemblyResolve event when searching for resources
// in its satellite assemblies. Exit early so we don't cause
// infinite recursion.
if (string.Equals(assemblyName, "mscorlib.resources", StringComparison.OrdinalIgnoreCase) ||
string.Equals(assemblyName, "System.Net.Http", StringComparison.OrdinalIgnoreCase))
{
return null;
}
var path = Path.Combine(ManagedProfilerDirectory, $"{assemblyName}.dll");
if (File.Exists(path))
{
StartupLogger.Debug("Loading {0}", path);
return Assembly.LoadFrom(path);
}
return null;
}
}
}
#endif
| 1 | 23,877 | Is this case purely hypothetical or did it solve an issue that occurred in your testing? I'm trying to understand this change a little better | DataDog-dd-trace-dotnet | .cs |
@@ -431,7 +431,7 @@ func (p *ReplicationTaskProcessorImpl) putReplicationTaskToDLQ(replicationTask *
p.metricsClient.Scope(
metrics.ReplicationDLQStatsScope,
metrics.TargetClusterTag(p.sourceCluster),
- metrics.InstanceTag(strconv.Itoa(p.shard.GetShardID())),
+ metrics.InstanceTag(strconv.Itoa(int(p.shard.GetShardID()))),
).UpdateGauge(
metrics.ReplicationDLQMaxLevelGauge,
float64(request.TaskInfo.GetTaskId()), | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination replicationTaskProcessor_mock.go -self_package go.temporal.io/server/service/history
package history
import (
"context"
"fmt"
"math"
"strconv"
"sync/atomic"
"time"
"go.temporal.io/api/serviceerror"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/api/historyservice/v1"
"go.temporal.io/server/api/persistenceblobs/v1"
replicationspb "go.temporal.io/server/api/replication/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/backoff"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
"go.temporal.io/server/common/quotas"
serviceerrors "go.temporal.io/server/common/serviceerror"
)
const (
dropSyncShardTaskTimeThreshold = 10 * time.Minute
replicationTimeout = 30 * time.Second
taskErrorRetryBackoffCoefficient = 1.2
dlqErrorRetryWait = time.Second
emptyMessageID = -1
)
var (
// ErrUnknownReplicationTask is the error to indicate unknown replication task type
ErrUnknownReplicationTask = serviceerror.NewInvalidArgument("unknown replication task")
)
type (
// ReplicationTaskProcessorImpl is responsible for processing replication tasks for a shard.
ReplicationTaskProcessorImpl struct {
currentCluster string
sourceCluster string
status int32
shard ShardContext
historyEngine Engine
historySerializer persistence.PayloadSerializer
config *Config
metricsClient metrics.Client
logger log.Logger
replicationTaskExecutor replicationTaskExecutor
hostRateLimiter *quotas.DynamicRateLimiter
shardRateLimiter *quotas.DynamicRateLimiter
taskRetryPolicy backoff.RetryPolicy
dlqRetryPolicy backoff.RetryPolicy
noTaskRetrier backoff.Retrier
lastProcessedMessageID int64
lastRetrievedMessageID int64
requestChan chan<- *request
syncShardChan chan *replicationspb.SyncShardStatus
done chan struct{}
}
// ReplicationTaskProcessor is responsible for processing replication tasks for a shard.
ReplicationTaskProcessor interface {
common.Daemon
}
request struct {
token *replicationspb.ReplicationToken
respChan chan<- *replicationspb.ReplicationMessages
}
)
// NewReplicationTaskProcessor creates a new replication task processor.
func NewReplicationTaskProcessor(
shard ShardContext,
historyEngine Engine,
config *Config,
metricsClient metrics.Client,
replicationTaskFetcher ReplicationTaskFetcher,
replicationTaskExecutor replicationTaskExecutor,
) *ReplicationTaskProcessorImpl {
shardID := shard.GetShardID()
taskRetryPolicy := backoff.NewExponentialRetryPolicy(config.ReplicationTaskProcessorErrorRetryWait(shardID))
taskRetryPolicy.SetBackoffCoefficient(taskErrorRetryBackoffCoefficient)
taskRetryPolicy.SetMaximumAttempts(config.ReplicationTaskProcessorErrorRetryMaxAttempts(shardID))
dlqRetryPolicy := backoff.NewExponentialRetryPolicy(dlqErrorRetryWait)
dlqRetryPolicy.SetExpirationInterval(backoff.NoInterval)
noTaskBackoffPolicy := backoff.NewExponentialRetryPolicy(config.ReplicationTaskProcessorNoTaskRetryWait(shardID))
noTaskBackoffPolicy.SetBackoffCoefficient(1)
noTaskBackoffPolicy.SetExpirationInterval(backoff.NoInterval)
noTaskRetrier := backoff.NewRetrier(noTaskBackoffPolicy, backoff.SystemClock)
return &ReplicationTaskProcessorImpl{
currentCluster: shard.GetClusterMetadata().GetCurrentClusterName(),
sourceCluster: replicationTaskFetcher.GetSourceCluster(),
status: common.DaemonStatusInitialized,
shard: shard,
historyEngine: historyEngine,
historySerializer: persistence.NewPayloadSerializer(),
config: config,
metricsClient: metricsClient,
logger: shard.GetLogger(),
replicationTaskExecutor: replicationTaskExecutor,
hostRateLimiter: replicationTaskFetcher.GetRateLimiter(),
shardRateLimiter: quotas.NewDynamicRateLimiter(func() float64 {
return config.ReplicationTaskProcessorShardQPS()
		}),
		taskRetryPolicy: taskRetryPolicy,
noTaskRetrier: noTaskRetrier,
requestChan: replicationTaskFetcher.GetRequestChan(),
syncShardChan: make(chan *replicationspb.SyncShardStatus),
done: make(chan struct{}),
lastProcessedMessageID: emptyMessageID,
lastRetrievedMessageID: emptyMessageID,
}
}
// Start starts the processor
func (p *ReplicationTaskProcessorImpl) Start() {
if !atomic.CompareAndSwapInt32(&p.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) {
return
}
go p.processorLoop()
go p.syncShardStatusLoop()
go p.cleanupReplicationTaskLoop()
p.logger.Info("ReplicationTaskProcessor started.")
}
// Stop stops the processor
func (p *ReplicationTaskProcessorImpl) Stop() {
if !atomic.CompareAndSwapInt32(&p.status, common.DaemonStatusStarted, common.DaemonStatusStopped) {
return
}
p.logger.Info("ReplicationTaskProcessor shutting down.")
close(p.done)
}
func (p *ReplicationTaskProcessorImpl) processorLoop() {
p.lastProcessedMessageID = p.shard.GetClusterReplicationLevel(p.sourceCluster)
defer func() {
p.logger.Info("Closing replication task processor.", tag.ReadLevel(p.lastRetrievedMessageID))
}()
Loop:
for {
// for each iteration, do close check first
select {
case <-p.done:
p.logger.Info("ReplicationTaskProcessor shutting down.")
return
default:
}
respChan := p.sendFetchMessageRequest()
select {
case response, ok := <-respChan:
if !ok {
p.logger.Debug("Fetch replication messages chan closed.")
continue Loop
}
p.logger.Debug("Got fetch replication messages response.",
tag.ReadLevel(response.GetLastRetrievedMessageId()),
tag.Bool(response.GetHasMore()),
tag.Counter(len(response.GetReplicationTasks())),
)
p.taskProcessingStartWait()
p.processResponse(response)
case <-p.done:
return
}
}
}
func (p *ReplicationTaskProcessorImpl) cleanupReplicationTaskLoop() {
shardID := p.shard.GetShardID()
timer := time.NewTimer(backoff.JitDuration(
p.config.ReplicationTaskProcessorCleanupInterval(shardID),
p.config.ReplicationTaskProcessorCleanupJitterCoefficient(shardID),
))
for {
select {
case <-p.done:
timer.Stop()
return
case <-timer.C:
if p.config.EnableCleanupReplicationTask() {
err := p.cleanupAckedReplicationTasks()
if err != nil {
p.logger.Error("Failed to clean up replication messages.", tag.Error(err))
p.metricsClient.Scope(metrics.ReplicationTaskCleanupScope).IncCounter(metrics.ReplicationTaskCleanupFailure)
}
}
timer.Reset(backoff.JitDuration(
p.config.ReplicationTaskProcessorCleanupInterval(shardID),
p.config.ReplicationTaskProcessorCleanupJitterCoefficient(shardID),
))
}
}
}
func (p *ReplicationTaskProcessorImpl) cleanupAckedReplicationTasks() error {
clusterMetadata := p.shard.GetClusterMetadata()
currentCluster := clusterMetadata.GetCurrentClusterName()
minAckLevel := int64(math.MaxInt64)
for clusterName, clusterInfo := range clusterMetadata.GetAllClusterInfo() {
if !clusterInfo.Enabled {
continue
}
if clusterName != currentCluster {
ackLevel := p.shard.GetClusterReplicationLevel(clusterName)
if ackLevel < minAckLevel {
minAckLevel = ackLevel
}
}
}
p.logger.Info("Cleaning up replication task queue.", tag.ReadLevel(minAckLevel))
p.metricsClient.Scope(metrics.ReplicationTaskCleanupScope).IncCounter(metrics.ReplicationTaskCleanupCount)
p.metricsClient.Scope(metrics.ReplicationTaskFetcherScope,
metrics.TargetClusterTag(p.currentCluster),
).RecordTimer(
metrics.ReplicationTasksLag,
time.Duration(p.shard.GetTransferMaxReadLevel()-minAckLevel),
)
return p.shard.GetExecutionManager().RangeCompleteReplicationTask(
&persistence.RangeCompleteReplicationTaskRequest{
InclusiveEndTaskID: minAckLevel,
},
)
}
func (p *ReplicationTaskProcessorImpl) sendFetchMessageRequest() <-chan *replicationspb.ReplicationMessages {
respChan := make(chan *replicationspb.ReplicationMessages, 1)
// TODO: when we support prefetching, LastRetrievedMessageId can be different than LastProcessedMessageId
p.requestChan <- &request{
token: &replicationspb.ReplicationToken{
ShardId: int32(p.shard.GetShardID()),
LastRetrievedMessageId: p.lastRetrievedMessageID,
LastProcessedMessageId: p.lastProcessedMessageID,
},
respChan: respChan,
}
return respChan
}
func (p *ReplicationTaskProcessorImpl) processResponse(response *replicationspb.ReplicationMessages) {
p.syncShardChan <- response.GetSyncShardStatus()
scope := p.metricsClient.Scope(metrics.ReplicationTaskFetcherScope, metrics.TargetClusterTag(p.sourceCluster))
batchRequestStartTime := time.Now()
ctx := context.Background()
for _, replicationTask := range response.ReplicationTasks {
// TODO: move to MultiStageRateLimiter
_ = p.hostRateLimiter.Wait(ctx)
_ = p.shardRateLimiter.Wait(ctx)
err := p.processSingleTask(replicationTask)
if err != nil {
// Processor is shutdown. Exit without updating the checkpoint.
return
}
}
// Note here we check replication tasks instead of hasMore. The expectation is that in a steady state
// we will receive replication tasks but hasMore is false (meaning that we are always catching up).
// So hasMore might not be a good indicator for additional wait.
if len(response.ReplicationTasks) == 0 {
backoffDuration := p.noTaskRetrier.NextBackOff()
time.Sleep(backoffDuration)
} else {
scope.RecordTimer(metrics.ReplicationTasksAppliedLatency, time.Now().Sub(batchRequestStartTime))
}
p.lastProcessedMessageID = response.GetLastRetrievedMessageId()
p.lastRetrievedMessageID = response.GetLastRetrievedMessageId()
scope.UpdateGauge(metrics.LastRetrievedMessageID, float64(p.lastRetrievedMessageID))
p.noTaskRetrier.Reset()
}
func (p *ReplicationTaskProcessorImpl) syncShardStatusLoop() {
timer := time.NewTimer(backoff.JitDuration(
p.config.ShardSyncMinInterval(),
p.config.ShardSyncTimerJitterCoefficient(),
))
var syncShardTask *replicationspb.SyncShardStatus
for {
select {
case syncShardRequest := <-p.syncShardChan:
syncShardTask = syncShardRequest
case <-timer.C:
if err := p.handleSyncShardStatus(
syncShardTask,
); err != nil {
p.logger.Error("failed to sync shard status", tag.Error(err))
p.metricsClient.Scope(metrics.HistorySyncShardStatusScope).IncCounter(metrics.SyncShardFromRemoteFailure)
}
timer.Reset(backoff.JitDuration(
p.config.ShardSyncMinInterval(),
p.config.ShardSyncTimerJitterCoefficient(),
))
case <-p.done:
timer.Stop()
return
}
}
}
func (p *ReplicationTaskProcessorImpl) handleSyncShardStatus(
status *replicationspb.SyncShardStatus,
) error {
if status == nil ||
p.shard.GetTimeSource().Now().Sub(
timestamp.TimeValue(status.GetStatusTime())) > dropSyncShardTaskTimeThreshold {
return nil
}
p.metricsClient.Scope(metrics.HistorySyncShardStatusScope).IncCounter(metrics.SyncShardFromRemoteCounter)
ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout)
defer cancel()
return p.historyEngine.SyncShardStatus(ctx, &historyservice.SyncShardStatusRequest{
SourceCluster: p.sourceCluster,
ShardId: int64(p.shard.GetShardID()),
StatusTime: status.StatusTime,
})
}
func (p *ReplicationTaskProcessorImpl) processSingleTask(replicationTask *replicationspb.ReplicationTask) error {
retryTransientError := func() error {
return backoff.Retry(
func() error {
return p.processTaskOnce(replicationTask)
},
p.taskRetryPolicy,
isTransientRetryableError)
}
// Handle service busy error
err := backoff.Retry(
retryTransientError,
common.CreateReplicationServiceBusyRetryPolicy(),
common.IsResourceExhausted,
)
if err != nil {
p.logger.Error(
"Failed to apply replication task after retry. Putting task into DLQ.",
tag.TaskID(replicationTask.GetSourceTaskId()),
tag.Error(err),
)
return p.putReplicationTaskToDLQ(replicationTask)
}
return nil
}
func (p *ReplicationTaskProcessorImpl) processTaskOnce(replicationTask *replicationspb.ReplicationTask) error {
scope, err := p.replicationTaskExecutor.execute(
replicationTask,
false)
if err != nil {
p.updateFailureMetric(scope, err)
} else {
p.logger.Debug("Successfully applied replication task.", tag.TaskID(replicationTask.GetSourceTaskId()))
p.metricsClient.Scope(
metrics.ReplicationTaskFetcherScope,
metrics.TargetClusterTag(p.sourceCluster),
).IncCounter(metrics.ReplicationTasksApplied)
}
return err
}
func (p *ReplicationTaskProcessorImpl) putReplicationTaskToDLQ(replicationTask *replicationspb.ReplicationTask) error {
request, err := p.generateDLQRequest(replicationTask)
if err != nil {
p.logger.Error("Failed to generate DLQ replication task.", tag.Error(err))
// We cannot deserialize the task. Dropping it.
return nil
}
p.logger.Info("Put history replication to DLQ",
tag.WorkflowNamespaceID(request.TaskInfo.GetNamespaceId()),
tag.WorkflowID(request.TaskInfo.GetWorkflowId()),
tag.WorkflowRunID(request.TaskInfo.GetRunId()),
tag.TaskID(request.TaskInfo.GetTaskId()),
)
p.metricsClient.Scope(
metrics.ReplicationDLQStatsScope,
metrics.TargetClusterTag(p.sourceCluster),
metrics.InstanceTag(strconv.Itoa(p.shard.GetShardID())),
).UpdateGauge(
metrics.ReplicationDLQMaxLevelGauge,
float64(request.TaskInfo.GetTaskId()),
)
	// The following is guaranteed to succeed or retry forever until the processor is shut down.
return backoff.Retry(func() error {
err := p.shard.GetExecutionManager().PutReplicationTaskToDLQ(request)
if err != nil {
p.logger.Error("Failed to put replication task to DLQ.", tag.Error(err))
p.metricsClient.IncCounter(metrics.ReplicationTaskFetcherScope, metrics.ReplicationDLQFailed)
}
return err
}, p.dlqRetryPolicy, p.shouldRetryDLQ)
}
func (p *ReplicationTaskProcessorImpl) generateDLQRequest(
replicationTask *replicationspb.ReplicationTask,
) (*persistence.PutReplicationTaskToDLQRequest, error) {
switch replicationTask.TaskType {
case enumsspb.REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK:
taskAttributes := replicationTask.GetSyncActivityTaskAttributes()
return &persistence.PutReplicationTaskToDLQRequest{
SourceClusterName: p.sourceCluster,
TaskInfo: &persistenceblobs.ReplicationTaskInfo{
NamespaceId: taskAttributes.GetNamespaceId(),
WorkflowId: taskAttributes.GetWorkflowId(),
RunId: taskAttributes.GetRunId(),
TaskId: replicationTask.GetSourceTaskId(),
TaskType: enumsspb.TASK_TYPE_REPLICATION_SYNC_ACTIVITY,
ScheduledId: taskAttributes.GetScheduledId(),
},
}, nil
case enumsspb.REPLICATION_TASK_TYPE_HISTORY_V2_TASK:
taskAttributes := replicationTask.GetHistoryTaskV2Attributes()
eventsDataBlob := persistence.NewDataBlobFromProto(taskAttributes.GetEvents())
events, err := p.historySerializer.DeserializeBatchEvents(eventsDataBlob)
if err != nil {
return nil, err
}
if len(events) == 0 {
p.logger.Error("Empty events in a batch")
return nil, fmt.Errorf("corrupted history event batch, empty events")
}
return &persistence.PutReplicationTaskToDLQRequest{
SourceClusterName: p.sourceCluster,
TaskInfo: &persistenceblobs.ReplicationTaskInfo{
NamespaceId: taskAttributes.GetNamespaceId(),
WorkflowId: taskAttributes.GetWorkflowId(),
RunId: taskAttributes.GetRunId(),
TaskId: replicationTask.GetSourceTaskId(),
TaskType: enumsspb.TASK_TYPE_REPLICATION_HISTORY,
FirstEventId: events[0].GetEventId(),
NextEventId: events[len(events)-1].GetEventId(),
Version: events[0].GetVersion(),
},
}, nil
default:
return nil, fmt.Errorf("unknown replication task type")
}
}
func isTransientRetryableError(err error) bool {
switch err.(type) {
case *serviceerror.InvalidArgument:
return false
case *serviceerror.ResourceExhausted:
return false
default:
return true
}
}
func (p *ReplicationTaskProcessorImpl) shouldRetryDLQ(err error) bool {
if err == nil {
return false
}
select {
case <-p.done:
p.logger.Info("ReplicationTaskProcessor shutting down.")
return false
default:
return true
}
}
func (p *ReplicationTaskProcessorImpl) updateFailureMetric(scope int, err error) {
// Always update failure counter for all replicator errors
p.metricsClient.IncCounter(scope, metrics.ReplicatorFailures)
// Also update counter to distinguish between type of failures
switch err.(type) {
case *serviceerrors.ShardOwnershipLost:
p.metricsClient.IncCounter(scope, metrics.ServiceErrShardOwnershipLostCounter)
case *serviceerror.InvalidArgument:
p.metricsClient.IncCounter(scope, metrics.ServiceErrInvalidArgumentCounter)
case *serviceerror.NamespaceNotActive:
p.metricsClient.IncCounter(scope, metrics.ServiceErrNamespaceNotActiveCounter)
case *serviceerror.WorkflowExecutionAlreadyStarted:
p.metricsClient.IncCounter(scope, metrics.ServiceErrExecutionAlreadyStartedCounter)
case *serviceerror.NotFound:
p.metricsClient.IncCounter(scope, metrics.ServiceErrNotFoundCounter)
case *serviceerror.ResourceExhausted:
p.metricsClient.IncCounter(scope, metrics.ServiceErrResourceExhaustedCounter)
case *serviceerrors.RetryTask:
p.metricsClient.IncCounter(scope, metrics.ServiceErrRetryTaskCounter)
case *serviceerror.DeadlineExceeded:
p.metricsClient.IncCounter(scope, metrics.ServiceErrContextTimeoutCounter)
}
}
func (p *ReplicationTaskProcessorImpl) taskProcessingStartWait() {
shardID := p.shard.GetShardID()
time.Sleep(backoff.JitDuration(
p.config.ReplicationTaskProcessorStartWait(shardID),
p.config.ReplicationTaskProcessorStartWaitJitterCoefficient(shardID),
))
}
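
The review note for this entry points to a shared convert.Int32ToString helper (added in #762) rather than strconv.Itoa(int(...)) at each call site. Below is a minimal sketch of what such a helper amounts to; its body is an assumption about the shape of the helper, not a copy of the actual temporal code.

package convert

import "strconv"

// Int32ToString formats an int32 value (such as a shard ID) in base 10,
// sparing callers the int widening at every call site.
func Int32ToString(v int32) string {
	return strconv.FormatInt(int64(v), 10)
}

With a helper like this, the tag in the patch above would read metrics.InstanceTag(convert.Int32ToString(p.shard.GetShardID())).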
| 1 | 10,363 | Use instead `convert.Int32ToString` that was added in #762 for this purpose. | temporalio-temporal | go |
@@ -473,7 +473,7 @@ public final class HashSet<T> implements Kind1<HashSet<?>, T>, Set<T>, Serializa
@Override
public HashSet<T> add(T element) {
- return new HashSet<>(tree.put(element, element));
+ return contains(element) ? this : new HashSet<>(tree.put(element, element));
}
@Override
| 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2017 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Kind1;
import javaslang.Tuple;
import javaslang.Tuple2;
import javaslang.Tuple3;
import javaslang.control.Option;
import java.io.*;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.function.*;
import java.util.stream.Collector;
/**
* An immutable {@code HashSet} implementation.
*
* @param <T> Component type
* @author Ruslan Sennov, Patryk Najda, Daniel Dietrich
* @since 2.0.0
*/
public final class HashSet<T> implements Kind1<HashSet<?>, T>, Set<T>, Serializable {
private static final long serialVersionUID = 1L;
private static final HashSet<?> EMPTY = new HashSet<>(HashArrayMappedTrie.empty());
private final HashArrayMappedTrie<T, T> tree;
private HashSet(HashArrayMappedTrie<T, T> tree) {
this.tree = tree;
}
@SuppressWarnings("unchecked")
public static <T> HashSet<T> empty() {
return (HashSet<T>) EMPTY;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link javaslang.collection.HashSet}.
*
* @param <T> Component type of the HashSet.
* @return A javaslang.collection.HashSet Collector.
*/
public static <T> Collector<T, ArrayList<T>, HashSet<T>> collector() {
final Supplier<ArrayList<T>> supplier = ArrayList::new;
final BiConsumer<ArrayList<T>, T> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<T>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<T>, HashSet<T>> finisher = HashSet::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
* Narrows a widened {@code HashSet<? extends T>} to {@code HashSet<T>}
* by performing a type-safe cast. This is eligible because immutable/read-only
* collections are covariant.
*
* @param hashSet A {@code HashSet}.
* @param <T> Component type of the {@code HashSet}.
* @return the given {@code hashSet} instance as narrowed type {@code HashSet<T>}.
*/
@SuppressWarnings("unchecked")
public static <T> HashSet<T> narrow(HashSet<? extends T> hashSet) {
return (HashSet<T>) hashSet;
}
/**
* Returns a singleton {@code HashSet}, i.e. a {@code HashSet} of one element.
*
* @param element An element.
* @param <T> The component type
* @return A new HashSet instance containing the given element
*/
public static <T> HashSet<T> of(T element) {
return HashSet.<T> empty().add(element);
}
/**
* Creates a HashSet of the given elements.
*
* <pre><code>HashSet.of(1, 2, 3, 4)</code></pre>
*
* @param <T> Component type of the HashSet.
* @param elements Zero or more elements.
* @return A set containing the given elements.
* @throws NullPointerException if {@code elements} is null
*/
@SafeVarargs
public static <T> HashSet<T> of(T... elements) {
Objects.requireNonNull(elements, "elements is null");
HashArrayMappedTrie<T, T> tree = HashArrayMappedTrie.empty();
for (T element : elements) {
tree = tree.put(element, element);
}
return tree.isEmpty() ? empty() : new HashSet<>(tree);
}
/**
* Returns an HashSet containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param <T> Component type of the HashSet
* @param n The number of elements in the HashSet
* @param f The Function computing element values
* @return An HashSet consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
public static <T> HashSet<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
Objects.requireNonNull(f, "f is null");
return Collections.tabulate(n, f, HashSet.empty(), HashSet::of);
}
/**
* Returns an HashSet containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param <T> Component type of the HashSet
* @param n The number of elements in the HashSet
* @param s The Supplier computing element values
* @return An HashSet of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
public static <T> HashSet<T> fill(int n, Supplier<? extends T> s) {
Objects.requireNonNull(s, "s is null");
return Collections.fill(n, s, HashSet.empty(), HashSet::of);
}
/**
* Creates a HashSet of the given elements.
*
* @param elements Set elements
* @param <T> The value type
* @return A new HashSet containing the given entries
*/
@SuppressWarnings("unchecked")
public static <T> HashSet<T> ofAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (elements instanceof HashSet) {
return (HashSet<T>) elements;
} else {
final HashArrayMappedTrie<T, T> tree = addAll(HashArrayMappedTrie.empty(), elements);
return tree.isEmpty() ? empty() : new HashSet<>(tree);
}
}
/**
* Creates a HashSet that contains the elements of the given {@link java.util.stream.Stream}.
*
* @param javaStream A {@link java.util.stream.Stream}
* @param <T> Component type of the Stream.
* @return A HashSet containing the given elements in the same order.
*/
public static <T> HashSet<T> ofAll(java.util.stream.Stream<? extends T> javaStream) {
Objects.requireNonNull(javaStream, "javaStream is null");
return HashSet.ofAll(Iterator.ofAll(javaStream.iterator()));
}
/**
* Creates a HashSet from boolean values.
*
* @param elements boolean values
* @return A new HashSet of Boolean values
* @throws NullPointerException if elements is null
*/
public static HashSet<Boolean> ofAll(boolean... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet from byte values.
*
* @param elements byte values
* @return A new HashSet of Byte values
* @throws NullPointerException if elements is null
*/
public static HashSet<Byte> ofAll(byte... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet from char values.
*
* @param elements char values
* @return A new HashSet of Character values
* @throws NullPointerException if elements is null
*/
public static HashSet<Character> ofAll(char... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet from double values.
*
* @param elements double values
* @return A new HashSet of Double values
* @throws NullPointerException if elements is null
*/
public static HashSet<Double> ofAll(double... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet from float values.
*
* @param elements float values
* @return A new HashSet of Float values
* @throws NullPointerException if elements is null
*/
public static HashSet<Float> ofAll(float... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet from int values.
*
* @param elements int values
* @return A new HashSet of Integer values
* @throws NullPointerException if elements is null
*/
public static HashSet<Integer> ofAll(int... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet from long values.
*
* @param elements long values
* @return A new HashSet of Long values
* @throws NullPointerException if elements is null
*/
public static HashSet<Long> ofAll(long... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet from short values.
*
* @param elements short values
* @return A new HashSet of Short values
* @throws NullPointerException if elements is null
*/
public static HashSet<Short> ofAll(short... elements) {
Objects.requireNonNull(elements, "elements is null");
return HashSet.ofAll(Iterator.ofAll(elements));
}
/**
* Creates a HashSet of int numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.range(0, 0) // = HashSet()
* HashSet.range(2, 0) // = HashSet()
* HashSet.range(-2, 2) // = HashSet(-2, -1, 0, 1)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of int values as specified or the empty range if {@code from >= toExclusive}
*/
public static HashSet<Integer> range(int from, int toExclusive) {
return HashSet.ofAll(Iterator.range(from, toExclusive));
}
public static HashSet<Character> range(char from, char toExclusive) {
return HashSet.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a HashSet of int numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.rangeBy(1, 3, 1) // = HashSet(1, 2)
* HashSet.rangeBy(1, 4, 2) // = HashSet(1, 3)
* HashSet.rangeBy(4, 1, -2) // = HashSet(4, 2)
* HashSet.rangeBy(4, 1, 2) // = HashSet()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
     * @return a range of int values as specified or the empty range if<br>
     * {@code from >= toExclusive} and {@code step > 0} or<br>
     * {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static HashSet<Integer> rangeBy(int from, int toExclusive, int step) {
return HashSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
public static HashSet<Character> rangeBy(char from, char toExclusive, int step) {
return HashSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
@GwtIncompatible
public static HashSet<Double> rangeBy(double from, double toExclusive, double step) {
return HashSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a HashSet of long numbers starting from {@code from}, extending to {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.range(0L, 0L) // = HashSet()
* HashSet.range(2L, 0L) // = HashSet()
* HashSet.range(-2L, 2L) // = HashSet(-2L, -1L, 0L, 1L)
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @return a range of long values as specified or the empty range if {@code from >= toExclusive}
*/
public static HashSet<Long> range(long from, long toExclusive) {
return HashSet.ofAll(Iterator.range(from, toExclusive));
}
/**
* Creates a HashSet of long numbers starting from {@code from}, extending to {@code toExclusive - 1},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.rangeBy(1L, 3L, 1L) // = HashSet(1L, 2L)
* HashSet.rangeBy(1L, 4L, 2L) // = HashSet(1L, 3L)
* HashSet.rangeBy(4L, 1L, -2L) // = HashSet(4L, 2L)
* HashSet.rangeBy(4L, 1L, 2L) // = HashSet()
* </code>
* </pre>
*
* @param from the first number
* @param toExclusive the last number + 1
* @param step the step
* @return a range of long values as specified or the empty range if<br>
     * {@code from >= toExclusive} and {@code step > 0} or<br>
     * {@code from <= toExclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static HashSet<Long> rangeBy(long from, long toExclusive, long step) {
return HashSet.ofAll(Iterator.rangeBy(from, toExclusive, step));
}
/**
* Creates a HashSet of int numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.rangeClosed(0, 0) // = HashSet(0)
* HashSet.rangeClosed(2, 0) // = HashSet()
* HashSet.rangeClosed(-2, 2) // = HashSet(-2, -1, 0, 1, 2)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of int values as specified or the empty range if {@code from > toInclusive}
*/
public static HashSet<Integer> rangeClosed(int from, int toInclusive) {
return HashSet.ofAll(Iterator.rangeClosed(from, toInclusive));
}
public static HashSet<Character> rangeClosed(char from, char toInclusive) {
return HashSet.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a HashSet of int numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.rangeClosedBy(1, 3, 1) // = HashSet(1, 2, 3)
* HashSet.rangeClosedBy(1, 4, 2) // = HashSet(1, 3)
* HashSet.rangeClosedBy(4, 1, -2) // = HashSet(4, 2)
* HashSet.rangeClosedBy(4, 1, 2) // = HashSet()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
* @return a range of int values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static HashSet<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return HashSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
public static HashSet<Character> rangeClosedBy(char from, char toInclusive, int step) {
return HashSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
@GwtIncompatible
public static HashSet<Double> rangeClosedBy(double from, double toInclusive, double step) {
return HashSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
/**
* Creates a HashSet of long numbers starting from {@code from}, extending to {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.rangeClosed(0L, 0L) // = HashSet(0L)
* HashSet.rangeClosed(2L, 0L) // = HashSet()
* HashSet.rangeClosed(-2L, 2L) // = HashSet(-2L, -1L, 0L, 1L, 2L)
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @return a range of long values as specified or the empty range if {@code from > toInclusive}
*/
public static HashSet<Long> rangeClosed(long from, long toInclusive) {
return HashSet.ofAll(Iterator.rangeClosed(from, toInclusive));
}
/**
* Creates a HashSet of long numbers starting from {@code from}, extending to {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
* HashSet.rangeClosedBy(1L, 3L, 1L) // = HashSet(1L, 2L, 3L)
* HashSet.rangeClosedBy(1L, 4L, 2L) // = HashSet(1L, 3L)
* HashSet.rangeClosedBy(4L, 1L, -2L) // = HashSet(4L, 2L)
* HashSet.rangeClosedBy(4L, 1L, 2L) // = HashSet()
* </code>
* </pre>
*
* @param from the first number
* @param toInclusive the last number
* @param step the step
     * @return a range of long values as specified or the empty range if<br>
* {@code from > toInclusive} and {@code step > 0} or<br>
* {@code from < toInclusive} and {@code step < 0}
* @throws IllegalArgumentException if {@code step} is zero
*/
public static HashSet<Long> rangeClosedBy(long from, long toInclusive, long step) {
return HashSet.ofAll(Iterator.rangeClosedBy(from, toInclusive, step));
}
@Override
public HashSet<T> add(T element) {
return new HashSet<>(tree.put(element, element));
}
@Override
public HashSet<T> addAll(Iterable<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
final HashArrayMappedTrie<T, T> that = addAll(tree, elements);
if (that.size() == tree.size()) {
return this;
} else {
return new HashSet<>(that);
}
}
@Override
public boolean contains(T element) {
return tree.get(element).isDefined();
}
@Override
public HashSet<T> diff(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty() || elements.isEmpty()) {
return this;
} else {
return removeAll(elements);
}
}
@Override
public HashSet<T> distinct() {
return this;
}
@Override
public HashSet<T> distinctBy(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return HashSet.ofAll(iterator().distinctBy(comparator));
}
@Override
public <U> HashSet<T> distinctBy(Function<? super T, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
return HashSet.ofAll(iterator().distinctBy(keyExtractor));
}
@Override
public HashSet<T> drop(int n) {
if (n <= 0) {
return this;
} else {
return HashSet.ofAll(iterator().drop(n));
}
}
@Override
public HashSet<T> dropRight(int n) {
return drop(n);
}
@Override
public HashSet<T> dropUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
public HashSet<T> dropWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final HashSet<T> dropped = HashSet.ofAll(iterator().dropWhile(predicate));
return dropped.length() == length() ? this : dropped;
}
@Override
public HashSet<T> filter(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final HashSet<T> filtered = HashSet.ofAll(iterator().filter(predicate));
if (filtered.isEmpty()) {
return empty();
} else if (filtered.length() == length()) {
return this;
} else {
return filtered;
}
}
@Override
public <U> HashSet<U> flatMap(Function<? super T, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return empty();
} else {
final HashArrayMappedTrie<U, U> that = foldLeft(HashArrayMappedTrie.empty(),
(tree, t) -> addAll(tree, mapper.apply(t)));
return new HashSet<>(that);
}
}
@Override
public <U> U foldRight(U zero, BiFunction<? super T, ? super U, ? extends U> f) {
return foldLeft(zero, (u, t) -> f.apply(t, u));
}
@Override
public <C> Map<C, HashSet<T>> groupBy(Function<? super T, ? extends C> classifier) {
return Collections.groupBy(this, classifier, HashSet::ofAll);
}
@Override
public Iterator<HashSet<T>> grouped(int size) {
return sliding(size, size);
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public T head() {
if (tree.isEmpty()) {
throw new NoSuchElementException("head of empty set");
}
return iterator().next();
}
@Override
public Option<T> headOption() {
return iterator().headOption();
}
@Override
public HashSet<T> init() {
return tail();
}
@Override
public Option<HashSet<T>> initOption() {
return tailOption();
}
@Override
public HashSet<T> intersect(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty() || elements.isEmpty()) {
return empty();
} else {
final int size = size();
if (size <= elements.size()) {
return retainAll(elements);
} else {
final HashSet<T> results = HashSet.<T> ofAll(elements).retainAll(this);
return (size == results.size()) ? this : results;
}
}
}
@Override
public boolean isEmpty() {
return tree.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
@Override
public Iterator<T> iterator() {
return tree.keysIterator();
}
@Override
public int length() {
return tree.size();
}
@Override
public <U> HashSet<U> map(Function<? super T, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return empty();
} else {
final HashArrayMappedTrie<U, U> that = foldLeft(HashArrayMappedTrie.empty(), (tree, t) -> {
final U u = mapper.apply(t);
return tree.put(u, u);
});
return new HashSet<>(that);
}
}
@Override
public String mkString(CharSequence prefix, CharSequence delimiter, CharSequence suffix) {
return iterator().mkString(prefix, delimiter, suffix);
}
@Override
public Tuple2<HashSet<T>, HashSet<T>> partition(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final Tuple2<Iterator<T>, Iterator<T>> p = iterator().partition(predicate);
return Tuple.of(HashSet.ofAll(p._1), HashSet.ofAll(p._2));
}
@Override
public HashSet<T> peek(Consumer<? super T> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(iterator().head());
}
return this;
}
@Override
public HashSet<T> remove(T element) {
final HashArrayMappedTrie<T, T> newTree = tree.remove(element);
return (newTree == tree) ? this : new HashSet<>(newTree);
}
@Override
public HashSet<T> removeAll(Iterable<? extends T> elements) {
return Collections.removeAll(this, elements);
}
@Override
public HashSet<T> replace(T currentElement, T newElement) {
if (tree.containsKey(currentElement)) {
return remove(currentElement).add(newElement);
} else {
return this;
}
}
@Override
public HashSet<T> replaceAll(T currentElement, T newElement) {
return replace(currentElement, newElement);
}
@Override
public HashSet<T> retainAll(Iterable<? extends T> elements) {
return Collections.retainAll(this, elements);
}
@Override
public HashSet<T> scan(T zero, BiFunction<? super T, ? super T, ? extends T> operation) {
return scanLeft(zero, operation);
}
@Override
public <U> HashSet<U> scanLeft(U zero, BiFunction<? super U, ? super T, ? extends U> operation) {
return Collections.scanLeft(this, zero, operation, HashSet::ofAll);
}
@Override
public <U> HashSet<U> scanRight(U zero, BiFunction<? super T, ? super U, ? extends U> operation) {
return Collections.scanRight(this, zero, operation, HashSet::ofAll);
}
@Override
public Iterator<HashSet<T>> slideBy(Function<? super T, ?> classifier) {
return iterator().slideBy(classifier).map(HashSet::ofAll);
}
@Override
public Iterator<HashSet<T>> sliding(int size) {
return sliding(size, 1);
}
@Override
public Iterator<HashSet<T>> sliding(int size, int step) {
return iterator().sliding(size, step).map(HashSet::ofAll);
}
@Override
public Tuple2<HashSet<T>, HashSet<T>> span(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final Tuple2<Iterator<T>, Iterator<T>> t = iterator().span(predicate);
return Tuple.of(HashSet.ofAll(t._1), HashSet.ofAll(t._2));
}
@Override
public HashSet<T> tail() {
if (tree.isEmpty()) {
throw new UnsupportedOperationException("tail of empty set");
}
return remove(head());
}
@Override
public Option<HashSet<T>> tailOption() {
if (tree.isEmpty()) {
return Option.none();
} else {
return Option.some(tail());
}
}
@Override
public HashSet<T> take(int n) {
if (tree.size() <= n) {
return this;
}
return HashSet.ofAll(() -> iterator().take(n));
}
@Override
public HashSet<T> takeRight(int n) {
return take(n);
}
@Override
public HashSet<T> takeUntil(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeWhile(predicate.negate());
}
@Override
public HashSet<T> takeWhile(Predicate<? super T> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final HashSet<T> taken = HashSet.ofAll(iterator().takeWhile(predicate));
return taken.length() == length() ? this : taken;
}
/**
* Transforms this {@code HashSet}.
*
* @param f A transformation
* @param <U> Type of transformation result
* @return An instance of type {@code U}
* @throws NullPointerException if {@code f} is null
*/
public <U> U transform(Function<? super HashSet<T>, ? extends U> f) {
Objects.requireNonNull(f, "f is null");
return f.apply(this);
}
@Override
public java.util.HashSet<T> toJavaSet() {
return toJavaSet(java.util.HashSet::new);
}
@SuppressWarnings("unchecked")
@Override
public HashSet<T> union(Set<? extends T> elements) {
Objects.requireNonNull(elements, "elements is null");
if (isEmpty()) {
if (elements instanceof HashSet) {
return (HashSet<T>) elements;
} else {
return HashSet.ofAll(elements);
}
} else if (elements.isEmpty()) {
return this;
} else {
final HashArrayMappedTrie<T, T> that = addAll(tree, elements);
if (that.size() == tree.size()) {
return this;
} else {
return new HashSet<>(that);
}
}
}
@Override
public <T1, T2> Tuple2<HashSet<T1>, HashSet<T2>> unzip(
Function<? super T, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
final Tuple2<Iterator<T1>, Iterator<T2>> t = iterator().unzip(unzipper);
return Tuple.of(HashSet.ofAll(t._1), HashSet.ofAll(t._2));
}
@Override
public <T1, T2, T3> Tuple3<HashSet<T1>, HashSet<T2>, HashSet<T3>> unzip3(
Function<? super T, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
final Tuple3<Iterator<T1>, Iterator<T2>, Iterator<T3>> t = iterator().unzip3(unzipper);
return Tuple.of(HashSet.ofAll(t._1), HashSet.ofAll(t._2), HashSet.ofAll(t._3));
}
@Override
public <U> HashSet<Tuple2<T, U>> zip(Iterable<? extends U> that) {
return zipWith(that, Tuple::of);
}
@Override
public <U, R> HashSet<R> zipWith(Iterable<? extends U> that, BiFunction<? super T, ? super U, ? extends R> mapper) {
Objects.requireNonNull(that, "that is null");
Objects.requireNonNull(mapper, "mapper is null");
return HashSet.ofAll(iterator().zipWith(that, mapper));
}
@Override
public <U> HashSet<Tuple2<T, U>> zipAll(Iterable<? extends U> that, T thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
return HashSet.ofAll(iterator().zipAll(that, thisElem, thatElem));
}
@Override
public HashSet<Tuple2<T, Integer>> zipWithIndex() {
return zipWithIndex(Tuple::of);
}
@Override
public <U> HashSet<U> zipWithIndex(BiFunction<? super T, ? super Integer, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
return HashSet.ofAll(iterator().zipWithIndex(mapper));
}
// -- Object
@Override
public int hashCode() {
return tree.hashCode();
}
@SuppressWarnings("unchecked")
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof HashSet) {
final HashSet<?> that = (HashSet<?>) o;
return this.tree.equals(that.tree);
} else {
return false;
}
}
@Override
public String stringPrefix() {
return "HashSet";
}
@Override
public String toString() {
return mkString(stringPrefix() + "(", ", ", ")");
}
private static <T> HashArrayMappedTrie<T, T> addAll(HashArrayMappedTrie<T, T> initial,
Iterable<? extends T> additional) {
HashArrayMappedTrie<T, T> that = initial;
for (T t : additional) {
that = that.put(t, t);
}
return that;
}
// -- Serialization
/**
* {@code writeReplace} method for the serialization proxy pattern.
* <p>
* The presence of this method causes the serialization system to emit a SerializationProxy instance instead of
* an instance of the enclosing class.
*
* @return A SerializationProxy for this enclosing class.
*/
@GwtIncompatible("The Java serialization protocol is explicitly not supported")
private Object writeReplace() {
return new SerializationProxy<>(this.tree);
}
/**
* {@code readObject} method for the serialization proxy pattern.
* <p>
* Guarantees that the serialization system will never generate a serialized instance of the enclosing class.
*
* @param stream An object serialization stream.
* @throws java.io.InvalidObjectException This method will throw with the message "Proxy required".
*/
@GwtIncompatible("The Java serialization protocol is explicitly not supported")
private void readObject(ObjectInputStream stream) throws InvalidObjectException {
throw new InvalidObjectException("Proxy required");
}
/**
     * A serialization proxy which, in this context, is used to deserialize an immutable {@code HashSet} with final
     * instance fields.
     *
     * @param <T> The component type of the underlying set.
*/
// DEV NOTE: The serialization proxy pattern is not compatible with non-final, i.e. extendable,
// classes. Also, it may not be compatible with circular object graphs.
@GwtIncompatible("The Java serialization protocol is explicitly not supported")
private static final class SerializationProxy<T> implements Serializable {
private static final long serialVersionUID = 1L;
// the instance to be serialized/deserialized
private transient HashArrayMappedTrie<T, T> tree;
/**
* Constructor for the case of serialization, called by {@link HashSet#writeReplace()}.
* <p/>
* The constructor of a SerializationProxy takes an argument that concisely represents the logical state of
* an instance of the enclosing class.
*
     * @param tree the underlying trie
*/
SerializationProxy(HashArrayMappedTrie<T, T> tree) {
this.tree = tree;
}
/**
* Write an object to a serialization stream.
*
* @param s An object serialization stream.
* @throws java.io.IOException If an error occurs writing to the stream.
*/
private void writeObject(ObjectOutputStream s) throws IOException {
s.defaultWriteObject();
s.writeInt(tree.size());
for (Tuple2<T, T> e : tree) {
s.writeObject(e._1);
}
}
/**
* Read an object from a deserialization stream.
*
* @param s An object deserialization stream.
* @throws ClassNotFoundException If the object's class read from the stream cannot be found.
* @throws InvalidObjectException If the stream contains no list elements.
* @throws IOException If an error occurs reading from the stream.
*/
private void readObject(ObjectInputStream s) throws ClassNotFoundException, IOException {
s.defaultReadObject();
final int size = s.readInt();
if (size < 0) {
throw new InvalidObjectException("No elements");
}
HashArrayMappedTrie<T, T> temp = HashArrayMappedTrie.empty();
for (int i = 0; i < size; i++) {
@SuppressWarnings("unchecked")
final T element = (T) s.readObject();
temp = temp.put(element, element);
}
tree = temp;
}
/**
* {@code readResolve} method for the serialization proxy pattern.
* <p>
* Returns a logically equivalent instance of the enclosing class. The presence of this method causes the
* serialization system to translate the serialization proxy back into an instance of the enclosing class
* upon deserialization.
*
* @return A deserialized instance of the enclosing class.
*/
private Object readResolve() {
return tree.isEmpty() ? HashSet.empty() : new HashSet<>(tree);
}
}
}
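
The review note after this file explains the contains check in HashSet.add: the shared trie must hand back a new instance when a Map replaces a value, while a Set wants the very same instance back when the element is already present. The Go sketch below illustrates the suggested replace flag on a toy persistent structure, a plain association list standing in for the HAMT; it is purely illustrative and not vavr's actual code.

package main

import "fmt"

// node is a tiny persistent association list standing in for the trie.
type node struct {
	key, val string
	next     *node
}

// put inserts key/val. With replace=false (the Set case) an existing key
// returns the original structure unchanged, so callers can use pointer
// equality to avoid allocating a new wrapper. With replace=true (the Map
// case) a fresh structure carrying the new value is returned.
func put(n *node, key, val string, replace bool) *node {
	for cur := n; cur != nil; cur = cur.next {
		if cur.key == key {
			if !replace {
				return n // same instance: Set semantics
			}
			return &node{key: key, val: val, next: n} // shadows the old entry
		}
	}
	return &node{key: key, val: val, next: n}
}

func main() {
	s := put(nil, "a", "a", false)
	fmt.Println(put(s, "a", "a", false) == s) // true: Sets reuse the instance
	fmt.Println(put(s, "a", "b", true) == s)  // false: Maps get a new instance
}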
| 1 | 11,910 | If key is present, tree.put(k,v) needs to return a new instance for Maps and the same instance for Sets. Therefore we currently perform an additional 'contains' check for Sets. A future optimization may add an additional flag `replace` to the backing HAMT.put() / RedBlackTree.insert() methods. Sets set it to replace=false, Maps set it to replace=true. But for now this solution is working for us. | vavr-io-vavr | java |
@@ -0,0 +1,18 @@
+"""Test to see we don't crash on this code in pandas.
+See: https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/sparse/array.py
+Code written by G van Rossum here: https://github.com/python/typing/issues/684"""
+# pylint: disable=no-member, redefined-builtin, invalid-name, missing-class-docstring
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from enum import Enum
+
+ class ellipsis(Enum):
+ Ellipsis = "..."
+
+ Ellipsis = ellipsis.Ellipsis
+
+
+else:
+ ellipsis = type(Ellipsis) | 1 | 1 | 17,809 | This is a regression test for code I found while working on this. | PyCQA-pylint | py |
|
@@ -6503,8 +6503,15 @@ ex_expr::exp_return_type ex_function_json_object_field_text::eval(char *op_data[
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
+
char *rltStr = NULL;
- JsonReturnType ret = json_extract_path_text(&rltStr, op_data[1], 1, op_data[2]);
+ char *jsonStr = new(heap) char[len1+1];
+ char *jsonAttr = new(heap) char[len2+1];
+ strncpy(jsonStr, op_data[1], len1);
+ jsonStr[len1] = '\0';
+ strncpy(jsonAttr, op_data[2], len2);
+ jsonAttr[len2] = '\0';
+ JsonReturnType ret = json_extract_path_text(&rltStr, jsonStr, 1, jsonAttr);
if (ret != JSON_OK)
{
ExRaiseJSONError(heap, diagsArea, ret);
| 1 | /*********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: <file>
* Description:
*
*
* Created: 7/10/95
* Language: C++
*
*
*
*
*****************************************************************************
*/
#include "Platform.h"
#include <math.h>
#include <zlib.h>
#include <openssl/md5.h>
#include <openssl/sha.h>
#include "ComSSL.h"
#define MathSqrt(op, err) sqrt(op)
#include <ctype.h>
#include <string.h>
#include <stdio.h>
#include "NLSConversion.h"
#include "nawstring.h"
#include "exp_stdh.h"
#include "exp_clause_derived.h"
#include "exp_function.h"
#include "ComDefs.h"
#include "SQLTypeDefs.h"
#include "exp_datetime.h"
#include "exp_interval.h"
#include "exp_bignum.h"
#include "ComSysUtils.h"
#include "wstr.h"
#include "ComDiags.h"
#include "ComAnsiNamePart.h"
#include "ComSqlId.h"
#include "ex_globals.h"
#include "NAUserId.h"
#include "ComUser.h"
#include "ExpSeqGen.h"
#include "ComJSON.h"
#undef DllImport
#define DllImport __declspec ( dllimport )
#include "rosetta/rosgen.h"
#define ptimez_h_juliantimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_juliantimestamp
Section missing, generate compiler error
#endif
#define ptimez_h_converttimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_converttimestamp
Section missing, generate compiler error
#endif
#define ptimez_h_interprettimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_interprettimestamp
Section missing, generate compiler error
#endif
#define ptimez_h_computetimestamp
#define ptimez_h_including_section
#include "guardian/ptimez.h"
#ifdef ptimez_h_computetimestamp
Section missing, generate compiler error
#endif
#define psecure_h_including_section
#define psecure_h_security_app_priv_
#define psecure_h_security_psb_get_
#define psecure_h_security_ntuser_set_
#include "security/psecure.h"
#ifndef dsecure_h_INCLUDED
#define dsecure_h_INCLUDED
#include "security/dsecure.h"
#endif
#include "security/uid.h"
#include "security/uid.h"
#include "fs/feerrors.h"
extern char * exClauseGetText(OperatorTypeEnum ote);
void setVCLength(char * VCLen, Lng32 VCLenSize, ULng32 value);
static void ExRaiseJSONError(CollHeap* heap, ComDiagsArea** diagsArea, JsonReturnType type);
//#define TOUPPER(c) (((c >= 'a') && (c <= 'z')) ? (c - 32) : c);
//#define TOLOWER(c) (((c >= 'A') && (c <= 'Z')) ? (c + 32) : c);
// -----------------------------------------------------------------------
// There is currently a bug in the tandem include file sys/time.h that
// prevents us to get the definition of gettimeofday from there.
// -----------------------------------------------------------------------
//extern int gettimeofday(struct timeval *, struct timezone *);
ExFunctionAscii::ExFunctionAscii(){};
ExFunctionChar::ExFunctionChar(){};
ExFunctionConvertHex::ExFunctionConvertHex(){};
ExFunctionRepeat::ExFunctionRepeat(){};
ExFunctionReplace::ExFunctionReplace()
{
collation_ = CharInfo::DefaultCollation;
setArgEncodedLen( 0, 0);//initialize the first child encoded length to 0
setArgEncodedLen( 0, 1);//initialize the second child encoded length to 0
};
ex_function_char_length::ex_function_char_length(){};
ex_function_char_length_doublebyte::ex_function_char_length_doublebyte(){};
ex_function_oct_length::ex_function_oct_length(){};
ex_function_position::ex_function_position(){};
ex_function_position_doublebyte::ex_function_position_doublebyte(){};
ex_function_concat::ex_function_concat(){};
ex_function_lower::ex_function_lower(){};
ex_function_upper::ex_function_upper(){};
ex_function_substring::ex_function_substring(){};
ex_function_trim_char::ex_function_trim_char(){};
ExFunctionTokenStr::ExFunctionTokenStr(){};
ExFunctionReverseStr::ExFunctionReverseStr(){};
ex_function_current::ex_function_current(){};
ex_function_unique_execute_id::ex_function_unique_execute_id(){};//Trigger -
ex_function_get_triggers_status::ex_function_get_triggers_status(){};//Trigger -
ex_function_get_bit_value_at::ex_function_get_bit_value_at(){};//Trigger -
ex_function_is_bitwise_and_true::ex_function_is_bitwise_and_true(){};//MV
ex_function_explode_varchar::ex_function_explode_varchar(){};
ex_function_hash::ex_function_hash(){};
ex_function_hivehash::ex_function_hivehash(){};
ExHashComb::ExHashComb(){};
ExHiveHashComb::ExHiveHashComb(){};
ExHDPHash::ExHDPHash(){};
ExHDPHashComb::ExHDPHashComb(){};
ex_function_replace_null::ex_function_replace_null(){};
ex_function_mod::ex_function_mod(){};
ex_function_mask::ex_function_mask(){};
ExFunctionShift::ExFunctionShift(){};
ex_function_converttimestamp::ex_function_converttimestamp(){};
ex_function_dateformat::ex_function_dateformat(){};
ex_function_dayofweek::ex_function_dayofweek(){};
ex_function_extract::ex_function_extract(){};
ex_function_juliantimestamp::ex_function_juliantimestamp(){};
ex_function_exec_count::ex_function_exec_count(){};
ex_function_curr_transid::ex_function_curr_transid(){};
ex_function_ansi_user::ex_function_ansi_user(){};
ex_function_user::ex_function_user(){};
ex_function_nullifzero::ex_function_nullifzero(){};
ex_function_nvl::ex_function_nvl(){};
ex_function_json_object_field_text::ex_function_json_object_field_text(){};
ex_function_queryid_extract::ex_function_queryid_extract(){};
ExFunctionUniqueId::ExFunctionUniqueId(){};
ExFunctionRowNum::ExFunctionRowNum(){};
ExFunctionHbaseColumnLookup::ExFunctionHbaseColumnLookup() {};
ExFunctionHbaseColumnsDisplay::ExFunctionHbaseColumnsDisplay() {};
ExFunctionHbaseColumnCreate::ExFunctionHbaseColumnCreate() {};
ExFunctionCastType::ExFunctionCastType() {};
ExFunctionSequenceValue::ExFunctionSequenceValue() {};
ExFunctionHbaseTimestamp::ExFunctionHbaseTimestamp() {};
ExFunctionHbaseVersion::ExFunctionHbaseVersion() {};
ExFunctionSVariance::ExFunctionSVariance(){};
ExFunctionSStddev::ExFunctionSStddev(){};
ExpRaiseErrorFunction::ExpRaiseErrorFunction(){};
ExFunctionRandomNum::ExFunctionRandomNum(){};
ExFunctionGenericUpdateOutput::ExFunctionGenericUpdateOutput(){}; // MV,
ExFunctionInternalTimestamp::ExFunctionInternalTimestamp(){}; // Triggers
ExFunctionRandomSelection::ExFunctionRandomSelection(){};
ExHash2Distrib::ExHash2Distrib(){};
ExProgDistrib::ExProgDistrib(){};
ExProgDistribKey::ExProgDistribKey(){};
ExPAGroup::ExPAGroup(){};
ExFunctionPack::ExFunctionPack(){};
ExUnPackCol::ExUnPackCol(){};
ExFunctionRangeLookup::ExFunctionRangeLookup(){};
ExFunctionCrc32::ExFunctionCrc32(){};
ExFunctionMd5::ExFunctionMd5(){};
ExFunctionSha::ExFunctionSha(){};
ExFunctionSha2::ExFunctionSha2(){};
ExFunctionIsIP::ExFunctionIsIP(){};
ExFunctionInetAton::ExFunctionInetAton(){};
ExFunctionInetNtoa::ExFunctionInetNtoa(){};
ExFunctionSoundex::ExFunctionSoundex(){};
ExFunctionAESEncrypt::ExFunctionAESEncrypt(){};
ExFunctionAESDecrypt::ExFunctionAESDecrypt(){};
ExFunctionAscii::ExFunctionAscii(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionChar::ExFunctionChar(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionCrc32::ExFunctionCrc32(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionMd5::ExFunctionMd5(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionSha::ExFunctionSha(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionSha2::ExFunctionSha2(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space, Lng32 mode)
: ex_function_clause(oper_type, 2, attr, space), mode(mode)
{
};
ExFunctionIsIP::ExFunctionIsIP(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionInetAton::ExFunctionInetAton(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionInetNtoa::ExFunctionInetNtoa(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionAESEncrypt::ExFunctionAESEncrypt(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space, int args_num, Int32 aes_mode )
: ex_function_clause(oper_type, args_num + 1, attr, space), args_num(args_num), aes_mode(aes_mode)
{
};
ExFunctionAESDecrypt::ExFunctionAESDecrypt(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space, int args_num, Int32 aes_mode)
: ex_function_clause(oper_type, args_num + 1, attr, space), args_num(args_num), aes_mode(aes_mode)
{
};
ExFunctionConvertHex::ExFunctionConvertHex(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionRepeat::ExFunctionRepeat(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExFunctionReplace::ExFunctionReplace(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 4, attr, space)
{
collation_ = CharInfo::DefaultCollation;
//set first and second child encoded length
setArgEncodedLen( 0, 0);//initialize the first child encoded length to 0
setArgEncodedLen( 0, 1);//initialize the second child encoded length to 0
};
ex_function_char_length::ex_function_char_length(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_char_length_doublebyte::ex_function_char_length_doublebyte(
OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_oct_length::ex_function_oct_length(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_position::ex_function_position(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_position_doublebyte::ex_function_position_doublebyte
(
OperatorTypeEnum oper_type,
Attributes ** attr, Space * space
)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_concat::ex_function_concat(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_lower::ex_function_lower(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_upper::ex_function_upper(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_substring::ex_function_substring(OperatorTypeEnum oper_type,
short num_operands,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, num_operands, attr, space)
{
};
ex_function_translate::ex_function_translate(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 conv_type,
Int16 flags)
: ex_function_clause(oper_type, 2 , attr, space)
{
conv_type_= conv_type;
flags_ = flags;
};
ex_function_trim::ex_function_trim(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 mode)
: ex_function_clause(oper_type, 3 , attr, space)
{
mode_ = mode;
};
ex_function_trim_char::ex_function_trim_char(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 mode)
: ex_function_trim(oper_type, attr, space, mode)
{
};
ExFunctionTokenStr::ExFunctionTokenStr(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExFunctionReverseStr::ExFunctionReverseStr(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_current::ex_function_current(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
//++ Triggers -
ex_function_unique_execute_id::ex_function_unique_execute_id(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
//++ Triggers -
ex_function_get_triggers_status::ex_function_get_triggers_status(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
//++ Triggers -
ex_function_get_bit_value_at::ex_function_get_bit_value_at(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
//++ MV
ex_function_is_bitwise_and_true::ex_function_is_bitwise_and_true(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_explode_varchar::ex_function_explode_varchar(OperatorTypeEnum oper_type,
short num_operands,
Attributes ** attr,
Space * space,
NABoolean forInsert)
: ex_function_clause(oper_type, num_operands, attr, space),
forInsert_(forInsert)
{
};
ex_function_hash::ex_function_hash(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_hivehash::ex_function_hivehash(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExHashComb::ExHashComb(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExHiveHashComb::ExHiveHashComb(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExHDPHash::ExHDPHash(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExHDPHashComb::ExHDPHashComb(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_replace_null::ex_function_replace_null(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space)
: ex_function_clause(oper_type, 4, attr, space)
{
};
ex_function_mod::ex_function_mod(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_mask::ex_function_mask(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExFunctionShift::ExFunctionShift(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_bool::ex_function_bool(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ex_function_converttimestamp::ex_function_converttimestamp
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_dateformat::ex_function_dateformat(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
Int32 dateformat)
: ex_function_clause(oper_type, 2 , attr, space), dateformat_(dateformat)
{
};
ex_function_dayofweek::ex_function_dayofweek(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_extract::ex_function_extract(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
rec_datetime_field extractField)
: ex_function_clause(oper_type, 2 , attr, space), extractField_(extractField)
{
};
ex_function_juliantimestamp::ex_function_juliantimestamp
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_exec_count::ex_function_exec_count
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 1, attr, space)
{
execCount_ = 0;
};
ex_function_curr_transid::ex_function_curr_transid
( OperatorTypeEnum oper_type
, Attributes ** attr
, Space * space
)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ex_function_ansi_user::ex_function_ansi_user(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ex_function_user::ex_function_user(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_nullifzero::ex_function_nullifzero(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ex_function_nvl::ex_function_nvl(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_json_object_field_text::ex_function_json_object_field_text (OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ex_function_queryid_extract::ex_function_queryid_extract(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 3, attr, space)
{
};
ExFunctionUniqueId::ExFunctionUniqueId(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ExFunctionRowNum::ExFunctionRowNum(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 1, attr, space)
{
};
ExFunctionHbaseColumnLookup::ExFunctionHbaseColumnLookup(OperatorTypeEnum oper_type,
Attributes ** attr,
const char * colName,
Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
strcpy(colName_, colName);
};
ExFunctionHbaseColumnsDisplay::ExFunctionHbaseColumnsDisplay(OperatorTypeEnum oper_type,
Attributes ** attr,
Lng32 numCols,
char * colNames,
Space * space)
: ex_function_clause(oper_type, 2, attr, space),
numCols_(numCols),
colNames_(colNames)
{
};
ExFunctionHbaseColumnCreate::ExFunctionHbaseColumnCreate(OperatorTypeEnum oper_type,
Attributes ** attr,
short numEntries,
short colNameMaxLen,
Int32 colValMaxLen,
short colValVCIndLen,
Space * space)
: ex_function_clause(oper_type, 1, attr, space),
numEntries_(numEntries),
colNameMaxLen_(colNameMaxLen),
colValMaxLen_(colValMaxLen),
colValVCIndLen_(colValVCIndLen)
{
};
ExFunctionSequenceValue::ExFunctionSequenceValue(OperatorTypeEnum oper_type,
Attributes ** attr,
const SequenceGeneratorAttributes &sga,
Space * space)
: ex_function_clause(oper_type, 1, attr, space),
sga_(sga),
flags_(0)
{
};
ExFunctionHbaseTimestamp::ExFunctionHbaseTimestamp(
OperatorTypeEnum oper_type,
Attributes ** attr,
Lng32 colIndex,
Space * space)
: ex_function_clause(oper_type, 2, attr, space),
colIndex_(colIndex),
flags_(0)
{
};
ExFunctionHbaseVersion::ExFunctionHbaseVersion(
OperatorTypeEnum oper_type,
Attributes ** attr,
Lng32 colIndex,
Space * space)
: ex_function_clause(oper_type, 2, attr, space),
colIndex_(colIndex),
flags_(0)
{
};
ExFunctionCastType::ExFunctionCastType(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
ExFunctionSVariance::ExFunctionSVariance(Attributes **attr, Space *space)
: ex_function_clause(ITM_VARIANCE, 4, attr, space)
{
};
ExFunctionSStddev::ExFunctionSStddev(Attributes **attr, Space *space)
: ex_function_clause(ITM_STDDEV, 4, attr, space)
{
};
ExpRaiseErrorFunction::ExpRaiseErrorFunction (Attributes **attr,
Space *space,
Lng32 sqlCode,
NABoolean raiseError,
const char *constraintName,
const char *tableName,
const NABoolean hasStringExp) // -- Triggers
: ex_function_clause (ITM_RAISE_ERROR, (hasStringExp ? 2 : 1), attr, space),
theSQLCODE_(sqlCode),
constraintName_((char *)constraintName),
tableName_((char *)tableName)
{
setRaiseError(raiseError);
};
ExFunctionRandomNum::ExFunctionRandomNum(OperatorTypeEnum opType,
short num_operands,
NABoolean simpleRandom,
Attributes **attr,
Space *space)
: ex_function_clause(opType, num_operands, attr, space),
flags_(0)
{
seed_ = 0;
if (simpleRandom)
flags_ |= SIMPLE_RANDOM;
}
// MV,
ExFunctionGenericUpdateOutput::ExFunctionGenericUpdateOutput(OperatorTypeEnum oper_type,
Attributes **attr,
Space *space)
: ex_function_clause(oper_type, 1, attr, space)
{}
// Triggers
ExFunctionInternalTimestamp::ExFunctionInternalTimestamp(OperatorTypeEnum oper_type,
Attributes **attr,
Space *space)
: ex_function_clause(oper_type, 1, attr, space)
{}
ExFunctionSoundex::ExFunctionSoundex(OperatorTypeEnum oper_type,
Attributes ** attr, Space * space)
: ex_function_clause(oper_type, 2, attr, space)
{
};
// Triggers
ex_expr::exp_return_type ex_function_get_bit_value_at::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 buffLen = getOperand(1)->getLength(op_data[1]);
// Get the position from operand 2.
Lng32 pos = *(Lng32 *)op_data[2];
// The character we look into
Lng32 charnum = pos / 8;
// The bit in the character we look into
Lng32 bitnum = 8-(pos % 8)-1;
// Check for error conditions.
if ((charnum >= buffLen) || (charnum < 0))
{
ExRaiseSqlError(heap, diagsArea, EXE_GETBIT_ERROR);
return ex_expr::EXPR_ERROR;
}
unsigned char onechar = *(unsigned char *)(op_data[1] + charnum);
unsigned char mask = 1;
mask = mask<<bitnum;
*((ULng32*)op_data[0]) = (ULng32) (mask & onechar ? 1 : 0);
return ex_expr::EXPR_OK;
}
//++ MV
// The function returns True if any of the bits is set in both of the strings
ex_expr::exp_return_type ex_function_is_bitwise_and_true::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 leftSize = getOperand(1)->getLength(op_data[1]);
Lng32 rightSize = getOperand(2)->getLength(op_data[2]);
if (leftSize != rightSize)
{
ExRaiseSqlError(heap, diagsArea, EXE_IS_BITWISE_AND_ERROR);
return ex_expr::EXPR_ERROR;
}
// Iterate through all characters until one "bitwise and" returns TRUE
// Starting with False
*(Lng32 *)op_data[0] = 0;
unsigned char *leftCharPtr = (unsigned char *)(op_data[1]);
unsigned char *rightCharPtr = (unsigned char *)(op_data[2]);
unsigned char *endBarrier = rightCharPtr + rightSize;
for (; rightCharPtr < endBarrier; rightCharPtr++, leftCharPtr++)
{
if ((*leftCharPtr) & (*rightCharPtr))
{
*(Lng32 *)op_data[0] = 1;
break;
}
}
return ex_expr::EXPR_OK;
}
ExFunctionRandomSelection::ExFunctionRandomSelection(OperatorTypeEnum opType,
Attributes **attr,
Space *space,
float selProb)
: ExFunctionRandomNum(opType, 1, FALSE, attr, space)
{
if (selProb < 0)
selProb = 0.0;
selProbability_ = selProb;
difference_ = -1;
}
ExHash2Distrib::ExHash2Distrib(Attributes **attr, Space *space)
: ex_function_clause(ITM_HASH2_DISTRIB, 3, attr, space)
{}
ExProgDistrib::ExProgDistrib(Attributes **attr, Space *space)
: ex_function_clause(ITM_PROGDISTRIB, 3, attr, space)
{}
ExProgDistribKey::ExProgDistribKey(Attributes **attr, Space *space)
: ex_function_clause(ITM_PROGDISTRIBKEY, 4, attr, space)
{}
ExPAGroup::ExPAGroup(Attributes **attr, Space *space)
: ex_function_clause(ITM_PAGROUP, 4, attr, space)
{}
ExUnPackCol::ExUnPackCol(Attributes **attr,
Space *space,
Lng32 width,
Lng32 base,
NABoolean nullsPresent)
  : ex_function_clause(ITM_UNPACKCOL, 3, attr, space),
    width_(width),
    base_(base)
{
setNullsPresent(nullsPresent);
};
ExFunctionRangeLookup::ExFunctionRangeLookup(Attributes** attr,
Space* space,
Lng32 numParts,
Lng32 partKeyLen)
: ex_function_clause(ITM_RANGE_LOOKUP, 3, attr, space),
numParts_(numParts),
partKeyLen_(partKeyLen)
{
}
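// -----------------------------------------------------------------------
// Evaluator convention used throughout this file (inferred from usage):
// op_data[0] points at the result buffer and op_data[1..n] at the operand
// data, while op_data[-MAX_OPERANDS + i] points at the null/varchar-length
// indicator area for operand i.
//
// CONCAT: copies operand 1 followed by operand 2 into operand 0, blank-
// padding a fixed-length result and raising EXE_STRING_OVERFLOW when the
// combined length would exceed the result's maximum length.
// -----------------------------------------------------------------------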
ex_expr::exp_return_type ex_function_concat::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
Lng32 max_len = getOperand(0)->getLength();
if ((len1 + len2) > max_len) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Int32 actual_length = len1+len2;
// If operand 0 is varchar, store the sum of operand 1 length and
// operand 2 length in the varlen area.
getOperand(0)->setVarLength((actual_length), op_data[-MAX_OPERANDS]);
// Now, copy the contents of operand 1 followed by the contents of
// operand 2 into operand 0.
str_cpy_all(op_data[0], op_data[1], len1);
str_cpy_all(&op_data[0][len1], op_data[2], len2);
//
// Blankpad the target (if needed).
//
if ((actual_length) < max_len)
str_pad(&op_data[0][actual_length], max_len - actual_length, ' ');
return ex_expr::EXPR_OK;
};
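// REPEAT(str, count): writes 'count' back-to-back copies of operand 1 into
// the result, raising EXE_STRING_OVERFLOW for a negative count or when the
// repeated length would exceed the result's maximum length.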
ex_expr::exp_return_type ExFunctionRepeat::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 repeatCount = *(Lng32 *)op_data[2];
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
Lng32 resultMaxLen = getOperand(0)->getLength();
if ((repeatCount < 0) || ((repeatCount * len1) > resultMaxLen))
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Lng32 currPos = 0;
for (Int32 i = 0; i < repeatCount; i++)
{
str_cpy_all(&op_data[0][currPos], op_data[1], len1);
currPos += len1;
}
// If operand 0 is varchar, store the length.
getOperand(0)->setVarLength(currPos, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
};
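// REPLACE(source, search, replacement): scans operand 1 for occurrences of
// operand 2 and substitutes operand 3 for each. For a case-insensitive
// operation an upshifted copy of the source is searched while the original
// bytes are copied out; for system collations both the source and the
// search string are first encoded into collation search keys.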
ex_expr::exp_return_type ExFunctionReplace::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
// Note: all lengths are byte lengths.
// source string
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
char * str1 = op_data[1];
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( str1, prec1, len1, cs );
}
  // If a case-insensitive search is to be done, make a copy of the source
  // string and upshift it. The copy is used for searching; the original
  // string supplies the bytes that are copied to the result.
char * searchStr1 = str1;
if ((caseInsensitiveOperation()) && (heap) && (str1))
{
searchStr1 = new(heap) char[len1];
str_cpy_convert(searchStr1, str1, len1, 1);
}
// string to search for in string1
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
char * str2 = op_data[2];
// string to replace string2 with in string1
Lng32 len3 = getOperand(3)->getLength(op_data[-MAX_OPERANDS+3]);
char * str3 = op_data[3];
if ( cs == CharInfo::UTF8 )
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( str2, prec2, len2, cs );
Int32 prec3 = ((SimpleType *)getOperand(3))->getPrecision();
len3 = Attributes::trimFillerSpaces( str3, prec3, len3, cs );
}
Lng32 resultMaxLen = getOperand(0)->getLength();
char * result = op_data[0];
char * sourceStr = searchStr1;
char * searchStr = str2;
Int32 lenSourceStr = len1; //getArgEncodedLen(0);
Int32 lenSearchStr = len2; //getArgEncodedLen(1);
Int32 effLenSourceStr = len1; //getArgEncodedLen(0);
Int32 effLenSearchStr = len2; //getArgEncodedLen(1);
Int16 nPasses = 1;
if (CollationInfo::isSystemCollation(getCollation()))
{
nPasses= CollationInfo::getCollationNPasses(getCollation());
lenSourceStr = getArgEncodedLen(0);
lenSearchStr = getArgEncodedLen(1);
assert (heap);
sourceStr = new(heap) char [lenSourceStr];
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) str1,
len1,
(UInt8 *) sourceStr,
lenSourceStr,
(Int32 &) effLenSourceStr,
nPasses,
getCollation(),
TRUE);
searchStr = new(heap) char [lenSearchStr];
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) str2,
len2,
(UInt8 *) searchStr,
lenSearchStr,
(Int32 &)effLenSearchStr,
nPasses,
getCollation(),
TRUE);
}
short bpc = (getOperand(1)->widechar() ? 2 : 1);
NABoolean done = FALSE;
Lng32 position;
Lng32 currPosStr1 = 0;
Lng32 currLenStr1 = len1;
Lng32 currPosResult = 0;
Lng32 currLenResult = 0;
while (! done)
{
position =
ex_function_position::findPosition(&sourceStr[currPosStr1 * nPasses],
currLenStr1 * nPasses,
searchStr,
effLenSearchStr,
bpc,
nPasses,
getCollation(),
0,
cs);
if(position < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("REPLACE FUNCTION");
return ex_expr::EXPR_ERROR;
}
if (position > 0)
{
position = position - 1;
// copy part of str1 from currPosStr1 till position into result
if ((currLenResult + position) > resultMaxLen) {
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (position > 0)
{
str_cpy_all(&result[currPosResult], &str1[currPosStr1],
position);
}
currPosResult += position;
currLenResult += position;
currPosStr1 += (position + len2) ;
currLenStr1 -= (position + len2) ;
// now copy str3 to result. This is the replacement.
if ((currLenResult + len3) > resultMaxLen) {
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
str_cpy_all(&result[currPosResult], str3, len3);
currLenResult += len3;
currPosResult += len3;
}
else
{
done = TRUE;
if ((currLenResult + currLenStr1) > resultMaxLen) {
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (currLenStr1 > 0)
str_cpy_all(&result[currPosResult], &str1[currPosStr1], currLenStr1);
currLenResult += currLenStr1;
}
}
// If operand 0 is varchar, store the length.
getOperand(0)->setVarLength(currLenResult, op_data[-MAX_OPERANDS]);
if (sourceStr && sourceStr != str1)
NADELETEBASIC(sourceStr,(heap));
if (searchStr && searchStr != str2)
NADELETEBASIC(searchStr,(heap));
return ex_expr::EXPR_OK;
};
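// SUBSTRING(str, start [, length]): 'start' may be zero or negative, which
// shortens the effective length accordingly. Character positions are
// converted to byte offsets so that multi-byte (e.g. UTF8) data is sliced
// on character boundaries.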
ex_expr::exp_return_type ex_function_substring::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int32 len1_bytes = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
// Get the starting position in characters from operand 2.
// This may be a negative value!
Int32 specifiedCharStartPos = *(Lng32 *)op_data[2];
// Starting position in bytes. It can NOT be a negative value.
Int32 startByteOffset = 0; // Assume beginning of buffer for now.
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
// Convert number of character to offset in buffer.
if(specifiedCharStartPos > 1)
{
startByteOffset = Attributes::convertCharToOffset(op_data[1], specifiedCharStartPos, len1_bytes, cs);
if(startByteOffset < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("SUBSTRING FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
else { /* Leave startByteOffset at 0 */ }
// If operand 3 exists, get the length of substring in characters from operand
// 3. Otherwise, if specifiedCharStartPos > 0, length is from specifiedCharStartPos char to end of buf.
// If specifiedCharStartPos is 0, length is all of buf except last character.
// If specifiedCharStartPos is negative, length is even less (by that negative amount).
Int32 inputLen_bytes = len1_bytes ; // Assume byte count = length of string for now
Int32 specifiedLenInChars = inputLen_bytes ; // Assume char count = byte count for now
Int32 prec1 = 0;
if (getNumOperands() == 4)
specifiedLenInChars = *(Lng32 *)op_data[3]; // Use specified desired length for now
if ( cs == CharInfo::UTF8 )
{
prec1 = ((SimpleType *)getOperand(1))->getPrecision();
if ( prec1 )
inputLen_bytes = Attributes::trimFillerSpaces( op_data[1], prec1, inputLen_bytes, cs );
}
// NOTE: Following formula for lastChar works even if specifiedCharStartPos is 0 or negative.
Int32 lastChar = specifiedLenInChars + (specifiedCharStartPos - 1);
// The end of the substr as a byte offset
Int32 endOff_bytes = inputLen_bytes; // Assume length of input for now.
Int32 actualLenInBytes = 0;
if ( startByteOffset >= inputLen_bytes )
{
// Nothing left in buf to copy, so endOff_bytes and actualLenInBytes are OK as is.
startByteOffset = inputLen_bytes; // IGNORE it if specified start > end of buffer!
}
else if (lastChar > 0)
{
endOff_bytes = Attributes::convertCharToOffset (op_data[1], lastChar+1, inputLen_bytes, cs);
if(endOff_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("SUBSTRING FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
else endOff_bytes = 0;
// Check for error conditions. endOff_bytes will be less than startByteOffset if length is
// less than 0.
if (endOff_bytes < startByteOffset)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_SUBSTRING_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
actualLenInBytes = endOff_bytes - startByteOffset;
// Now, copy the substring of operand 1 from the starting position into
// operand 0, if actualLenInBytes is greater than 0.
if ( actualLenInBytes > 0)
str_cpy_all(op_data[0], &op_data[1][startByteOffset], actualLenInBytes);
//
// Blankpad the target (if needed).
//
Int32 len0_bytes = getOperand(0)->getLength();
if ( (actualLenInBytes < len0_bytes) && prec1 )
str_pad(&op_data[0][actualLenInBytes], len0_bytes - actualLenInBytes, ' ');
// store the length of substring in the varlen indicator.
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength( actualLenInBytes, op_data[-MAX_OPERANDS] );
return ex_expr::EXPR_OK;
}
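// TRIM of an explicit trim character: operand 1 is the (single-character)
// trim string and operand 2 the source. From the getTrimMode() tests below,
// mode 1 strips leading occurrences, mode 0 trailing ones, and mode 2 both.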
ex_expr::exp_return_type ex_function_trim_char::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
const Int32 lenSrcStrSmallBuf = 128;
char srcStrSmallBuf[lenSrcStrSmallBuf];
const Int32 lenTrimCharSmallBuf = 8;
char trimCharSmallBuf[lenTrimCharSmallBuf];
// find out the length of trim character.
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
Int32 number_bytes = 0;
number_bytes = Attributes::getFirstCharLength(op_data[1], len1, cs);
if(number_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("TRIM FUNCTION");
return ex_expr::EXPR_ERROR;
}
  // The trim string (operand 1) must be exactly one character. Raise an
  // error if it contains more than one.
if (len1 != number_bytes)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_TRIM_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
if (cs == CharInfo::UTF8) // If so, must ignore any filler spaces at end of string
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
Int16 nPasses = 1;
char * trimChar = op_data[1];
char * srcStr = op_data[2];
Lng32 lenSrcStr = len2;
Lng32 lenTrimChar = len1;
Lng32 effLenSourceStr = len2;
Lng32 effLenTrimChar = len1;
// case of collation --
if (CollationInfo::isSystemCollation(getCollation()))
{
nPasses = CollationInfo::getCollationNPasses(getCollation());
//get the length of the encoded source string
lenSrcStr = getSrcStrEncodedLength();
//get length of encoded trim character
lenTrimChar = getTrimCharEncodedLength();
assert (heap);
if (lenSrcStr <= lenSrcStrSmallBuf)
{
srcStr = srcStrSmallBuf;
}
else
{
srcStr = new(heap) char [lenSrcStr];
}
//get encoded key
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) op_data[2],
len2,
(UInt8 *) srcStr,
lenSrcStr,
(Int32 &) effLenSourceStr,
nPasses,
getCollation(),
FALSE);
if (lenTrimChar <= lenTrimCharSmallBuf)
{
trimChar = trimCharSmallBuf;
}
else
{
trimChar = new(heap) char [lenTrimChar];
}
//get encoded key
ex_function_encode::encodeCollationSearchKey(
(UInt8 *) op_data[1],
len1,
(UInt8 *) trimChar,
lenTrimChar,
(Int32 &) effLenTrimChar,
nPasses,
getCollation(),
FALSE);
}
// Find how many leading characters in operand 2 correspond to the trim
// character.
Lng32 len0 = len2;
Lng32 start = 0;
NABoolean notEqualFlag = 0;
if ((getTrimMode() == 1) || (getTrimMode() == 2))
{
while (start <= len2 - len1)
{
for(Int32 i= 0; i < lenTrimChar; i++)
{
if(trimChar[i] != srcStr[start * nPasses +i])
{
notEqualFlag = 1;
break;
}
}
if (notEqualFlag == 0)
{
start += len1;
len0 -= len1;
}
else
break;
}
}
// Find how many trailing characters in operand 2 correspond to the trim
// character.
Int32 end = len2;
Int32 endt;
Int32 numberOfCharacterInBuf;
Int32 bufferLength = end - start;
const Int32 smallBufSize = 128;
char smallBuf[smallBufSize];
notEqualFlag = 0;
if ((getTrimMode() == 0) || (getTrimMode() == 2))
{
char *charLengthInBuf;
if(bufferLength <= smallBufSize)
charLengthInBuf = smallBuf;
else
charLengthInBuf = new(heap) char[bufferLength];
numberOfCharacterInBuf =
Attributes::getCharLengthInBuf(op_data[2] + start,
op_data[2] + end, charLengthInBuf, cs);
if(numberOfCharacterInBuf < 0)
{
if (srcStr && srcStr != op_data[2])
NADELETEBASIC(srcStr,(heap));
if (trimChar && trimChar != op_data[1])
NADELETEBASIC(trimChar,(heap));
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("TRIM FUNCTION");
return ex_expr::EXPR_ERROR;
}
while (end >= start + len1)
{
if (charLengthInBuf[--numberOfCharacterInBuf] != len1) break;
endt = end - len1;
for(Int32 i = 0; i < lenTrimChar; i++)
{
if (trimChar[i] != srcStr[endt *nPasses + i])
{
notEqualFlag = 1;
break;
}
}
if(notEqualFlag == 0)
{
end = endt;
len0 -= len1;
}
else
break;
}
if(bufferLength > smallBufSize)
NADELETEBASIC(charLengthInBuf, heap);
}
// Result is always a varchar.
// store the length of trimmed string in the varlen indicator.
getOperand(0)->setVarLength(len0, op_data[-MAX_OPERANDS]);
// Now, copy operand 2 skipping the trim characters into
// operand 0.
if (len0 > 0)
str_cpy_all(op_data[0], &op_data[2][start], len0);
if (srcStr && srcStr != srcStrSmallBuf && srcStr != op_data[2] )
NADELETEBASIC(srcStr,(heap));
if (trimChar && trimChar != trimCharSmallBuf && trimChar != op_data[1])
NADELETEBASIC(trimChar,(heap));
return ex_expr::EXPR_OK;
};
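// LOWER(str): single-byte ISO88591 data is lowered byte-by-byte via
// TOLOWER; other charsets are converted character-by-character through
// UCS4 so the Unicode lowering table can be applied.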
ex_expr::exp_return_type ex_function_lower::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
getOperand(0)->setVarLength(len1, op_data[-MAX_OPERANDS]);
cnv_charset charset = convertCharsetEnum(cs);
Int32 number_bytes;
Int32 total_bytes_out = 0;
char tmpBuf[4];
UInt32 UCS4value;
UInt16 UCS2value;
// Now, copy the contents of operand 1 after the case change into operand 0.
Int32 len0 = 0;
if(cs == CharInfo::ISO88591)
{
while (len0 < len1)
{
op_data[0][len0] = TOLOWER(op_data[1][len0]);
++len0;
}
}
else
{
    // For multi-byte charsets (e.g. UTF8 or SJIS), convert each character
    // to UCS4, apply the Unicode lowering function, and convert back.
while (len0 < len1)
{
number_bytes =
LocaleCharToUCS4(op_data[1] + len0, len1 - len0, &UCS4value, charset);
if(number_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("LOWER FUNCTION");
return ex_expr::EXPR_ERROR;
}
      if(number_bytes == 1 && (op_data[1][len0] & 0x80) == 0)
        {
          // Write at total_bytes_out, not len0: an earlier character may
          // have changed byte length when lowered, so the two can differ.
          op_data[0][total_bytes_out] = TOLOWER(op_data[1][len0]);
          ++len0;
          ++total_bytes_out;
        }
else
{
UCS2value = UCS4value & 0XFFFF;
UCS4value = unicode_char_set::to_lower(*(NAWchar *)&UCS2value);
Int32 number_bytes_out =
UCS4ToLocaleChar((const UInt32 *)&UCS4value, tmpBuf,
CharInfo::maxBytesPerChar(cs), charset);
if(number_bytes_out < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("LOWER FUNCTION");
return ex_expr::EXPR_ERROR;
}
for (Int32 j = 0; j < number_bytes_out; j++)
{
op_data[0][total_bytes_out] = tmpBuf[j];
total_bytes_out++;
}
len0 += number_bytes;
}
}
}
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength(total_bytes_out, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
};
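// UPPER(str): like LOWER above, except that a full case mapping may expand
// one character into up to three, so the output position is checked against
// the result length before each character is written.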
ex_expr::exp_return_type ex_function_upper::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Int32 len0 = getOperand(0)->getLength();
Int32 in_pos = 0;
Int32 out_pos = 0;
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
Int32 number_bytes;
UInt32 UCS4value = 0;
UInt16 UCS2value = 0;
// Now, copy the contents of operand 1 after the case change into operand 0.
if(cs == CharInfo::ISO88591)
{
while(in_pos < len1)
{
op_data[0][out_pos] = TOUPPER(op_data[1][in_pos]);
++in_pos;
++out_pos;
}
}
else
{
cnv_charset charset = convertCharsetEnum(cs);
    // For multi-byte charsets (e.g. UTF8 or SJIS), convert each character
    // to UCS4, apply the Unicode upshift function, and convert back.
while(in_pos < len1)
{
number_bytes =
LocaleCharToUCS4(op_data[1] + in_pos, len1 - in_pos, &UCS4value, charset);
if(number_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("UPPER FUNCTION");
return ex_expr::EXPR_ERROR;
}
if(number_bytes == 1 && (op_data[1][in_pos] & 0x80) == 0)
{
op_data[0][out_pos] = TOUPPER(op_data[1][in_pos]);
++in_pos;
++out_pos;
}
else
{
in_pos += number_bytes;
UCS2value = UCS4value & 0XFFFF;
NAWchar wcUpshift[3];
Int32 charCnt = 1; // Default count to 1
// search against unicode_lower2upper_mapping_table_full
NAWchar* tmpWCP = unicode_char_set::to_upper_full(UCS2value);
if ( tmpWCP )
{
wcUpshift[0] = *tmpWCP++;
wcUpshift[1] = *tmpWCP++;
wcUpshift[2] = *tmpWCP ;
charCnt = (*tmpWCP) ? 3 : 2;
}
else
wcUpshift[0] = unicode_char_set::to_upper(UCS2value);
for (Int32 ii = 0 ; ii < charCnt ; ii++)
{
UInt32 UCS4_val = wcUpshift[ii];
char tmpBuf[8];
Int32 out_bytes = UCS4ToLocaleChar((const UInt32 *)&UCS4_val, tmpBuf,
CharInfo::maxBytesPerChar(cs), charset);
if(out_bytes < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("UPPER FUNCTION");
return ex_expr::EXPR_ERROR;
}
if (out_pos + out_bytes > len0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
for (Int32 j = 0; j < out_bytes; j++)
{
op_data[0][out_pos] = tmpBuf[j];
++out_pos;
}
}
}
}
}
getOperand(0)->setVarLength(out_pos, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_oct_length::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
// Move operand's length into result.
// The data type of result is long.
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
*(Lng32 *)op_data[0] = len1;
return ex_expr::EXPR_OK;
};
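// ASCII / CODE_VALUE family: returns the code point of the first character
// of operand 1. ASCII proper is limited to values <= 0x7F unless the
// charset is ISO88591; CODE_VALUE goes through LocaleCharToUCS4 for
// multi-byte charsets.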
ex_expr::exp_return_type ExFunctionAscii::eval(char *op_data[],CollHeap* heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
Int32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
if (len1 > 0)
{
switch (getOperType() )
{
case ITM_UNICODE_CODE_VALUE:
{
UInt16 temp;
str_cpy_all((char *)&temp, op_data[1], 2);
*(Lng32 *)op_data[0] = temp;
}
break;
case ITM_NCHAR_MP_CODE_VALUE:
{
UInt16 temp;
#if defined( NA_LITTLE_ENDIAN )
// swap the byte order on little-endian machines as NCHAR_MP charsets are stored
// in multi-byte form (i.e. in big-endian order).
temp = reversebytesUS( *((NAWchar*) op_data[1]) );
#else
str_cpy_all((char *)&temp, op_data[1], 2);
#endif
*(UInt32 *)op_data[0] = temp;
}
break;
case ITM_ASCII:
{
Int32 val = (unsigned char)(op_data[1][0]);
if ( (val > 0x7F) && (cs != CharInfo::ISO88591) )
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("ASCII");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
*(UInt32 *)op_data[0] = (unsigned char)(op_data[1][0]);
break;
}
case ITM_CODE_VALUE:
default:
{
UInt32 UCS4value = 0;
if ( cs == CharInfo::ISO88591 )
UCS4value = *(unsigned char *)(op_data[1]);
else
{
          // Must be signed: LocaleCharToUCS4 returns a negative value on a
          // conversion error, which an unsigned type would never show.
          Int32 firstCharLen =
LocaleCharToUCS4(op_data[1], len1, &UCS4value, convertCharsetEnum(cs));
if( firstCharLen < 0 )
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("CODE_VALUE FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
*(Int32 *)op_data[0] = UCS4value;
break;
}
}
}
else
*(Int32 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
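// CHAR(n): the inverse of CODE_VALUE. For ISO88591 the code must fit in a
// single byte; for UTF8 the UCS4 value is converted back into a multi-byte
// sequence; for UNICODE/NCHAR_MP it is stored as an NAWchar, byte-swapped
// on little-endian machines where required.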
ex_expr::exp_return_type ExFunctionChar::eval(char *op_data[],CollHeap* heap,
ComDiagsArea** diagsArea)
{
UInt32 asciiCode = *(Lng32 *)op_data[1];
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
if (getOperType() == ITM_CHAR)
{
if (cs == CharInfo::ISO88591)
{
      // asciiCode is unsigned, so a negative Lng32 input wraps to a large
      // value and is caught by the upper-bound check alone.
      if (asciiCode > 0xFF)
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CHAR");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
else
{
op_data[0][0] = (char)asciiCode;
getOperand(0)->setVarLength(1, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
}
else // Must be UTF8 (at least until we support SJIS or some other multi-byte charset)
{
Int32 len0_bytes = getOperand(0)->getLength();
ULng32 * UCS4ptr = (ULng32 *)op_data[1];
Int32 charLength = UCS4ToLocaleChar( UCS4ptr, (char *)op_data[0], len0_bytes, cnv_UTF8 );
if ( charLength < 0 )
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("CHAR FUNCTION");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
else
{
if ( charLength < len0_bytes )
str_pad(((char *)op_data[0]) + charLength, len0_bytes - charLength, ' ');
getOperand(0)->setVarLength(charLength, op_data[-MAX_OPERANDS]);
}
}
}
else
{
// ITM_UNICODE_CHAR or ITM_NCHAR_MP_CHAR
// check if the code value is legal for UNICODE only. No need
// for KANJI/KSC5601 as both take code-point values with any bit-patterns.
    // asciiCode is unsigned, so the former (asciiCode < 0) test was dead code.
    if ( (getOperType() == ITM_UNICODE_CHAR) && (asciiCode >= 0xFFFE) )
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CHAR");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
NAWchar wcharCode = (NAWchar)asciiCode;
#if defined( NA_LITTLE_ENDIAN )
// swap the byte order on little-endian machines as NCHAR_MP charsets are stored
// in multi-byte form (i.e. in big-endian order).
if (getOperType() == ITM_NCHAR_MP_CHAR)
{
*(NAWchar*)op_data[0] = reversebytesUS(wcharCode);
} else
*(NAWchar*)op_data[0] = wcharCode;
#else
*(NAWchar*)op_data[0] = wcharCode;
#endif
}
return ex_expr::EXPR_OK;
}
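// CHAR_LENGTH(str): for single-byte charsets the byte length is the
// answer; for multi-byte charsets the byte length is converted to a
// character count (after dropping UTF8 filler spaces).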
ex_expr::exp_return_type ex_function_char_length::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 offset = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Int32 numOfChar = 0;
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if(CharInfo::maxBytesPerChar(cs) == 1)
{
*(Int32 *)op_data[0] = offset;
return ex_expr::EXPR_OK;
}
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
offset = Attributes::trimFillerSpaces( op_data[1], prec1, offset, cs );
}
// convert to number of character
numOfChar = Attributes::convertOffsetToChar (op_data[1], offset, cs);
if(numOfChar < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("CHAR FUNCTION");
return ex_expr::EXPR_ERROR;
}
// Move operand's length into result.
// The data type of result is long.
*(Int32 *)op_data[0] = numOfChar;
return ex_expr::EXPR_OK;
};
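// CONVERTTOHEX / CONVERTFROMHEX: TOHEX emits two hex digits per input byte
// (four per NAWchar for double-byte data); FROMHEX requires an even-length
// string of digits 0-9/A-F and packs each digit pair back into one byte.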
ex_expr::exp_return_type ExFunctionConvertHex::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
static const char HexArray[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'};
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
if (getOperType() == ITM_CONVERTTOHEX)
{
Int32 i;
if ( DFS2REC::isDoubleCharacter(getOperand(1)->getDatatype()) )
{
NAWchar *w_p = (NAWchar*)op_data[1];
Int32 w_len = len1 / sizeof(NAWchar);
for (i = 0; i < w_len; i++)
{
op_data[0][4 * i ] = HexArray[0x0F & w_p[i] >> 12];
op_data[0][4 * i + 1] = HexArray[0x0F & w_p[i] >> 8];
op_data[0][4 * i + 2] = HexArray[0x0F & w_p[i] >> 4];
op_data[0][4 * i + 3] = HexArray[0x0F & w_p[i]];
}
} else {
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
for (i = 0; i < len1; i++)
{
op_data[0][2 * i] = HexArray[0x0F & op_data[1][i] >> 4];
op_data[0][2 * i + 1] = HexArray[0x0F & op_data[1][i]];
}
}
getOperand(0)->setVarLength(2 * len1, op_data[-MAX_OPERANDS]);
}
else
{
// convert from hex.
// make sure that length is an even number.
if ((len1 % 2) != 0)
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CONVERTFROMHEX");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
Int32 i = 0;
Int32 j = 0;
while (i < len1)
{
      // Both hex digits must be valid. The first digit's two tests are
      // grouped before the &&; without the extra parentheses, '0'-'9' in
      // the first position would bypass validation of the second digit.
      if ((((op_data[1][i] >= '0') && (op_data[1][i] <= '9')) ||
           ((op_data[1][i] >= 'A') && (op_data[1][i] <= 'F'))) &&
          (((op_data[1][i+1] >= '0') && (op_data[1][i+1] <= '9')) ||
           ((op_data[1][i+1] >= 'A') && (op_data[1][i+1] <= 'F'))))
{
unsigned char upper4Bits;
unsigned char lower4Bits;
if ((op_data[1][i] >= '0') && (op_data[1][i] <= '9'))
upper4Bits = (unsigned char)(op_data[1][i]) - '0';
else
upper4Bits = (unsigned char)(op_data[1][i]) - 'A' + 10;
if ((op_data[1][i+1] >= '0') && (op_data[1][i+1] <= '9'))
lower4Bits = (unsigned char)(op_data[1][i+1]) - '0';
else
lower4Bits = (unsigned char)(op_data[1][i+1]) - 'A' + 10;
op_data[0][j] = (upper4Bits << 4) | lower4Bits;
i += 2;
j++;
}
else
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("CONVERTFROMHEX");
if (derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
} // while
getOperand(0)->setVarLength(len1 / 2, op_data[-MAX_OPERANDS]);
} // CONVERTFROMHEX
return ex_expr::EXPR_OK;
}
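// Simple byte-wise POSITION helper: returns the 1-based offset of 'pat'
// within 'src', or 0 if absent. With patternInFront set, only an anchored
// match at offset 1 is tested.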
Int32 ex_function_position::findPosition (char* pat,
Int32 patLen,
char* src,
Int32 srcLen,
NABoolean patternInFront)
{
Int32 i, j, k;
// Pattern must be able to "fit" in source string
if (patLen > srcLen)
return 0;
// One time check at beginning of src string if flag indicate so.
if (patternInFront)
return ((str_cmp(pat, src, patLen) == 0) ? 1 : 0);
// Search for pattern throughout the src string
for (i=0; (i + patLen) <= srcLen; i++) {
NABoolean found = TRUE ;
for (j=i, k=0; found && (k < patLen); k++, j++) {
if (src[j] != pat[k])
        found = FALSE;
}
if (found)
return i+1;
}
return 0;
}
Lng32 ex_function_position::findPosition
(char * sourceStr,
Lng32 sourceLen,
char * searchStr,
Lng32 searchLen,
short bytesPerChar,
Int16 nPasses,
CharInfo::Collation collation,
short charOffsetFlag , // 1: char, 0: offset
CharInfo::CharSet cs )
{
// If searchLen is <= 0 or searchLen > sourceLen or
// if searchStr is not present in sourceStr,
// return a position of 0;
// otherwise return the position of searchStr in
// sourceStr.
if (searchLen <= 0)
return 0;
Int32 position = 1;
Int32 collPosition = 1;
Int32 char_count = 1;
Int32 number_bytes;
while (position + searchLen -1 <= sourceLen)
{
      if (str_cmp(searchStr, &sourceStr[position-1], (Int32)searchLen) != 0)
      {
        if (CollationInfo::isSystemCollation(collation))
        {
          position += nPasses;
          collPosition ++;
        }
        else
        {
          number_bytes = Attributes::getFirstCharLength
            (&sourceStr[position-1], sourceLen - position + 1, cs);
          if(number_bytes <= 0)
            return (Lng32)-1;
          ++char_count;
          position += number_bytes;
        }
      }
else
{
if (CollationInfo::isSystemCollation(collation))
{
return collPosition;
}
else
{
if(charOffsetFlag)
return char_count;
else
return position;
}
}
}
return 0;
}
ex_expr::exp_return_type
ex_function_char_length_doublebyte::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
// Move operand's length into result.
// The data type of result is long.
*(Lng32 *)op_data[0] =
(getOperand(1)->getLength(op_data[-MAX_OPERANDS+1])) >> 1;
return ex_expr::EXPR_OK;
};
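// POSITION(pat IN str): returns the 1-based character position of operand 1
// within operand 2, 1 for an empty pattern and 0 when not found. System
// collations are searched on their encoded collation keys.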
ex_expr::exp_return_type ex_function_position::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
// search for operand 1
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
// in operand 2
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
if ( cs == CharInfo::UTF8 )
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
// If len1 is 0, return a position of 1.
Lng32 position;
if (len1 > 0)
{
short nPasses = CollationInfo::getCollationNPasses(getCollation());
position = findPosition(op_data[2],
len2,
op_data[1],
len1,
1,
nPasses,
getCollation(),
1,
cs);
if(position < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("POSITION FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
else
{
// if len1 <= 0, return position of 1.
position = 1;
}
// Now copy the position into result which is a long.
*(Int32 *)op_data[0] = position;
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_position_doublebyte::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Lng32 len1 = ( getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]) ) / sizeof(NAWchar);
// If len1 is 0, return a position of 1.
Lng32 position = 1;
if (len1 > 0)
{
Lng32 len2 = ( getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]) ) / sizeof(NAWchar);
NAWchar* pat = (NAWchar*)op_data[1];
NAWchar* source = (NAWchar*)op_data[2];
// If len1 > len2 or if operand 1 is not present in operand 2, return
// a position of 0; otherwise return the position of operand 1 in
// operand 2.
short found = 0;
while (position+len1-1 <= len2 && !found)
{
if (wc_str_cmp(pat, &source[position-1], (Int32)len1))
position++;
else
found = 1;
}
if (!found) position = 0;
}
// Now copy the position into result which is a long.
*(Lng32 *)op_data[0] = position;
return ex_expr::EXPR_OK;
};
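// Helper for TOKENSTR: locates the token name inside the source string by
// straight byte comparison, returning a 1-based position or 0 if not found.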
static Lng32 findTokenPosition(char * sourceStr, Lng32 sourceLen,
char * searchStr, Lng32 searchLen,
short bytesPerChar)
{
// If searchLen is <= 0 or searchLen > sourceLen or
// if searchStr is not present in sourceStr,
// return a position of 0;
// otherwise return the position of searchStr in
// sourceStr.
Lng32 position = 0;
if (searchLen <= 0)
position = 0;
else
{
NABoolean found = FALSE;
position = 1;
while (position+searchLen-1 <= sourceLen && !found)
{
if (str_cmp(searchStr, &sourceStr[position-1], (Int32)searchLen) != 0)
position += bytesPerChar;
else
found = 1;
}
if (!found) position = 0;
}
return position;
}
ex_expr::exp_return_type ExFunctionTokenStr::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
// search for operand 1
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
// in operand 2
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
if ( cs == CharInfo::UTF8 )
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
Lng32 position;
position = findTokenPosition(op_data[2], len2, op_data[1], len1, 1);
if (position <= 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Lng32 startPos = position + len1 - 1;
Lng32 i;
if (op_data[2][startPos] == '\'')
{
// find the ending single quote.
startPos++;
i = startPos;
while ((i < len2) &&
(op_data[2][i] != '\''))
i++;
}
else
{
// find the ending space character
// startPos++;
i = startPos;
while ((i < len2) &&
(op_data[2][i] != ' '))
i++;
}
/* if (op_data[2][startPos] != '\'')
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (i == len2)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
*/
str_cpy_all(op_data[0], &op_data[2][startPos], (i - startPos));
if ((i - startPos) <= 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
// If result is a varchar, store the length of substring
// in the varlen indicator.
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength(i - startPos, op_data[-MAX_OPERANDS]);
else
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
};
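// REVERSE(str): ISO88591 is reversed byte-wise, UCS2 in 2-byte units, and
// UTF8 by walking characters forward while writing them backward, so
// multi-byte sequences stay intact.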
ex_expr::exp_return_type ExFunctionReverseStr::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
char * tgt = op_data[0];
char * src = op_data[1];
Lng32 srcPos = 0;
Lng32 tgtPos = 0;
if (cs == CharInfo::ISO88591)
{
tgtPos = len1 - 1;
for (srcPos = 0; srcPos < len1; srcPos++)
{
tgt[tgtPos--] = src[srcPos];
}
}
else if (cs == CharInfo::UCS2)
{
Lng32 bpc = unicode_char_set::bytesPerChar();
srcPos = 0;
tgtPos = len1 - bpc;
while (srcPos < len1)
{
str_cpy_all(&tgt[tgtPos], &src[srcPos], bpc);
tgtPos -= bpc;
srcPos += bpc;
}
}
else if (cs == CharInfo::UTF8)
{
UInt32 UCS4value;
cnv_charset charset = convertCharsetEnum(cs);
Lng32 charLen;
srcPos = 0;
tgtPos = len1;
while(srcPos < len1)
{
charLen = LocaleCharToUCS4(&op_data[1][srcPos],
len1 - srcPos,
&UCS4value,
charset);
if (charLen < 0)
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("REVERSE FUNCTION");
return ex_expr::EXPR_ERROR;
}
tgtPos -= charLen;
str_cpy_all(&tgt[tgtPos], &src[srcPos], charLen);
srcPos += charLen;
}
}
else
{
const char *csname = CharInfo::getCharSetName(cs);
ExRaiseSqlError(heap, diagsArea, EXE_INVALID_CHARACTER);
*(*diagsArea) << DgString0(csname) << DgString1("REVERSE FUNCTION");
return ex_expr::EXPR_ERROR;
}
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength(len1, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_current::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
if (getOperand())
{
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(0);
rec_datetime_field srcStartField;
rec_datetime_field srcEndField;
if (datetimeOpType->getDatetimeFields(datetimeOpType->getPrecision(),
srcStartField,
srcEndField) != 0)
{
return ex_expr::EXPR_ERROR;
}
ExpDatetime::currentTimeStamp(op_data[0],
srcStartField,
srcEndField,
datetimeOpType->getScale());
}
else
{
ExpDatetime::currentTimeStamp(op_data[0],
REC_DATE_YEAR,
REC_DATE_SECOND,
ExpDatetime::MAX_DATETIME_FRACT_PREC);
}
return ex_expr::EXPR_OK;
};
// MV,
ex_expr::exp_return_type ExFunctionGenericUpdateOutput::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// We do not set the value here.
// The return value is written into the space allocated for it by the
// executor work method.
// The value is initialized to zero here in case VSBB is rejected by the
// optimizer, so the executor will not override this value.
if (origFunctionOperType() == ITM_VSBBROWCOUNT)
*(Lng32 *)op_data[0] = 1; // Simple Insert RowCount - 1 row.
else
*(Lng32 *)op_data[0] = 0; // Simple Insert RowType is 0.
return ex_expr::EXPR_OK;
}
// Triggers -
ex_expr::exp_return_type ExFunctionInternalTimestamp::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
ex_function_current currentFun;
return (currentFun.eval(op_data, heap, diagsArea));
}
ex_expr::exp_return_type ex_function_bool::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ex_expr::exp_return_type retcode = ex_expr::EXPR_OK;
switch (getOperType())
{
case ITM_RETURN_TRUE:
{
*(Lng32 *)op_data[0] = 1;
}
break;
case ITM_RETURN_FALSE:
{
*(Lng32 *)op_data[0] = 0;
}
break;
case ITM_RETURN_NULL:
{
*(Lng32 *)op_data[0] = -1;
}
break;
default:
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
retcode = ex_expr::EXPR_ERROR;
}
break;
}
return retcode;
}
ex_expr::exp_return_type ex_function_converttimestamp::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int64 juliantimestamp;
str_cpy_all((char *) &juliantimestamp, op_data[1], sizeof(juliantimestamp));
const Int64 minJuliantimestamp = (Int64) 1487311632 * (Int64) 100000000;
const Int64 maxJuliantimestamp = (Int64) 2749273487LL * (Int64) 100000000 +
(Int64) 99999999;
if ((juliantimestamp < minJuliantimestamp) ||
(juliantimestamp > maxJuliantimestamp)) {
char tmpbuf[24];
memset(tmpbuf, 0, sizeof(tmpbuf) );
sprintf(tmpbuf, "%ld", juliantimestamp);
ExRaiseSqlError(heap, diagsArea, EXE_CONVERTTIMESTAMP_ERROR);
if(*diagsArea)
**diagsArea << DgString0(tmpbuf);
if(derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
short timestamp[8];
INTERPRETTIMESTAMP(juliantimestamp, timestamp);
short year = timestamp[0];
char month = (char) timestamp[1];
char day = (char) timestamp[2];
char hour = (char) timestamp[3];
char minute = (char) timestamp[4];
char second = (char) timestamp[5];
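  // INTERPRETTIMESTAMP returns milliseconds in timestamp[6] and
  // microseconds in timestamp[7]; combine them into a microsecond
  // fraction.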
Lng32 fraction = timestamp[6] * 1000 + timestamp[7];
char *datetimeOpData = op_data[0];
str_cpy_all(datetimeOpData, (char *) &year, sizeof(year));
datetimeOpData += sizeof(year);
*datetimeOpData++ = month;
*datetimeOpData++ = day;
*datetimeOpData++ = hour;
*datetimeOpData++ = minute;
*datetimeOpData++ = second;
str_cpy_all(datetimeOpData, (char *) &fraction, sizeof(fraction));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_dateformat::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
char *opData = op_data[1];
char *formatStr = op_data[2];
char *result = op_data[0];
if ((getDateFormat() == ExpDatetime::DATETIME_FORMAT_NUM1) ||
(getDateFormat() == ExpDatetime::DATETIME_FORMAT_NUM2))
{
// numeric to TIME conversion.
if(ExpDatetime::convNumericTimeToASCII(opData,
result,
getOperand(0)->getLength(),
getDateFormat(),
formatStr,
heap,
diagsArea) < 0) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
else
{
// Convert the given datetime value to an ASCII string value in the
// given format.
//
if ((DFS2REC::isAnyCharacter(getOperand(1)->getDatatype())) &&
(DFS2REC::isDateTime(getOperand(0)->getDatatype())))
{
Lng32 sourceLen = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(0);
if(datetimeOpType->convAsciiToDate(opData,
sourceLen,
result,
getOperand(0)->getLength(),
getDateFormat(),
heap,
diagsArea,
0) < 0) {
if (diagsArea &&
(!(*diagsArea) ||
((*diagsArea) &&
(*diagsArea)->getNumber(DgSqlCode::ERROR_) == 0)))
{
// we expect convAsciiToDate to raise a diagnostic; if it
// didn't, raise an internal error here
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
}
return ex_expr::EXPR_ERROR;
}
}
else
{
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(1);
if(datetimeOpType->convDatetimeToASCII(opData,
result,
getOperand(0)->getLength(),
getDateFormat(),
formatStr,
heap,
diagsArea) < 0) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_dayofweek::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Int64 interval;
short year;
char month;
char day;
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(1);
char *datetimeOpData = op_data[1];
str_cpy_all((char *) &year, datetimeOpData, sizeof(year));
datetimeOpData += sizeof(year);
month = *datetimeOpData++;
day = *datetimeOpData;
interval = datetimeOpType->getTotalDays(year, month, day);
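  // Map the total day count onto 1..7; the +1 aligns the epoch's day
  // of the week before the modulo (inferred from the formula below).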
unsigned short result = (unsigned short)((interval + 1) % 7) + 1;
str_cpy_all(op_data[0], (char *) &result, sizeof(result));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_extract::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int64 result = 0;
if (getOperand(1)->getDatatype() == REC_DATETIME) {
ExpDatetime *datetimeOpType = (ExpDatetime *) getOperand(1);
char *datetimeOpData = op_data[1];
rec_datetime_field opStartField;
rec_datetime_field opEndField;
rec_datetime_field extractStartField = getExtractField();
rec_datetime_field extractEndField = extractStartField;
if (extractStartField > REC_DATE_MAX_SINGLE_FIELD) {
extractStartField = REC_DATE_YEAR;
if (extractEndField == REC_DATE_YEARQUARTER_EXTRACT ||
extractEndField == REC_DATE_YEARMONTH_EXTRACT ||
extractEndField == REC_DATE_YEARQUARTER_D_EXTRACT ||
extractEndField == REC_DATE_YEARMONTH_D_EXTRACT)
extractEndField = REC_DATE_MONTH;
else {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
if (datetimeOpType->getDatetimeFields(datetimeOpType->getPrecision(),
opStartField,
opEndField) != 0) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
for (Int32 field = opStartField; field <= extractEndField; field++) {
switch (field) {
case REC_DATE_YEAR: {
short value;
if (field >= extractStartField && field <= extractEndField) {
str_cpy_all((char *) &value, datetimeOpData, sizeof(value));
result = value;
}
datetimeOpData += sizeof(value);
break;
}
case REC_DATE_MONTH:
case REC_DATE_DAY:
case REC_DATE_HOUR:
case REC_DATE_MINUTE:
if (field >= extractStartField && field <= extractEndField) {
switch (getExtractField())
{
case REC_DATE_YEARQUARTER_EXTRACT:
// 10*year + quarter - human readable quarter format
result = 10*result + ((*datetimeOpData)+2) / 3;
break;
case REC_DATE_YEARQUARTER_D_EXTRACT:
// 4*year + 0-based quarter - dense quarter format, better for MDAM
result = 4*result + (*datetimeOpData-1) / 3;
break;
case REC_DATE_YEARMONTH_EXTRACT:
// 100*year + month - human readable yearmonth format
result = 100*result + *datetimeOpData;
break;
case REC_DATE_YEARMONTH_D_EXTRACT:
// 12*year + 0-based month - dense month format, better for MDAM
result = 12*result + *datetimeOpData-1;
break;
default:
// regular extract of month, day, hour, minute
result = *datetimeOpData;
break;
}
}
datetimeOpData++;
break;
case REC_DATE_SECOND:
if (field == getExtractField()) {
result = *datetimeOpData;
datetimeOpData++;
short fractionPrecision = datetimeOpType->getScale();
if (fractionPrecision > 0) {
do {
result *= 10;
} while (--fractionPrecision > 0);
Lng32 fraction;
str_cpy_all((char *) &fraction, datetimeOpData, sizeof(fraction));
result += fraction;
}
}
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
}
} else {
Int64 interval;
switch (getOperand(1)->getLength()) {
case SQL_SMALL_SIZE: {
short value;
str_cpy_all((char *) &value, op_data[1], sizeof(value));
interval = value;
break;
}
case SQL_INT_SIZE: {
Lng32 value;
str_cpy_all((char *) &value, op_data[1], sizeof(value));
interval = value;
break;
}
case SQL_LARGE_SIZE:
str_cpy_all((char *) &interval, op_data[1], sizeof(interval));
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
rec_datetime_field startField;
if (ExpInterval::getIntervalStartField(getOperand(1)->getDatatype(), startField) != 0)
return ex_expr::EXPR_ERROR;
if (getExtractField() == startField)
result = interval;
else {
switch (getExtractField()) {
case REC_DATE_MONTH:
//
// The sign of the result of a modulus operation involving a negative
// operand is implementation-dependent according to the C++ reference
// manual. In this case, we prefer the result to be negative.
//
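      // Illustrative example: with truncated division, an interval of
      // -25 months extracts MONTH as -1 (-25 % 12 == -1); the fix-up
      // below only ensures the result is not positive.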
result = interval % 12;
if ((interval < 0) && (result > 0))
result = - result;
break;
case REC_DATE_HOUR:
//
// The sign of the result of a modulus operation involving a negative
// operand is implementation-dependent according to the C++ reference
// manual. In this case, we prefer the result to be negative.
//
result = interval % 24;
if ((interval < 0) && (result > 0))
result = - result;
break;
case REC_DATE_MINUTE:
//
// The sign of the result of a modulus operation involving a negative
// operand is implementation-dependent according to the C++ reference
// manual. In this case, we prefer the result to be negative.
//
result = interval % 60;
if ((interval < 0) && (result > 0))
result = - result;
break;
case REC_DATE_SECOND: {
Lng32 divisor = 60;
for (short fp = getOperand(1)->getScale(); fp > 0; fp--)
divisor *= 10;
result = interval;
interval = result / (Int64) divisor;
result -= interval * (Int64) divisor;
break;
}
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
}
}
copyInteger (op_data[0], getOperand(0)->getLength(), &result, sizeof(result));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_juliantimestamp::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ex_expr::exp_return_type retcode = ex_expr::EXPR_OK;
Int64 juliantimestamp;
char *datetimeOpData = op_data[1];
short year;
char month;
char day;
char hour;
char minute;
char second;
Lng32 fraction;
str_cpy_all((char *) &year, datetimeOpData, sizeof(year));
datetimeOpData += sizeof(year);
month = *datetimeOpData++;
day = *datetimeOpData++;
hour = *datetimeOpData++;
minute = *datetimeOpData++;
second = *datetimeOpData++;
str_cpy_all((char *) &fraction, datetimeOpData, sizeof(fraction));
short timestamp[] = {
year, month, day, hour, minute, second, (short)(fraction / 1000), (short)(fraction % 1000)
};
short error;
juliantimestamp = COMPUTETIMESTAMP(timestamp, &error);
if (error) {
char tmpbuf[24];
memset(tmpbuf, 0, sizeof(tmpbuf) );
sprintf(tmpbuf, "%ld", juliantimestamp);
ExRaiseSqlError(heap, diagsArea, EXE_JULIANTIMESTAMP_ERROR);
if(*diagsArea)
**diagsArea << DgString0(tmpbuf);
if(derivedFunction())
{
**diagsArea << DgSqlCode(-EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0(exClauseGetText(origFunctionOperType()));
}
return ex_expr::EXPR_ERROR;
}
str_cpy_all(op_data[0], (char *) &juliantimestamp, sizeof(juliantimestamp));
return retcode;
}
ex_expr::exp_return_type ex_function_exec_count::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
execCount_++;
str_cpy_all(op_data[0], (char *) &execCount_, sizeof(execCount_));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_curr_transid::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
// this function is not used yet anywhere, whoever wants to start using
// it should fill in the missing code here
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
// -----------------------------------------------------------------------
// Helper function for the CURRENT_USER and SESSION_USER functions.
// Used by exp and UDR code to get the CURRENT_USER and SESSION_USER
// information. SESSION_USER is the user that is logged on to the
// current SQL session. CURRENT_USER is the one with whose privileges
// a SQL statement is executed. With a Definer Rights SPJ, the CURRENT_USER is
// the owner of the SPJ, while SESSION_USER is the user who invoked the SPJ.
//
// Returns the current login user name as a C-style string (null terminated)
// in inputUserNameBuffer parameter.
// (e.g. returns "Domain1\Administrator" on NT if logged
// in as Domain1\Administrator,
// "role-mgr" on NSK if logged in as alias "role-mgr",
// "ROLE.MGR" on NSK if logged in as Guardian userid ROLE.MGR)
// Returns FEOK as the return value on success, otherwise returns an error status.
// Returns FEBUFTOOSMALL if the input buffer supplied is not big enough to
// accommodate the actual user name.
// Optionally returns the actual length of the user name (in bytes) in
// actualLength parameter. Returns 0 as the actual length if the function returns
// an error code, except for FEBUFTOOSMALL return code in which case it
// returns the actual length so that the caller can get an idea of the minimum
// size of the input buffer to be provided.
// -----------------------------------------------------------------------
short exp_function_get_user(
OperatorTypeEnum userType, // IN - CURRENT_USER or SESSION_USER
char *inputUserNameBuffer, // IN - buffer for returning the user name
Lng32 inputBufferLength, // IN - length(max) of the above buffer in bytes
Lng32 *actualLength) // OUT optional - actual length of the user name
{
if (actualLength)
*actualLength = 0;
short result = FEOK;
Int32 lActualLen = 0;
Int32 userID;
if (userType == ITM_SESSION_USER)
userID = ComUser::getSessionUser();
else
// Default to CURRENT_USER
userID = ComUser::getCurrentUser();
assert (userID != NA_UserIdDefault);
char userName[MAX_USERNAME_LEN+1];
Int16 status = ComUser::getUserNameFromUserID( (Int32) userID
, (char *)&userName
, (Int32) inputBufferLength
, lActualLen );
if (status == FEOK)
{
str_cpy_all(inputUserNameBuffer, userName, lActualLen);
inputUserNameBuffer[lActualLen] = '\0';
}
else
result = FEBUFTOOSMALL;
if (((result == FEOK) || (result == FEBUFTOOSMALL)) && actualLength)
*actualLength = lActualLen;
return result;
}
ex_expr::exp_return_type ex_function_ansi_user::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
const Lng32 MAX_USER_NAME_LEN = ComSqlId::MAX_LDAP_USER_NAME_LEN;
char username[MAX_USER_NAME_LEN + 1];
Lng32 username_len = 0;
short retcode = FEOK;
retcode = exp_function_get_user ( getOperType(),
username,
MAX_USER_NAME_LEN + 1,
&username_len
);
if (((retcode != FEOK) && (retcode != FENOTFOUND)) ||
((retcode == FEOK) && (username_len == 0)) ) {
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
str_cpy_all(op_data[0], username, username_len);
getOperand(0)->setVarLength(username_len, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_user::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int32 userIDLen = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Int64 id64;
switch (userIDLen)
{
case SQL_SMALL_SIZE:
id64 = *((short *) op_data[1]);
break;
case SQL_INT_SIZE:
id64 = *((Lng32 *) op_data[1]);
break;
case SQL_LARGE_SIZE:
id64 = *((Int64 *) op_data[1]);
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (id64 < -SQL_INT32_MAX || id64 > SQL_INT32_MAX)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
Int32 authID = (Int32)(id64);
// *****************************************************************************
// * *
// * Code to handle functions AUTHNAME and AUTHTYPE. Piggybacked on USER *
// * function code in parser, binder, and optimizer. Perhaps there is a *
// * better way to implement. *
// * *
// * AUTHNAME invokes the porting layer, which calls CLI, as it may be *
// * necessary to read metadata (and therefore have a transaction within *
// * a transaction). *
// * *
// * AUTHTYPE calls Catman directly, as Catman knows the values and ranges *
// * for various types of authentication ID values. Examples include *
// * PUBLIC, SYSTEM, users, and roles. AUTHTYPE returns a single character *
// * that can be used within a case, if, or where clause. *
// * *
// *****************************************************************************
switch (getOperType())
{
case ITM_AUTHNAME:
{
Int16 result;
Int32 authNameLen = 0;
char authName[MAX_AUTHNAME_LEN + 1];
result = ComUser::getAuthNameFromAuthID(authID,
authName,
sizeof(authName),
authNameLen);
if (result != FEOK)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
if (authNameLen > getOperand(0)->getLength())
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
getOperand(0)->setVarLength(authNameLen, op_data[-MAX_OPERANDS]);
str_cpy_all(op_data[0], authName, authNameLen);
return ex_expr::EXPR_OK;
}
case ITM_AUTHTYPE:
{
char authType[2];
authType[1] = 0;
authType[0] = ComUser::getAuthType(authID);
getOperand(0)->setVarLength(1, op_data[-MAX_OPERANDS]);
str_cpy_all(op_data[0], authType, 1);
return ex_expr::EXPR_OK;
}
case ITM_USER:
case ITM_USERID:
default:
{
// Drop down to user code
}
}
Int32 userNameLen = 0;
char userName[MAX_USERNAME_LEN+1];
Int16 result = ComUser::getUserNameFromUserID(authID,
(char *)&userName,
MAX_USERNAME_LEN+1,
userNameLen);
if ((result != FEOK) && (result != FENOTFOUND))
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
else if (result == FENOTFOUND || userNameLen == 0)
{
// set the user name same as user id
// avoids exceptions if userID not present in USERS table
if (authID < 0)
{
userName[0] = '-';
str_itoa((ULng32)(-authID), &userName[1]);
}
else
{
str_itoa((ULng32)(authID), userName);
}
userNameLen = str_len(userName);
}
if (userNameLen > getOperand(0)->getLength())
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_USER_FUNCTION_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
getOperand(0)->setVarLength(userNameLen, op_data[-MAX_OPERANDS]);
str_cpy_all(op_data[0], userName, userNameLen);
return ex_expr::EXPR_OK;
};
////////////////////////////////////////////////////////////////////
//
// encodeKeyValue
//
// This routine encodes key values so that they can be sorted simply
// using binary collation. The routine is called by the executor.
//
// Note: The target MAY point to the source to change the original
// value.
//
////////////////////////////////////////////////////////////////////
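// Illustrative example (little-endian REC_BIN16_SIGNED): the value is
// byte-swapped to big endian and its sign bit is flipped so that plain
// memcmp() ordering matches numeric ordering:
//   -1 (LE bytes FF FF) -> encoded 7F FF
//    0 (LE bytes 00 00) -> encoded 80 00
//    1 (LE bytes 01 00) -> encoded 80 01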
void ex_function_encode::encodeKeyValue(Attributes * attr,
const char *source,
const char *varlenPtr,
char *target,
NABoolean isCaseInsensitive,
Attributes * tgtAttr,
char *tgt_varlen_ptr,
const Int32 tgtLength ,
CharInfo::Collation collation,
CollationInfo::CollationType collType)
{
Lng32 fsDatatype = attr->getDatatype();
Lng32 length = attr->getLength();
Lng32 precision = attr->getPrecision();
switch (fsDatatype) {
#if defined( NA_LITTLE_ENDIAN )
case REC_BIN8_SIGNED:
//
// Flip the sign bit.
//
*(UInt8*)target = *(UInt8*)source;
target[0] ^= 0200;
break;
case REC_BIN8_UNSIGNED:
case REC_BOOLEAN:
*(UInt8*)target = *(UInt8*)source;
break;
case REC_BIN16_SIGNED:
//
// Flip the sign bit.
//
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
target[0] ^= 0200;
break;
case REC_BPINT_UNSIGNED:
case REC_BIN16_UNSIGNED:
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
break;
case REC_BIN32_SIGNED:
//
// Flip the sign bit.
//
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
target[0] ^= 0200;
break;
case REC_BIN32_UNSIGNED:
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
break;
case REC_BIN64_SIGNED:
//
// Flip the sign bit.
//
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
target[0] ^= 0200;
break;
case REC_BIN64_UNSIGNED:
*((UInt64 *) target) = reversebytes( *((UInt64 *) source) );
break;
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
switch(length)
{
case 2: // Signed 16 bit
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
break;
case 4: // Signed 32 bit
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
break;
case 8: // Signed 64 bit
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
break;
default:
assert(FALSE);
break;
}; // switch(length)
target[0] ^= 0200;
break;
case REC_DATETIME: {
// This method has been modified as part of the MP Datetime
// Compatibility project. It has been made more generic so that
// it depends only on the start and end fields of the datetime type.
//
rec_datetime_field startField;
rec_datetime_field endField;
ExpDatetime *dtAttr = (ExpDatetime *)attr;
// Get the start and end fields for this Datetime type.
//
dtAttr->getDatetimeFields(dtAttr->getPrecision(),
startField,
endField);
// Copy all of the source to the destination, then reverse only
// those fields of the target that are longer than 1 byte
//
if (target != source)
str_cpy_all(target, source, length);
// Reverse the YEAR and Fractional precision fields if present.
//
char *ptr = target;
for(Int32 field = startField; field <= endField; field++) {
switch (field) {
case REC_DATE_YEAR:
// convert YYYY from little endian to big endian
//
*((unsigned short *) ptr) = reversebytes( *((unsigned short *) ptr) );
ptr += sizeof(short);
break;
case REC_DATE_MONTH:
case REC_DATE_DAY:
case REC_DATE_HOUR:
case REC_DATE_MINUTE:
// One byte fields are copied as is...
ptr++;
break;
case REC_DATE_SECOND:
ptr++;
// if there is a fraction, make it big endian
// (it is an unsigned long, beginning after the SECOND field)
//
if (dtAttr->getScale() > 0)
*((ULng32 *) ptr) = reversebytes( *((ULng32 *) ptr) );
break;
}
}
break;
}
#else
case REC_BIN8_SIGNED:
case REC_BIN16_SIGNED:
case REC_BIN32_SIGNED:
case REC_BIN64_SIGNED:
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
//
// Flip the sign bit.
//
if (target != source)
str_cpy_all(target, source, length);
target[0] ^= 0200;
break;
#endif
case REC_DECIMAL_LSE:
//
// If the number is negative, complement all the bytes. Otherwise, set
// the sign bit.
//
if (source[0] & 0200) {
for (Lng32 i = 0; i < length; i++)
target[i] = ~source[i];
} else {
if (target != source)
str_cpy_all(target, source, length);
target[0] |= 0200;
}
break;
case REC_NUM_BIG_UNSIGNED: {
BigNum type(length, precision, 0, 1);
type.encode(source, target);
break;
}
case REC_NUM_BIG_SIGNED: {
BigNum type(length, precision, 0, 0);
type.encode(source, target);
break;
}
case REC_IEEE_FLOAT32: {
//
// unencoded float (IEEE 754 - 1985 standard):
//
// +-+----------+---------------------+
// | | exponent | mantissa |
// | | (8 bits) | (23 bits) |
// +-+----------+---------------------+
// |
// +- Sign bit
//
// Encoded float (IEEE 754 - 1985 standard):
//
// +-+--------+-----------------------+
// | |Exponent| Mantissa |
// | |(8 bits)| (23 bits) |
// +-+--------+-----------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
// the following code is independent of the "endianess" of the
// architecture. Instead, it assumes IEEE 754 - 1985 standard
// for representation of floats
// source may not be aligned, move it to a temp var.
float floatsource;
str_cpy_all((char*)&floatsource, source, length);
ULng32 *dblword = (ULng32 *) &floatsource;
if (floatsource < 0) // the sign is negative,
*dblword = ~*dblword; // flip all the bits
else
floatsource = -floatsource; // complement the sign bit
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(ULng32 *) target = reversebytes(*dblword);
#else
// *(unsigned long *) target = *dblword;
str_cpy_all(target, (char*)&floatsource, length);
#endif
break;
}
case REC_IEEE_FLOAT64: {
//
// unencoded double (IEEE 754 - 1985 standard):
//
// +-+--------- -+--------------------+
// | | exponent | mantissa |
// | | (11 bits) | (52 bits) |
// +-+--------- -+--------------------+
// |
// +- Sign bit
//
// Encoded double (IEEE 754 - 1985 standard):
//
// +-+-----------+--------------------+
// | | Exponent | Mantissa |
// | | (11 bits) | (52 bits) |
// +-+-----------+--------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
// the following code is independent of the "endianess" of the
// architecture. Instead, it assumes IEEE 754 - 1985 standard
// for representation of floats
//double doublesource = *(double *) source;
// source may not be aligned, move it to a temp var.
double doublesource;
str_cpy_all((char*)&doublesource, source, length);
Int64 *quadword = (Int64 *) &doublesource;
if (doublesource < 0) // the sign is negative,
*quadword = ~*quadword; // flip all the bits
else
doublesource = -doublesource; // complement the sign bit
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(Int64 *) target = reversebytes(*quadword);
#else
// *(Int64 *) target = *quadword;
str_cpy_all(target, (char*)&doublesource, length);
#endif
break;
}
case REC_BYTE_F_ASCII: {
if (CollationInfo::isSystemCollation(collation ))
{
Int16 nPasses = CollationInfo::getCollationNPasses(collation);
if (collType == CollationInfo::Sort ||
collType == CollationInfo::Compare)
{
encodeCollationKey(
(const UInt8 *)source,
length,
(UInt8 *)target,
tgtLength,
nPasses,
collation,
TRUE);
}
else //search
{
Int32 effEncodedKeyLength = 0;
encodeCollationSearchKey(
(const UInt8 *)source,
length,
(UInt8 *)target,
tgtLength,
effEncodedKeyLength,
nPasses,
collation,
TRUE);
assert(tgtAttr && tgt_varlen_ptr);
tgtAttr->setVarLength(effEncodedKeyLength, tgt_varlen_ptr);
}
}
else
{
//------------------------------------------
if (target != source)
str_cpy_all(target, source, length);
if (isCaseInsensitive)
{
// upcase target
for (Int32 i = 0; i < length; i++)
{
target[i] = TOUPPER(source[i]);
}
}
//--------------------------
}
}
break;
case REC_BYTE_V_ASCII:
case REC_BYTE_V_ASCII_LONG:
{
Int32 vc_len = attr->getLength(varlenPtr);
if (CollationInfo::isSystemCollation(collation))
{
Int16 nPasses = CollationInfo::getCollationNPasses(collation);
NABoolean rmTspaces = getRmTSpaces(collation);
if (collType == CollationInfo::Sort ||
collType == CollationInfo::Compare)
{
encodeCollationKey(
(UInt8 *)source,
(Int16)vc_len,
(UInt8 *)target,
tgtLength,
nPasses,
collation,
rmTspaces);
}
else
{
Int32 effEncodedKeyLength = 0;
encodeCollationSearchKey(
(UInt8 *)source,
(Int16)vc_len,
(UInt8 *)target,
tgtLength,
effEncodedKeyLength,
nPasses,
collation,
rmTspaces);
assert(tgtAttr && tgt_varlen_ptr);
tgtAttr->setVarLength(effEncodedKeyLength, tgt_varlen_ptr);
}
}
else
{
//
// Copy the source to the target.
//
if (!isCaseInsensitive)
str_cpy_all(target, source, vc_len);
else
{
// upcase target
for (Int32 i = 0; i < vc_len; i++)
{
target[i] = TOUPPER(source[i]);
}
}
//
// Blankpad the target (if needed).
//
if (vc_len < length)
str_pad(&target[vc_len],
(Int32) (length - vc_len), ' ');
}
}
break;
// added for Unicode data type.
case REC_NCHAR_V_UNICODE:
{
Int32 vc_len = attr->getLength(varlenPtr);
//
// Copy the source to the target.
//
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
wc_str_pad((NAWchar*)&target[vc_len],
(Int32) (length - vc_len)/sizeof(NAWchar), unicode_char_set::space_char());
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)target, length/sizeof(NAWchar));
#endif
break;
}
// added for Unicode data type.
case REC_NCHAR_F_UNICODE:
{
if (target != source)
str_cpy_all(target, source, length);
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)target, length/sizeof(NAWchar));
#endif
break;
}
case REC_BYTE_V_ANSI:
{
short vc_len;
vc_len = strlen(source);
//
// Copy the source to the target.
//
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
str_pad(&target[vc_len], (Int32) (length - vc_len), ' ');
}
break;
default:
//
// Encoding is not needed. Just copy the source to the target.
//
if (target != source)
str_cpy_all(target, source, length);
break;
}
}
////////////////////////////////////////////////////////////////////
// class ex_function_encode
////////////////////////////////////////////////////////////////////
ex_function_encode::ex_function_encode(){};
ex_function_encode::ex_function_encode(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
short descFlag)
: ex_function_clause(oper_type, 2, attr, space),
flags_(0),
collation_((Int16) CharInfo::DefaultCollation)
{
if (descFlag)
setIsDesc(TRUE);
else
setIsDesc(FALSE);
setCollEncodingType(CollationInfo::Sort);
};
ex_function_encode::ex_function_encode(OperatorTypeEnum oper_type,
Attributes ** attr,
Space * space,
CharInfo::Collation collation,
short descFlag,
CollationInfo::CollationType collType)
: ex_function_clause(oper_type, 2, attr, space),
flags_(0),
collation_((Int16)collation)
{
if (descFlag)
setIsDesc(TRUE);
else
setIsDesc(FALSE);
setCollEncodingType(collType);
};
ex_expr::exp_return_type ex_function_encode::processNulls(
char * op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
if ((CollationInfo::isSystemCollation((CharInfo::Collation) collation_)) &&
getCollEncodingType() != CollationInfo::Sort)
{
return ex_clause::processNulls(op_data,heap,diagsArea);
}
else if (regularNullability())
{
return ex_clause::processNulls(op_data,heap,diagsArea);
}
// if value is missing,
// then move max or min value to result.
if (getOperand(1)->getNullFlag() &&
(!op_data[1])) // missing value (is a null value)
{
if (NOT isDesc())
{
// NULLs sort high for ascending comparison.
// Pad result with highest value.
// For SQL/MP tables, DP2 expects missing value columns to be
// 0 padded after the null-indicator.
str_pad(op_data[2 * MAX_OPERANDS],
(Int32)getOperand(0)->getStorageLength(), '\0');
str_pad(op_data[2 * MAX_OPERANDS],
ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH,
'\377');
}
else
{
// NULLs sort low for descending comparison.
// Pad result with lowest value.
str_pad(op_data[2 * MAX_OPERANDS],
(Int32)getOperand(0)->getStorageLength(),
'\377');
str_pad(op_data[2 * MAX_OPERANDS],
ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH,
'\0');
}
return ex_expr::EXPR_NULL;
}
return ex_expr::EXPR_OK;
};
ex_expr::exp_return_type ex_function_encode::evalDecode(char *op_data[],
CollHeap* heap)
{
char * result = op_data[0];
Attributes *srcOp = getOperand(1);
decodeKeyValue(srcOp,
isDesc(),
op_data[1],
op_data[-MAX_OPERANDS+1],
result,
op_data[-MAX_OPERANDS],
FALSE);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_encode::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea**)
{
if (isDecode())
{
return evalDecode(op_data, heap);
}
Int16 prependedLength = 0;
char * result = op_data[0];
Attributes *tgtOp = getOperand(0);
Attributes *srcOp = getOperand(1);
if ((srcOp->getNullFlag()) && // nullable
(NOT regularNullability()))
{
// If target is aligned format then can't use the 2 byte null here ...
assert( !tgtOp->isSQLMXAlignedFormat() );
// if sort is set for char types with collations (including default)
if (getCollEncodingType() == CollationInfo::Sort)
{
// value cannot be null in this proc. That is handled in process_nulls.
str_pad(result, ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH, '\0');
result += ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH;
prependedLength = ExpTupleDesc::KEY_NULL_INDICATOR_LENGTH;
}
}
if (srcOp->isComplexType())
((ComplexType *)srcOp)->encode(op_data[1], result, isDesc());
else
{
Int32 tgtLength = tgtOp->getLength() - prependedLength ;
encodeKeyValue(srcOp,
op_data[1],
op_data[-MAX_OPERANDS+1],
result,
caseInsensitive(),
tgtOp,
op_data[-MAX_OPERANDS],
tgtLength,
(CharInfo::Collation) collation_,
getCollEncodingType());
}
if (isDesc())
{
      // complement all bytes
for (Lng32 k = 0; k < tgtOp->getLength(); k++)
op_data[0][k] = (char)(~(op_data[0][k]));
}
return ex_expr::EXPR_OK;
}
void ex_function_encode::getCollationWeight(
CharInfo::Collation collation,
Int16 pass,
UInt16 chr,
UInt8 * weightStr,
Int16 & weightStrLen)
{
UChar wght = getCollationWeight(collation, pass, chr);
switch (collation)
{
case CharInfo::CZECH_COLLATION:
case CharInfo::CZECH_COLLATION_CI:
{
if ((CollationInfo::Pass)pass != CollationInfo::SecondPass)
{
if (wght > 0 )
{
weightStr[0] = wght;
weightStrLen = 1;
}
else
{
weightStrLen = 0;
}
}
else
{
if (getCollationWeight(collation, CollationInfo::FirstPass, chr) > 0 )
{
weightStr[0] = wght;
weightStrLen = 1;
}
else
{
weightStr[0] = 0;
weightStr[1] = wght;
weightStrLen = 2;
}
}
}
break;
default:
{
if (wght > 0 )
{
weightStr[0] = wght;
weightStrLen = 1;
}
else
{
weightStrLen = 0;
}
}
}
}
unsigned char ex_function_encode::getCollationWeight(
CharInfo::Collation collation,
Int16 pass,
UInt16 chr)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].weightTable[pass][chr];
}
Int16 ex_function_encode::getNumberOfDigraphs( const CharInfo::Collation collation)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].numberOfDigraphs ;
}
UInt8 * ex_function_encode::getDigraph(const CharInfo::Collation collation, const Int32 digraphNum)
{
return (UInt8 *) collParams[CollationInfo::getCollationParamsIndex(collation)].digraphs[digraphNum] ;
}
Int16 ex_function_encode::getDigraphIndex(const CharInfo::Collation collation, const Int32 digraphNum)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].digraphIdx[digraphNum];
}
NABoolean ex_function_encode::getRmTSpaces(const CharInfo::Collation collation)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].rmTSpaces;
}
NABoolean ex_function_encode::getNumberOfChars(const CharInfo::Collation collation)
{
return collParams[CollationInfo::getCollationParamsIndex(collation)].numberOfChars;
}
NABoolean ex_function_encode::isOneToOneCollation(const CharInfo::Collation collation)
{
for (UInt16 i =0 ; i < getNumberOfChars(collation); i++)
{
for (UInt16 j =i +1 ; j < getNumberOfChars(collation); j++)
{
NABoolean isOneToOne = FALSE;
for (Int16 pass=0 ; pass < CollationInfo::getCollationNPasses(collation); pass++)
{
if (getCollationWeight(collation,pass,i) != getCollationWeight(collation,pass,j) )
{
isOneToOne = TRUE;
}
}
if (!isOneToOne)
{
return FALSE;
}
}
}
return TRUE;
}
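// encodeCollationKey (description inferred from the code below):
// produces a memcmp()-able key for a system collation by emitting the
// per-character weight for each collation pass, with a 0x0 separator
// between passes. Digraphs -- two source characters that collate as a
// single unit, e.g. "ch" in the Czech collation -- are detected and
// assigned one weight.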
void ex_function_encode::encodeCollationKey(const UInt8 * src,
Int32 srcLength,
UInt8 * encodeKey,
const Int32 encodedKeyLength,
Int16 nPasses,
CharInfo::Collation collation,
NABoolean rmTSpaces )
{
assert (CollationInfo::isSystemCollation(collation));
UInt8 * ptr;
if (src[0] == CollationInfo::getCollationMaxChar(collation))
{
str_pad((char*) encodeKey, srcLength, CollationInfo::getCollationMaxChar(collation));
if (str_cmp((char*)src, (char*)encodeKey, srcLength) == 0)
{
str_pad((char*) encodeKey, encodedKeyLength,'\377' );
return;
}
}
if (src[0] == '\0')
{
str_pad((char*) encodeKey, encodedKeyLength, '\0');
if (str_cmp((char*)src, (char*)encodeKey,srcLength) == 0)
{
return;
}
}
Int16 charNum=0;
NABoolean hasDigraphs = FALSE;
Int32 trailingSpaceLength =0;
UInt8 digraph[2];
digraph[0]=digraph[1]=0;
Int16 weightStrLength=0;
ptr= encodeKey;
/////////////////////////////////////////////
for ( Int32 i = srcLength -1 ; rmTSpaces && i> 0 && src[i]== 0x20; i--)
{
trailingSpaceLength++;
}
for (short i= CollationInfo::FirstPass; i< nPasses; i++)
{
if (i != CollationInfo::FirstPass)
{
*ptr++= 0x0;
}
if ((i == CollationInfo::FirstPass) ||
(i != CollationInfo::FirstPass && hasDigraphs))
{
      // loop through the chars in the string, find digraphs and assign weights
for (Int32 srcIdx = 0; srcIdx < srcLength- trailingSpaceLength; srcIdx++)
{
digraph[0] = digraph[1];
digraph[1] = src[srcIdx];
NABoolean digraphFound = FALSE;
for (Int32 j = 0 ; j < getNumberOfDigraphs(collation); j++)
{
if (digraph[0] == getDigraph(collation, j)[0] &&
digraph[1] == getDigraph(collation, j)[1])
{
digraphFound = hasDigraphs = TRUE;
charNum = getDigraphIndex(collation,j);
ptr--;
break;
}
}
if (!digraphFound)
{
charNum = src[srcIdx];
}
getCollationWeight(collation,i, charNum,ptr,weightStrLength);
ptr = ptr + weightStrLength;
}
}
else
{
for (Int32 srcIdx = 0; srcIdx < srcLength- trailingSpaceLength; srcIdx++)
{
charNum = src[srcIdx];
getCollationWeight(collation, i, charNum,ptr,weightStrLength);
ptr = ptr + weightStrLength;
}
}
}
  str_pad((char *) ptr, encodedKeyLength - (ptr - encodeKey), '\0');
} // ex_function_encode::encodeCollationKey
void ex_function_encode::encodeCollationSearchKey(const UInt8 * src,
Int32 srcLength,
UInt8 * encodeKey,
const Int32 encodedKeyLength,
Int32 & effEncodedKeyLength,
Int16 nPasses,
CharInfo::Collation collation,
NABoolean rmTSpaces )
{
assert (CollationInfo::isSystemCollation(collation));
UInt8 * ptr;
Int16 charNum=0;
NABoolean hasDigraphs = FALSE;
Int32 trailingSpaceLength =0;
UInt8 digraph[2];
digraph[0]=digraph[1]=0;
ptr= encodeKey;
/////////////////////////////////////////////
for ( Int32 i = srcLength -1 ; rmTSpaces && i> 0 && src[i]== 0x20; i--)
{
trailingSpaceLength++;
}
for (Int32 srcIdx = 0; srcIdx < srcLength- trailingSpaceLength; srcIdx++)
{
digraph[0] = digraph[1];
digraph[1] = src[srcIdx];
NABoolean digraphFound = FALSE;
for (Int32 j = 0 ; j < getNumberOfDigraphs(collation); j++)
{
if (digraph[0] == getDigraph(collation, j)[0] &&
digraph[1] == getDigraph(collation, j)[1])
{
digraphFound = hasDigraphs = TRUE;
charNum = getDigraphIndex(collation,j);
ptr = ptr - nPasses;
break;
}
}
if (!digraphFound)
{
charNum = src[srcIdx];
}
//don't include ignorable characters
short ignorable = 0;
for (short np = 0; np < nPasses ; np++)
{
ptr[np]= getCollationWeight(collation, np, charNum);
if (ptr[np] == '\0')
{
ignorable++;
}
}
if (ignorable != nPasses) //
{
ptr = ptr + nPasses;
}
if (digraphFound &&
ignorable != nPasses)
{
for (short np = CollationInfo::FirstPass; np < nPasses ; np++)
{
ptr[np]= '\0';
}
ptr = ptr + nPasses;
}
}
effEncodedKeyLength = ptr - encodeKey ;
  str_pad((char *) ptr, encodedKeyLength - (ptr - encodeKey), '\0');
} // ex_function_encode::encodeCollationSearchKey
////////////////////////////////////////////////////////////////////////
// class ex_function_explode_varchar
////////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ex_function_explode_varchar::processNulls(
char * op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
Attributes *tgt = getOperand(0);
if (getOperand(1)->getNullFlag() && (!op_data[1])) // missing value (is a null value)
{
if (tgt->getNullFlag()) // if result is nullable
{
// move null value to result
ExpTupleDesc::setNullValue( op_data[0],
tgt->getNullBitIndex(),
tgt->getTupleFormat() );
if (forInsert_)
{
// move 0 to length bytes
tgt->setVarLength(0, op_data[MAX_OPERANDS]);
} // for Insert
else
{
// move maxLength to result length bytes
tgt->setVarLength(tgt->getLength(), op_data[MAX_OPERANDS]);
}
return ex_expr::EXPR_NULL; // indicate that a null input was processed
}
else
{
// Attempt to put NULL into column with NOT NULL NONDROPPABLE constraint.
ExRaiseFunctionSqlError(heap, diagsArea, EXE_ASSIGNING_NULL_TO_NOT_NULL,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
} // source is a null value
// first operand is not null -- set null indicator in result if needed
if (tgt->getNullFlag())
{
ExpTupleDesc::clearNullValue( op_data[0],
tgt->getNullBitIndex(),
tgt->getTupleFormat() );
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_explode_varchar::eval(char *op_data[],
CollHeap*heap,
ComDiagsArea**diagsArea)
{
if (forInsert_)
{
// move source to target. No blankpadding.
return convDoIt(op_data[1],
getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]),
getOperand(1)->getDatatype(),
getOperand(1)->getPrecision(),
getOperand(1)->getScale(),
op_data[0],
getOperand(0)->getLength(),
getOperand(0)->getDatatype(),
getOperand(0)->getPrecision(),
getOperand(0)->getScale(),
op_data[-MAX_OPERANDS],
getOperand(0)->getVCIndicatorLength(),
heap,
diagsArea);
}
else
{
// move source to target. Blankpad target to maxLength.
if (convDoIt(op_data[1],
getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]),
getOperand(0)->getDatatype(),
getOperand(1)->getPrecision(),
getOperand(1)->getScale(),
op_data[0],
getOperand(0)->getLength(),
REC_BYTE_F_ASCII,
getOperand(0)->getPrecision(),
getOperand(0)->getScale(),
NULL,
0,
heap,
diagsArea))
return ex_expr::EXPR_ERROR;
// Move max length to length bytes of target.
getOperand(0)->setVarLength(getOperand(0)->getLength(),
op_data[-MAX_OPERANDS]);
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ex_function_hash
////////////////////////////////////////////////////////////////////
ULng32 ex_function_hash::HashHash(ULng32 inValue) {
// Hashhash -
// input : inValue - double word to be hashed
// output : 30-bit hash values uniformly distributed (mod s) for
// any s < 2**30
// This algorithm creates near-uniform output for arbitrarily distributed
// input by selecting for each fullword of the key a quasi-random universal
// hash function from the class of linear functions ax + b (mod p)
// over the field of integers modulo the prime 2**31-1. The output is at
// least comparable in quality to cubics of the form
// ax**3 + bx**2 + cx + d (mod p), and is considerably closer to true
// uniformity than a single linear function chosen once per execution.
// The latter preserve the uniform 2nd central moment of bucket totals,
// and the former the 4th central moment. For probabilistic counting
// applications, the theoretical standard error cannot be achieved with
// less than cubic polynomials, but the present algorithm is approx. 3-5x
// faster. (Cf. histogram doc. for bibliography, but especially:
// Carter and Wegman, "Universal Classes of Hash Functions",
// Journ. Comp. Sys. Sci., 18: April 1979, pp. 145-154
// 22: 1981, pp. 265-279
// Dietzfelbinger, et al., "Polynomial Hash Functions...",
// ICALP '92, pp. 235-246. )
// N.B. - For modular arithmetic the 64-bit product of two 32-bit
// operands must be reduced (mod p). The high-order 32 bits are available
// in hardware but not necessarily through C syntax.
// Two additional optimizations should be noted:
// 1. Instead of processing 3-byte operands, as would be required with
// universal hashing over the field 2**31-1, with alignment delays, we
// process fullwords, and choose distinct 'random' coefficients for
// 2 keys congruent (mod p) using a 32-bit function, and then proceed
// with modular linear hashing over the smaller field.
// 2. For p = 2**c -1 for any c, shifts, and's and or's can be substituted
// for division, as recommended by Carter and Wegman. In addition, the
// output distribution is unaffected (i.e. with probability
// < 1/(2**31-1)) if we omit tests for 0 (mod p).
// To reduce a (mod p), create k1 and k2 (<= p) with a = (2**31)k1 + k2,
// and reduce again to (2**31)k3 + k4, where k4 < 2**31 and k3 = 0 or 1.
// Multi-word keys:
// If k = k1||...||kn we compute the quasi-random coefficients c & d using
// ki, but take h(ki) = c*(ki xor h(ki-1)) + d, where h(k0) = 0, and use
// H(k) = h(kn). This precludes the commutative anomaly
// H(k || k') = H(k' || k)
register ULng32 u, v, c, d, k0;
ULng32 a1, a2, b1, b2;
ULng32 c1 = (ULng32)5233452345LL;
ULng32 c2 = (ULng32)8578458478LL;
ULng32 d1 = 1862598173LL;
ULng32 d2 = 3542657857LL;
ULng32 hashValue = 0;
ULng32 k = inValue;
u = (c1 >> 16) * (k >> 16);
v = c1 * k;
c = u ^ v ^ c2;
u = (d1 >> 16) * (k >> 16);
v = d1 * k;
d = u ^ v ^ d2;
c = ((c & 0x80000000) >> 31) + (c & 0x7fffffff);
d = ((d & 0x80000000) >> 31) + (d & 0x7fffffff);
/* compute hash value 1 */
k0 = hashValue ^ k;
/*hmul(c,k0);
u=u0; v=v0;*/
a1 = c >> 16;
a2 = c & 0xffff;
b1 = k0 >> 16;
b2 = k0 & 0xffff;
v = (((a1 * b2) & 0xffff) + ((b1 * a2) & 0xffff));
u = a1 * b1 + (((a1 * b2) >> 16) + ((b1 * a2) >> 16))
+ ((v & 0x10000) >> 16);
v = c * k0;
if (v < (a2 * b2))
u++;
u = u << 1;
u = ((v & 0x80000000) >> 31) | u;
v = v & 0x7fffffff;
v = u + v;
v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
/*v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
if ( v == 0x7fffffff) v = 0;*/
v = v + d;
v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
/*v = ((v & 0x80000000) >> 31) + (v & 0x7fffffff);
if ( v == 0x7fffffff) v = 0;*/
return (v);
};
ex_expr::exp_return_type ex_function_hash::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Attributes *srcOp = getOperand(1);
ULng32 hashValue = 0;
if (srcOp->getNullFlag() && (! op_data[ -(2 * MAX_OPERANDS) + 1 ]))
{
// operand is a null value. All null values hash to
// the same hash value. Choose any arbitrary constant
// number as the hash value.
hashValue = ExHDPHash::nullHashValue; //;666654765;
}
else
{
// get the actual length stored in the data, or fixed length
Lng32 length = srcOp->getLength(op_data[-MAX_OPERANDS + 1]);
// if VARCHAR, skip trailing blanks and adjust length.
if (srcOp->getVCIndicatorLength() > 0) {
switch ( srcOp->getDatatype() ) {
// added to correctly handle VARNCHAR.
case REC_NCHAR_V_UNICODE:
{
// skip trailing blanks
NAWchar* wstr = (NAWchar*)(op_data[1]);
Lng32 wstr_length = length / sizeof(NAWchar);
while ((wstr_length > 0) &&
( wstr[wstr_length-1] == unicode_char_set::space_char())
)
wstr_length--;
length = sizeof(NAWchar)*wstr_length;
}
break;
default:
//case REC_BYTE_V_ASCII:
// skip trailing blanks
while ((length > 0) &&
(op_data[1][length-1] == ' '))
length--;
break;
}
}
UInt32 flags = ExHDPHash::NO_FLAGS;
switch(srcOp->getDatatype()) {
case REC_NCHAR_V_UNICODE:
case REC_NCHAR_V_ANSI_UNICODE:
flags = ExHDPHash::SWAP_TWO;
break;
}
hashValue = ExHDPHash::hash(op_data[1], flags, length);
};
*(ULng32 *)op_data[0] = hashValue;
return ex_expr::EXPR_OK;
};
Lng32 ex_function_hivehash::hashForCharType(char* data, Lng32 length)
{
  // To compute: SUM (i from 0 to n-1) s(i) * 31^(n-1-i)
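  // Illustrative example: for "ab", the result is 97*31 + 98 = 3105,
  // matching Java's String.hashCode() for ASCII data (the convention
  // Hive uses for strings).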
ULng32 resultCopy = 0;
ULng32 result = (ULng32)data[0];
for (Lng32 i=1; i<length; i++ ) {
// perform result * 31, optimized as (result <<5 - result)
resultCopy = result;
result <<= 5;
result -= resultCopy;
result += (ULng32)(data[i]);
}
return result;
}
ex_expr::exp_return_type ex_function_hivehash::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Attributes *srcOp = getOperand(1);
ULng32 hashValue = 0;
Lng32 length;
if (srcOp->getNullFlag() && (! op_data[ -(2 * MAX_OPERANDS) + 1 ]))
{
// operand is a null value. All null values hash to the same hash value.
hashValue = 0; // hive semantics: hash(NULL) = 0
  }
  else if ( (DFS2REC::isSQLVarChar(srcOp->getDatatype()) ||
             DFS2REC::isANSIVarChar(srcOp->getDatatype())) &&
            getOperand(1)->getVCIndicatorLength() > 0 )
  {
    length = srcOp->getLength(op_data[-MAX_OPERANDS + 1]);
    hashValue = ex_function_hivehash::hashForCharType(op_data[1], length);
  }
  else if ( DFS2REC::isSQLFixedChar(srcOp->getDatatype()) )
  {
    length = srcOp->getLength();
    hashValue = ex_function_hivehash::hashForCharType(op_data[1], length);
  }
  else if ( DFS2REC::isBinary(srcOp->getDatatype()) )
  {
    hashValue = *(ULng32*)(op_data[1]);
  } // TBD: other SQ types
*(ULng32 *)op_data[0] = hashValue;
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ExHashComb
////////////////////////////////////////////////////////////////////
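// ExHashComb combines two hash values as rotl(op1, 1) ^ op2. The
// one-bit rotation makes the combination order sensitive, so
// combine(a, b) generally differs from combine(b, a).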
ex_expr::exp_return_type ExHashComb::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
// with built-in long long type we could also support 8 byte integers
ULng32 op1, op2;
switch (getOperand(0)->getStorageLength())
{
case 4:
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
*((ULng32 *) op_data[0]) = ((op1 << 1) | (op1 >> 31)) ^ op2;
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ExHiveHashComb
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ExHiveHashComb::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
// with built-in long long type we could also support 8 byte integers
ULng32 op1, op2;
switch (getOperand(0)->getStorageLength())
{
case 4:
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
      // compute op1 * 31 + op2, optimized as (op1 << 5) - op1 + op2;
      // note the parentheses: << binds more loosely than + and -
      *((ULng32 *) op_data[0]) = (op1 << 5) - op1 + op2;
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
// -------------------------------------------------------------
// Hash Functions used by Hash Partitioning. These functions cannot
// change once Hash Partitioning is released! Defined for all data
// types, returns a 32 bit non-nullable hash value for the data item.
// The ::hash() function uses a loop over the key bytes; the other
// hash2()/hash4()/hash8() are more efficient but are only applicable
// to keys whose sizes are known at compile time: 2/4/8 bytes.
//--------------------------------------------------------------
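// Note (inferred from the datatype switch in ExHDPHash::eval below):
// the SWAP_* flags byte-swap fixed-size little-endian values while
// hashing, so the resulting hash value is independent of the
// machine's byte order.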
ULng32 ExHDPHash::hash(const char *data, UInt32 flags, Int32 length)
{
ULng32 hashValue = 0;
unsigned char *valp = (unsigned char *)data;
Int32 iter = 0; // iterator over the key bytes, if needed
switch(flags) {
case NO_FLAGS:
case SWAP_EIGHT:
{
// Speedup for long keys - compute first 8 bytes fast (the rest with a loop)
if ( length >= 8 ) {
hashValue = hash8(data, flags); // do the first 8 bytes fast
// continue with the 9-th byte (only when length > 8 )
valp = (unsigned char *)&data[8];
iter = 8;
}
for(; iter < length; iter++) {
// Make sure the hashValue is sensitive to the byte position.
// One bit circular shift.
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp++];
}
break;
}
case SWAP_TWO:
{
// Speedup for long keys - compute first 8 bytes fast (the rest with a loop)
if ( length >= 8 ) {
hashValue = hash8(data, flags); // do the first 8 bytes fast
// continue with the 9-th byte (only when length > 8 )
valp = (unsigned char *)&data[8];
iter = 8;
}
// Loop over all the bytes of the value and compute the hash value.
for(; iter < length; iter+=2) {
// Make sure the hashValue is sensitive to the byte position.
// One bit circular shift.
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+1)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp];
valp += 2;
}
break;
}
case SWAP_FOUR:
{
hashValue = hash4(data, flags);
break;
}
case (SWAP_FIRSTTWO | SWAP_LASTFOUR):
case SWAP_FIRSTTWO:
case SWAP_LASTFOUR:
{
if((flags & SWAP_FIRSTTWO) != 0) {
hashValue = randomHashValues[*(valp+1)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp];
valp += 2;
iter += 2;
}
if((flags & SWAP_LASTFOUR) != 0) {
length -= 4;
}
for(; iter < length; iter++) {
// Make sure the hashValue is sensitive to the byte position.
// One bit circular shift.
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*valp++];
}
if((flags & SWAP_LASTFOUR) != 0) {
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+3)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+2)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+1)];
hashValue =
(hashValue << 1 | hashValue >> 31) ^ randomHashValues[*(valp+0)];
}
break;
}
default:
assert(FALSE);
}
return hashValue;
}
ex_expr::exp_return_type ExHDPHash::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
Attributes *srcOp = getOperand(1);
ULng32 hashValue;
if (srcOp->getNullFlag() && (! op_data[ -(2 * MAX_OPERANDS) + 1 ]))
{
// operand is a null value. All null values hash to
// the same hash value. Choose any arbitrary constant
// number as the hash value.
//
hashValue = ExHDPHash::nullHashValue; //666654765;
}
else {
Int32 length = (Int32)srcOp->getLength(op_data[-MAX_OPERANDS + 1]);
// if VARCHAR, skip trailing blanks and adjust length.
if (srcOp->getVCIndicatorLength() > 0) {
switch ( srcOp->getDatatype() ) {
// added to correctly handle VARNCHAR.
case REC_NCHAR_V_UNICODE:
{
// skip trailing blanks
NAWchar* wstr = (NAWchar*)(op_data[1]);
Int32 wstr_length = length / sizeof(NAWchar);
while ((wstr_length > 0) &&
( wstr[wstr_length-1] == unicode_char_set::space_char()))
wstr_length--;
length = sizeof(NAWchar) * wstr_length;
}
break;
default:
// skip trailing blanks
while ((length > 0) &&
(op_data[1][length-1] == ' '))
length--;
break;
}
}
UInt32 flags = NO_FLAGS;
switch(srcOp->getDatatype()) {
case REC_NUM_BIG_UNSIGNED:
case REC_NUM_BIG_SIGNED:
case REC_BIN16_SIGNED:
case REC_BIN16_UNSIGNED:
case REC_NCHAR_F_UNICODE:
case REC_NCHAR_V_UNICODE:
case REC_NCHAR_V_ANSI_UNICODE:
flags = SWAP_TWO;
break;
case REC_BIN32_SIGNED:
case REC_BIN32_UNSIGNED:
case REC_IEEE_FLOAT32:
flags = SWAP_FOUR;
break;
case REC_BIN64_SIGNED:
case REC_BIN64_UNSIGNED:
case REC_IEEE_FLOAT64:
flags = SWAP_EIGHT;
break;
case REC_DATETIME:
{
rec_datetime_field start;
rec_datetime_field end;
ExpDatetime *datetime = (ExpDatetime*) srcOp;
datetime->getDatetimeFields(srcOp->getPrecision(), start, end);
if(start == REC_DATE_YEAR) {
flags = SWAP_FIRSTTWO;
}
if(end == REC_DATE_SECOND && srcOp->getScale() > 0) {
flags |= SWAP_LASTFOUR;
}
}
break;
default:
if(srcOp->getDatatype() >= REC_MIN_INTERVAL &&
srcOp->getDatatype() <= REC_MAX_INTERVAL) {
if (srcOp->getLength() == 8)
flags = SWAP_EIGHT;
else if (srcOp->getLength() == 4)
flags = SWAP_FOUR;
else if (srcOp->getLength() == 2)
flags = SWAP_TWO;
else
assert(FALSE);
}
}
hashValue = hash(op_data[1], flags, length);
}
*(ULng32 *)op_data[0] = hashValue;
return ex_expr::EXPR_OK;
} // ExHDPHash::eval()
// --------------------------------------------------------------
// This function is used to combine two hash values to produce a new
// hash value. Used by Hash Partitioning. This function cannot change
// once Hash Partitioning is released! Defined for all data types,
// returns a 32 bit non-nullable hash value for the data item.
// --------------------------------------------------------------
ex_expr::exp_return_type ExHDPHashComb::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
assert(getOperand(0)->getStorageLength() == 4 &&
getOperand(1)->getStorageLength() == 4 &&
getOperand(2)->getStorageLength() == 4);
ULng32 op1, op2;
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
// One bit, circular shift
op1 = ((op1 << 1) | (op1 >> 31));
op1 = op1 ^ op2;
*((ULng32 *) op_data[0]) = op1;
return ex_expr::EXPR_OK;
} // ExHDPHashComb::eval()
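// A minimal standalone sketch of the combine step above: rotate the first
// hash left by one bit, then XOR in the second. The rotate makes the combine
// order-sensitive, so combining (a, b) and (b, a) generally yields different
// values. The name exHDPHashCombSketch is illustrative; this helper is not
// called by the executor.
static inline ULng32 exHDPHashCombSketch(ULng32 h1, ULng32 h2)
{
  h1 = (h1 << 1) | (h1 >> 31); // one-bit circular shift
  return h1 ^ h2;
}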
// ex_function_replace_null
//
ex_expr::exp_return_type
ex_function_replace_null::processNulls(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_replace_null::eval(char *op_data[],
CollHeap*,
ComDiagsArea **) {
Attributes *tgt = getOperand(0);
// Mark the result as non-null
if(tgt->getNullFlag())
ExpTupleDesc::clearNullValue(op_data[ -(2 * MAX_OPERANDS) ],
tgt->getNullBitIndex(),
tgt->getTupleFormat());
// If the input is NULL, replace it with the value in op_data[3]
if (! op_data[ - (2 * MAX_OPERANDS) + 1]) {
for(Lng32 i=0; i < tgt->getStorageLength(); i++)
op_data[0][i] = op_data[3][i];
}
else {
for(Lng32 i=0; i < tgt->getStorageLength(); i++)
op_data[0][i] = op_data[2][i];
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ex_function_mod
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ex_function_mod::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Int32 lenr = (Int32) getOperand(0)->getLength();
Int32 len1 = (Int32) getOperand(1)->getLength();
Int32 len2 = (Int32) getOperand(2)->getLength();
Int64 op1, op2, result;
switch (len1)
{
case 1:
op1 = *((Int8 *) op_data[1]);
break;
case 2:
op1 = *((short *) op_data[1]);
break;
case 4:
op1 = *((Lng32 *) op_data[1]);
break;
case 8:
op1 = *((Int64 *) op_data[1]);
break;
default:
ExRaiseFunctionSqlError(heap, diagsArea, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
switch (len2)
{
case 1:
op2 = *((Int8 *) op_data[2]);
break;
case 2:
op2 = *((short *) op_data[2]);
break;
case 4:
op2 = *((Lng32 *) op_data[2]);
break;
case 8:
op2 = *((Int64 *) op_data[2]);
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
if (op2 == 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_DIVISION_BY_ZERO,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
result = op1 % op2;
switch (lenr)
{
case 1:
*((Int8 *) op_data[0]) = (Int8) result;
break;
case 2:
*((short *) op_data[0]) = (short) result;
break;
case 4:
*((Lng32 *) op_data[0]) = (Lng32)result;
break;
case 8:
*((Int64 *) op_data[0]) = result;
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ex_function_mask
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ex_function_mask::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
// always assume that both operands and result are of the same
// (unsigned) type and length
// with built-in long long type we could also support 8 byte integers
ULng32 op1, op2, result;
switch (getOperand(0)->getStorageLength())
{
case 1:
op1 = *((UInt8 *) op_data[1]);
op2 = *((UInt8 *) op_data[2]);
if(getOperType() == ITM_MASK_SET) {
result = op1 | op2;
} else {
result = op1 & ~op2;
}
*((UInt8 *) op_data[0]) = (UInt8) result;
break;
case 2:
op1 = *((unsigned short *) op_data[1]);
op2 = *((unsigned short *) op_data[2]);
if(getOperType() == ITM_MASK_SET) {
result = op1 | op2;
} else {
result = op1 & ~op2;
}
*((unsigned short *) op_data[0]) = (unsigned short) result;
break;
case 4:
op1 = *((ULng32 *) op_data[1]);
op2 = *((ULng32 *) op_data[2]);
if(getOperType() == ITM_MASK_SET) {
result = op1 | op2;
} else {
result = op1 & ~op2;
}
*((ULng32 *) op_data[0]) = result;
break;
case 8:
{
Int64 lop1 = *((Int64 *) op_data[1]);
Int64 lop2 = *((Int64 *) op_data[2]);
Int64 lresult;
if(getOperType() == ITM_MASK_SET) {
lresult = lop1 | lop2;
} else {
lresult = lop1 & ~lop2;
}
*((Int64 *) op_data[0]) = lresult;
break;
}
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
// class ExFunctionShift
////////////////////////////////////////////////////////////////////
ex_expr::exp_return_type ExFunctionShift::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
if(getOperand(2)->getStorageLength() != 4) {
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
ULng32 shift = *((ULng32 *)op_data[2]);
ULng32 value, result;
switch (getOperand(0)->getStorageLength()) {
case 1:
value = *((UInt8 *) op_data[1]);
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((UInt8 *) op_data[0]) = (UInt8) result;
break;
case 2:
value = *((unsigned short *) op_data[1]);
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((unsigned short *) op_data[0]) = (unsigned short) result;
break;
case 4:
value = *((ULng32 *) op_data[1]);
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((ULng32 *) op_data[0]) = result;
break;
case 8:
{
Int64 value = *((Int64 *) op_data[1]);
Int64 result;
if(getOperType() == ITM_SHIFT_RIGHT) {
result = value >> shift;
} else {
result = value << shift;
}
*((Int64 *) op_data[0]) = result;
break;
}
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
static
ex_expr::exp_return_type getDoubleValue(double *dest,
char *source,
Attributes *operand,
CollHeap *heap,
ComDiagsArea** diagsArea)
{
switch(operand->getDatatype()) {
case REC_FLOAT64:
*dest = *(double *)(source);
return ex_expr::EXPR_OK;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
}
static
ex_expr::exp_return_type setDoubleValue(char *dest,
Attributes *operand,
double *source,
CollHeap *heap,
ComDiagsArea** diagsArea)
{
switch(operand->getDatatype()) {
case REC_FLOAT64:
*(double *)dest = *source;
return ex_expr::EXPR_OK;
default:
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
}
ex_expr::exp_return_type ExFunctionSVariance::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
double sumOfValSquared = 0;
double sumOfVal = 0;
double countOfVal = 1;
double avgOfVal;
double result = 0;
if(getDoubleValue(&sumOfValSquared, op_data[1], getOperand(1),
heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&sumOfVal, op_data[2], getOperand(2), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&countOfVal, op_data[3], getOperand(3), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
avgOfVal = sumOfVal/countOfVal;
if(countOfVal == 1) {
result = 0.0;
}
else {
result = (sumOfValSquared - (sumOfVal * avgOfVal)) / (countOfVal - 1);
if(result < 0.0) {
result = 0.0;
}
}
if(setDoubleValue(op_data[0], getOperand(0), &result, heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
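// Note: the clamped expression above is the one-pass sample variance,
// s^2 = (sum(x_i^2) - n * avg^2) / (n - 1) with avg = sum(x_i) / n.
// Clamping at zero absorbs floating-point rounding that can drive the
// numerator slightly negative when the values are nearly constant.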
ex_expr::exp_return_type ExFunctionSStddev::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
double sumOfValSquared = 0;
double sumOfVal = 0;
double countOfVal = 1;
double avgOfVal;
double result = 0;
if(getDoubleValue(&sumOfValSquared, op_data[1], getOperand(1),
heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&sumOfVal, op_data[2], getOperand(2), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
if(getDoubleValue(&countOfVal, op_data[3], getOperand(3), heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
avgOfVal = sumOfVal/countOfVal;
if(countOfVal == 1) {
result = 0.0;
}
else {
short err = 0;
result = (sumOfValSquared - (sumOfVal * avgOfVal)) / (countOfVal - 1);
if(result < 0.0) {
result = 0.0;
} else {
result = MathSqrt(result, err);
}
if (err)
{
ExRaiseSqlError(heap, diagsArea, EXE_BAD_ARG_TO_MATH_FUNC);
**diagsArea << DgString0("SQRT");
ExRaiseSqlError(heap, diagsArea, EXE_MAPPED_FUNCTION_ERROR);
**diagsArea << DgString0("STDDEV");
return ex_expr::EXPR_ERROR;
}
}
if(setDoubleValue(op_data[0], getOperand(0), &result, heap, diagsArea)) {
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExpRaiseErrorFunction::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
char catName[ComAnsiNamePart::MAX_IDENTIFIER_EXT_LEN+1];
char schemaName[ComAnsiNamePart::MAX_IDENTIFIER_EXT_LEN+1];
// Don't do anything with the op[] data
// Create a DiagsArea to return the SQLCODE and the ConstraintName
// and TableName.
if (raiseError())
ExRaiseSqlError(heap, diagsArea, (ExeErrorCode)getSQLCODE());
else
ExRaiseSqlWarning(heap, diagsArea, (ExeErrorCode)getSQLCODE());
// SQLCODE corresponding to Triggered Action Exception
if (getSQLCODE() == ComDiags_TrigActionExceptionSQLCODE)
{
assert(constraintName_ && tableName_);
extractCatSchemaNames(catName, schemaName, constraintName_);
*(*diagsArea) << DgTriggerCatalog(catName);
*(*diagsArea) << DgTriggerSchema(schemaName);
*(*diagsArea) << DgTriggerName(constraintName_);
extractCatSchemaNames(catName, schemaName, tableName_);
*(*diagsArea) << DgCatalogName(catName);
*(*diagsArea) << DgSchemaName(schemaName);
*(*diagsArea) << DgTableName(tableName_);
}
else if (getSQLCODE() == ComDiags_SignalSQLCODE) // Signal Statement
{
if (constraintName_)
*(*diagsArea) << DgString0(constraintName_); // The SQLSTATE
if (getNumOperands()==2)
{
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
op_data[1][len1] = '\0';
*(*diagsArea) << DgString1(op_data[1]); // The string expression
}
else
if (tableName_)
*(*diagsArea) << DgString1(tableName_); // The message
}
else
{
if (constraintName_)
{
extractCatSchemaNames(catName, schemaName, constraintName_);
*(*diagsArea) << DgConstraintCatalog(catName);
*(*diagsArea) << DgConstraintSchema(schemaName);
*(*diagsArea) << DgConstraintName(constraintName_);
}
if (tableName_)
{
extractCatSchemaNames(catName, schemaName, tableName_);
*(*diagsArea) << DgCatalogName(catName);
*(*diagsArea) << DgSchemaName(schemaName);
*(*diagsArea) << DgTableName(tableName_);
}
}
// If it's a warning, we should return a predictable boolean value.
*((ULng32*)op_data[0]) = 0;
if (raiseError())
return ex_expr::EXPR_ERROR;
else
return ex_expr::EXPR_OK;
}
// -----------------------------------------------------------------------
// methods for ExFunctionPack
// -----------------------------------------------------------------------
// Constructor.
ExFunctionPack::ExFunctionPack(Attributes** attr,
Space* space,
Lng32 width,
Lng32 base,
NABoolean nullsPresent)
: ex_function_clause(ITM_PACK_FUNC,3,attr,space),
width_(width), base_(base)
{
setNullsPresent(nullsPresent);
}
// Evaluator.
ex_expr::exp_return_type ExFunctionPack::eval(char* op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
char guard1 = op_data[0][-1];
char guard2 = op_data[0][getOperand(0)->getLength()];
// Extract no of rows already in the packed record.
Lng32 noOfRows;
str_cpy_all((char*)&noOfRows,op_data[0],sizeof(Lng32));
// Extract the packing factor.
Lng32 pf = *(Lng32 *)op_data[2];
// The clause returns an error for no more slots in the packed record.
if(noOfRows >= pf)
{
ExRaiseSqlError(heap,diagsArea,EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
// Whether the source is null.
char* nullFlag = op_data[-2*ex_clause::MAX_OPERANDS+1];
// If null bit map is present in the packed record.
if(nullsPresent())
{
// Offset of null bit from the beginning of the null bitmap.
Lng32 nullBitOffsetInBytes = noOfRows >> 3;
// Offset of null bit from the beginning of the byte it is in.
Lng32 nullBitOffsetInBits = noOfRows & 0x7;
// Extract the byte in which the null bit is in.
char* nullByte = op_data[0] + nullBitOffsetInBytes + sizeof(Int32);
// Used to set/unset the null bit.
unsigned char nullByteMask = (1 << nullBitOffsetInBits);
// Turn bit off/on depending on whether operand is null.
if(nullFlag == 0)
*nullByte |= nullByteMask; // set null bit on.
else
*nullByte &= (~nullByteMask); // set null bit off.
}
else if(nullFlag == 0)
{
// Bitmap is not present but the input is null: internal error.
ExRaiseSqlError(heap,diagsArea,EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
// We have contents to copy only if source is not null.
if(nullFlag != 0)
{
// Width of each packet in the packed record. A negative value means the width is given in bits.
if(width_ < 0)
{
Lng32 widthInBits = -width_;
// Length of data region which has already been occupied in bits.
Lng32 tgtBitsOccupied = (noOfRows * widthInBits);
// Byte offset for data of this packet from beginning of data region.
Lng32 tgtByteOffset = base_ + (tgtBitsOccupied >> 3);
// Bit offset for data of this packet from beginning of its byte.
Lng32 tgtBitOffset = (tgtBitsOccupied & 0x7);
// Byte offset of data source left to be copied.
Lng32 srcByteOffset = 0;
// Bit offset of data source from beginning of its byte to be copied.
Lng32 srcBitOffset = 0;
// No of bits to copy in total.
Lng32 bitsToCopy = widthInBits;
// There are still bits remaining to be copied.
while(bitsToCopy > 0)
{
// Pointer to the target byte.
char* tgtBytePtr = (op_data[0] + tgtByteOffset);
// No of bits left in the target byte.
Lng32 bitsLeftInTgtByte = 8 - tgtBitOffset;
// No of bits left in the source byte.
Lng32 bitsLeftInSrcByte = 8 - srcBitOffset;
Lng32 bitsToCopyThisRound = (bitsLeftInTgtByte > bitsLeftInSrcByte ?
bitsLeftInSrcByte : bitsLeftInTgtByte);
if(bitsToCopyThisRound > bitsToCopy) bitsToCopyThisRound = bitsToCopy;
// Mask has ones in those positions where bits will be copied.
unsigned char mask = ((0xFF >> tgtBitOffset) <<
(8 - bitsToCopyThisRound)) >>
(8 - tgtBitOffset - bitsToCopyThisRound);
// Clear target bits. Keep other bits unchanged in the target byte.
(*tgtBytePtr) &= (~mask);
// Align source bits with the destination. Mask off other bits.
unsigned char srcByte = *(op_data[1] + srcByteOffset);
srcByte = ((srcByte >> srcBitOffset) << tgtBitOffset) & mask;
// Make the copy.
(*tgtBytePtr) |= srcByte;
// Move source byte and bit offsets.
srcBitOffset += bitsToCopyThisRound;
if(srcBitOffset >= 8)
{
srcByteOffset++;
srcBitOffset -= 8;
}
// Move target byte and bit offsets.
tgtBitOffset += bitsToCopyThisRound;
if(tgtBitOffset >= 8)
{
tgtByteOffset++;
tgtBitOffset -= 8;
}
bitsToCopy -= bitsToCopyThisRound;
}
}
else // width_ > 0
{
// Width in bytes: we can copy full strings of bytes.
Lng32 tgtByteOffset = base_ + (noOfRows * width_);
str_cpy_all(op_data[0]+tgtByteOffset,op_data[1],width_);
}
}
// Update the "noOfRows" in the packed record.
noOfRows++;
str_cpy_all(op_data[0],(char*)&noOfRows,sizeof(Lng32));
// $$$ supported as a CHAR rather than a VARCHAR for now.
// getOperand(0)->
// setVarLength(offset+lengthToCopy,op_data[-ex_clause::MAX_OPERANDS]);
if(guard1 != op_data[0][-1] ||
guard2 != op_data[0][getOperand(0)->getLength()]) {
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
// Signal a completely packed record to the caller.
if(noOfRows == pf) return ex_expr::EXPR_TRUE;
// Signal an incompletely packed record to the caller.
return ex_expr::EXPR_FALSE;
}
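// A minimal standalone sketch of the byte-aligned packing case above
// (width_ > 0): the packed record starts with an Int32 row count, followed
// at offset 'base' by fixed slots of 'width' bytes each. The name
// packByteWidthSketch is illustrative; this helper is not called by the
// executor and omits the null bitmap handled by the real clause.
static Lng32 packByteWidthSketch(char *packedRec, Lng32 base, Lng32 width,
                                 Lng32 pf, char *src)
{
  Lng32 noOfRows;
  str_cpy_all((char*)&noOfRows, packedRec, sizeof(Lng32));
  if (noOfRows >= pf)
    return -1;                                    // record already full
  str_cpy_all(packedRec + base + (noOfRows * width), src, width);
  noOfRows++;
  str_cpy_all(packedRec, (char*)&noOfRows, sizeof(Lng32));
  return noOfRows;                                // rows now in the record
}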
// ExUnPackCol::eval() ------------------------------------------
// The ExUnPackCol clause extracts a set of bits from a CHAR value.
// The set of bits to extract is described by a base offset, a width,
// and an index. The offset and width are known at compile time, but
// the index is a run time variable. ExUnPackCol clause also gets
// the null indicator of the result from a bitmap within the CHAR
// field.
//
ex_expr::exp_return_type
ExUnPackCol::eval(char *op_data[], CollHeap *heap, ComDiagsArea **diagsArea)
{
// The width of the extract in BITS.
//
Lng32 width = width_;
// The base offset of the data in BYTES.
//
Lng32 base = base_;
// Boolean indicating if the NULL Bitmap is present.
// If it is present, it starts at a 4-byte (sizeof(Int32)) offset.
//
NABoolean np = nullsPresent();
// Which piece of data are we extracting.
//
Lng32 index = *(Lng32 *)op_data[2];
// NULL Processing...
//
if(np) {
// The bit to be extracted.
//
Lng32 bitOffset = index;
// The byte of the CHAR field containing the bit.
//
Lng32 byteOffset = sizeof(Int32) + (bitOffset >> 3);
// The bit of the byte at byteOffset to be extracted.
//
bitOffset = bitOffset & 0x7;
// A pointer to the null indicators of the operands.
//
char **null_data = &op_data[-2 * ex_clause::MAX_OPERANDS];
// The mask used to test the NULL bit.
//
UInt32 mask = 1 << bitOffset;
// The byte containing the NULL Flag.
//
UInt32 byte = op_data[1][byteOffset];
// Is the NULL Bit set?
//
if(byte & mask) {
// The value is NULL, so set the result to NULL, and
// return since we do not need to extract the data.
//
*(short *)null_data[0] = (short)0xFFFF;
return ex_expr::EXPR_OK;
} else {
// The value is non-NULL, so set the indicator,
// continue to extract the data value.
//
*(short *)null_data[0] = 0;
}
}
// Byte masks used for widths (1-8) of bit extracts.
//
const UInt32 masks[] = {0,1,3,7,15,31,63,127,255};
// Handle some special cases:
// Otherwise do a generic bit extract.
//
if(width == 8 || width == 4 || width == 2 || width == 1) {
// Items per byte for special case widths (1-8).
//
const UInt32 itemsPerBytes[] = {0,8,4,2,2,1,1,1,1};
// Amount to shift the index to get a byte index for the
// special case widths.
//
const UInt32 itemsPerByteShift[] = {0,3,2,1,1,0,0,0,0};
// Extracted value.
//
UInt32 value;
// An even more special case.
//
if(width == 8) {
// Must use unsigned assignment so that sign extension is not done.
// Later, when signed bit-precision integers are supported, we will
// need a special case for those.
//
value = (unsigned char)op_data[1][base + index];
} else {
// The number of items in a byte.
//
UInt32 itemsPerByte = itemsPerBytes[width];
// The amount to shift the index to get a byte offset.
//
UInt32 shift = itemsPerByteShift[width];
// The offset of the byte containing the value.
//
Lng32 byteIndex = index >> shift;
// The index into the byte of the value.
//
Lng32 itemIndex = index & ( itemsPerByte - 1);
// A mask to extract an item of size width.
//
UInt32 mask = masks[width];
// The byte containing the item.
//
value = op_data[1][base + byteIndex];
// Shift the byte, so that the value to be
// extracted is in the least significant bits.
//
value = value >> (width * itemIndex);
// Clear all bits except those of the value.
//
value = value & mask;
}
// Copy value to result.
//
switch(getOperand(0)->getLength()) {
case 1:
*(unsigned char *)op_data[0] = value;
return ex_expr::EXPR_OK;
case 2:
*(unsigned short *)op_data[0] = value;
return ex_expr::EXPR_OK;
case 4:
*(ULng32 *)op_data[0] = value;
return ex_expr::EXPR_OK;
default:
// ERROR - This should never happen.
//
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
// Handle the special case of a whole-byte copy.
//
if((width % 8) == 0) {
width = width/8;
str_cpy_all(op_data[0], &op_data[1][base + (index * width)], width);
return ex_expr::EXPR_OK;
}
char guard1 = op_data[0][-1];
char guard2 = op_data[0][getOperand(0)->getLength()];
// The general case of arbitrary bit lengths that can span byte boundaries.
//
// The offset to the value in bits.
//
Lng32 bitOffset = index * width;
// The offset to the last bit of the value in bits.
//
Lng32 bitOffsetEnd = bitOffset + width - 1;
// The offset to the byte containing the first bit of the value.
// in bytes.
//
Lng32 byteOffset = base + (bitOffset >> 3);
// The offset to the byte containing the first bit beyond the value.
// in bytes.
//
Lng32 byteOffsetEnd = base + (bitOffsetEnd >> 3);
// The offset of the first bit in the byte.
//
bitOffset = bitOffset & 0x7;
// The amount to shift the byte to the right to align
// the lower portion.
//
Lng32 rshift = bitOffset;
// The amount to shift the byte to the left to align
// the upper portion.
//
Lng32 lshift = 8 - bitOffset;
// An index into the destination.
//
Lng32 dindex = 0;
// Copy all the bits to the destination.
//
Int32 i = byteOffset;
for(; i <= byteOffsetEnd; i++) {
// Get a byte containing bits of the value.
//
unsigned char byte = op_data[1][i];
if(dindex > 0) {
// After the first byte, must copy the upper
// portion of the byte to the previous byte of
// the result. This is the second time writing
// to this byte.
//
op_data[0][dindex - 1] |= byte << lshift;
}
if(dindex < (Lng32) getOperand(0)->getLength()) {
// Copy the lower portion of this byte of the result
// to the destination. This is the first time this
// byte is written to.
//
op_data[0][dindex] = byte >> rshift;
}
dindex++;
}
// Clear all bits of the result that did not come
// from the extracted value.
//
for(i = 0; i < (Lng32) getOperand(0)->getLength(); i++) {
unsigned char mask = (width > 7) ? 0xFF : masks[width];
op_data[0][i] &= mask;
width -= 8;
width = (width < 0) ? 0 : width;
}
if(guard1 != op_data[0][-1] ||
guard2 != op_data[0][getOperand(0)->getLength()]) {
ExRaiseSqlError(heap, diagsArea, EXE_INTERNAL_ERROR);
return ex_expr::EXPR_ERROR;
}
return ex_expr::EXPR_OK;
}
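// A minimal standalone sketch of the special-case extract above for widths
// of 1, 2, 4 or 8 bits: item 'index' of width 'width' lives in byte
// base + index / (8 / width), at bit position width * (index % (8 / width));
// shift it down and mask. The name unpackSmallItemSketch is illustrative;
// this helper is not called by the executor.
static UInt32 unpackSmallItemSketch(const char *data, Lng32 base,
                                    UInt32 width, Lng32 index)
{
  UInt32 itemsPerByte = 8 / width;
  UInt32 byte = (unsigned char)data[base + index / itemsPerByte];
  UInt32 shift = width * ((UInt32)index % itemsPerByte);
  UInt32 mask = (1u << width) - 1;
  return (byte >> shift) & mask;
}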
ex_expr::exp_return_type ex_function_translate::eval(char *op_data[],
CollHeap* heap,
ComDiagsArea** diagsArea)
{
Int32 copyLen = 0;
Int32 convertedLen = 0;
Int32 convType = get_conv_type();
Attributes * op0 = getOperand(0);
Attributes * op1 = getOperand(1);
ULng32 convFlags = (flags_ & TRANSLATE_FLAG_ALLOW_INVALID_CODEPOINT ?
CONV_ALLOW_INVALID_CODE_VALUE : 0);
return convDoIt(op_data[1],
op1->getLength(op_data[-MAX_OPERANDS + 1]),
op1->getDatatype(),
op1->getPrecision(),
(convType == CONV_UTF8_F_UCS2_V) ? (Int32)(CharInfo::UTF8) : op1->getScale(),
op_data[0],
op0->getLength(),
op0->getDatatype(),
op0->getPrecision(),
(convType == CONV_UCS2_F_UTF8_V) ? (Int32)(CharInfo::UTF8) : op0->getScale(),
op_data[-MAX_OPERANDS],
op0->getVCIndicatorLength(),
heap,
diagsArea,
(ConvInstruction)convType,
NULL,
convFlags);
}
void ExFunctionRandomNum::initSeed(char *op_data[])
{
if (seed_==0)
{
if (simpleRandom())
{
// start with 1 and go up to max
seed_ = 1;
return;
}
if (getNumOperands() == 2)
{
// seed is specified as an argument. Use it.
seed_ = *(ULng32 *)op_data[1];
return;
}
// Pick an initial seed. According to the reference given below
// (in the genRand function), all initial seeds between 1 and
// 2147483646 are equally valid. So, we just need to pick one
// in this range. Do this based on a timestamp.
// Use ex_function_current to get timestamp.
//
char currBuff[32];
char *opData[1];
opData[0] = currBuff;
ex_function_current currentFun;
currentFun.eval(&opData[0], 0, 0);
// Extract year, month, etc.
//
char *p = currBuff;
short year = *((short*) p);
p += sizeof(short);
char month = *p++;
char day = *p++;
char hour = *p++;
char minute = *p++;
char second = *p++;
Lng32 fraction = *((Lng32*) p);
// Local variables year, ..., fraction are now initialized.
// From the values of these variables, generate a seed in the
// desired range.
Lng32 x = year * month * day;
if (hour) x *= hour;
p = (char*) &x;
assert(sizeof(Lng32)==4);
p[0] |= (second<<1);
p[1] |= (minute<<1);
p[2] |= (minute<<2);
p[3] |= second;
seed_ = x + fraction;
if (seed_<0)
seed_ += 2147483647;
if ( seed_ < 1 ) seed_ = 1;
}
}
void ExFunctionRandomNum::genRand(char *op_data[])
{
// Initialize seed if not already done
initSeed(op_data);
Lng32 t = 0;
const Lng32 M = 2147483647;
if (simpleRandom())
{
t = seed_ + 1;
}
else
{
// Algorithm is taken from "Random Number Generators: Good Ones
// Are Hard To Find", by Stephen K. Park and Keith W. Miller,
// Communications of the ACM, Volume 31, Number 10, Oct 1988.
const Lng32 A = 16807;
const Lng32 Q = 127773;
const Lng32 R = 2836;
Lng32 h = seed_/Q;
Lng32 l = seed_%Q;
t = A*l-R*h;
}
if (t>0)
seed_ = t;
else
seed_ = t + M;
}
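// A minimal standalone sketch of one step of the Park/Miller "minimal
// standard" generator used above. Schrage's decomposition M = A*Q + R
// (Q = M/A, R = M%A) evaluates (A * seed) mod M without overflowing 32-bit
// signed arithmetic, because A*l - R*h always stays in (-M, M). The seed
// must lie in [1, M-1]. The name parkMillerNextSketch is illustrative; this
// helper is not called by the executor.
static Lng32 parkMillerNextSketch(Lng32 seed)
{
  const Lng32 M = 2147483647;  // 2^31 - 1, a Mersenne prime
  const Lng32 A = 16807;       // 7^5, a primitive root modulo M
  const Lng32 Q = 127773;      // M / A
  const Lng32 R = 2836;        // M % A
  Lng32 t = A * (seed % Q) - R * (seed / Q);
  return (t > 0) ? t : t + M;
}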
ex_expr::exp_return_type ExFunctionRandomNum::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
genRand(op_data); // generates and sets the random number in seed_
*((ULng32*)op_data[0]) = (ULng32) seed_;
return ex_expr::EXPR_OK;
}
void ExFunctionRandomSelection::initDiff()
{
if (difference_ == -1)
{
difference_ = 0;
while (selProbability_ >= 1.0)
{
difference_++;
selProbability_ -= 1.0;
}
// Normalize the selProbability to a 32 bit integer and store in
// normProbability
normProbability_ = (Lng32) (selProbability_ * 0x7fffffff);
// reset the selProbability_ to original value in case this function
// gets called again
selProbability_ += difference_;
}
}
ex_expr::exp_return_type ExFunctionRandomSelection::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
initDiff(); // gets executed only once
genRand(NULL); // generates and sets the random number in seed_
if (getRand() < normProbability_)
*((ULng32*)op_data[0]) = (ULng32) (difference_ + 1);
else
*((ULng32*)op_data[0]) = (ULng32) (difference_);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExHash2Distrib::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 keyValue = *(ULng32*)op_data[1];
ULng32 numParts = *(ULng32*)op_data[2];
ULng32 partNo =
(ULng32)(((Int64)keyValue * (Int64)numParts) >> 32);
*(ULng32*)op_data[0] = partNo;
return ex_expr::EXPR_OK;
}
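// A minimal standalone sketch of the range reduction above: treating the
// 32-bit hash as the fraction keyValue / 2^32, the product
// (keyValue * numParts) >> 32 maps it onto [0, numParts) with nearly
// uniform bucket sizes and no division. The name hash2PartSketch is
// illustrative; this helper is not called by the executor.
static ULng32 hash2PartSketch(ULng32 keyValue, ULng32 numParts)
{
  return (ULng32)(((Int64)keyValue * (Int64)numParts) >> 32);
}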
ex_expr::exp_return_type ExProgDistrib::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 keyValue = *(Lng32*)op_data[1];
ULng32 totNumValues = *(Lng32*) op_data[2];
ULng32 resultValue = 1;
ULng32 offset = keyValue;
ULng32 i = 2;
while(offset >= i && i <= totNumValues) {
Lng32 n1 = offset % i;
Lng32 n2 = offset / i;
if (n1 == 0) {
offset = (i-1) * (n2 - 1) + resultValue;
resultValue = i;
i++;
} else {
Lng32 n3 = n2 << 1;
if(n1 > n3) {
Lng32 n = n1/n3 + (n1%n3 != 0);
offset -= n2 * n;
i += n;
} else {
offset -= n2;
i++;
}
}
}
*((ULng32 *)op_data[0]) = resultValue - 1;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExProgDistribKey::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 value = *(ULng32*)op_data[1];
ULng32 offset = *(ULng32*)op_data[2];
ULng32 totNumValues = *(ULng32*)op_data[3];
ULng32 uniqueVal = offset >> 16;
offset = offset & 0x0000FFFF;
value++;
ULng32 i = totNumValues;
while(i >= 2) {
if (value==i) {
value = (ULng32) (offset-1)%(i-1) + 1;
offset = ((offset-1)/(i-1) + 1) * i;
i--;
} else if(offset < i) {
i = (offset>value?offset:value);
} else {
offset = offset + (offset-1)/(i-1);
i--;
}
}
Int64 result = offset;
result = ((result << 16) | uniqueVal) << 16;
*((Int64 *)op_data[0]) = result;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExPAGroup::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
ULng32 partNum = *(ULng32*)op_data[1];
ULng32 totNumGroups = *(ULng32*) op_data[2];
ULng32 totNumParts = *(ULng32*) op_data[3];
ULng32 scaleFactor = totNumParts / totNumGroups;
ULng32 transPoint = (totNumParts % totNumGroups);
ULng32 groupPart;
if(partNum < (transPoint * (scaleFactor + 1))) {
groupPart = partNum / (scaleFactor + 1);
} else {
groupPart = (partNum - transPoint) / scaleFactor;
}
*((ULng32 *)op_data[0]) = groupPart;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionRangeLookup::eval(char *op_data[],
CollHeap*,
ComDiagsArea**)
{
// Two operands get passed to ExFunctionRangeLookup: a pointer to
// the actual, encoded key, and a pointer into a constant array
// that contains the encoded split ranges. The result is a 4 byte
// integer, not NULL, that contains the partition number.
char *encodedKey = op_data[1];
char *sKeys = op_data[2];
Lng32 *result = (Lng32 *) op_data[0];
// Now perform a binary search in sKeys
Lng32 lo = 0;
Lng32 hi = numParts_; // note we have one more entry than parts
Lng32 probe;
Lng32 cresult;
while (hi-lo > 1)
{
// try the element in the middle (may round down)
probe = (lo+hi)/2;
// compare our encoded key with that middle split range
cresult = str_cmp(encodedKey,
&sKeys[probe*partKeyLen_],
partKeyLen_);
if (cresult <= 0)
hi = probe; // search first half, discard second half
if (cresult >= 0)
lo = probe; // search second half, discard first half
}
// Once we have narrowed it down to a difference between lo and hi
// of 0 or 1, we know that lo points to the index of our partition
// because the partition number must be greater or equal to lo and
// less than hi. Remember that we set hi to one more than we had
// partition numbers.
*result = lo;
return ex_expr::EXPR_OK;
}
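// A minimal standalone sketch of the binary search above over the
// numParts + 1 encoded split-range boundaries. The invariant is
// sKeys[lo] <= encodedKey < sKeys[hi]; when hi - lo reaches 1, lo is the
// partition number. On an exact match (c == 0) both bounds move to the
// probe, so the loop terminates immediately. The name rangeLookupSketch is
// illustrative; this helper is not called by the executor.
static Lng32 rangeLookupSketch(char *encodedKey, char *sKeys,
                               Lng32 numParts, Lng32 keyLen)
{
  Lng32 lo = 0;
  Lng32 hi = numParts; // one more boundary than partitions
  while (hi - lo > 1)
  {
    Lng32 probe = (lo + hi) / 2;
    Lng32 c = str_cmp(encodedKey, &sKeys[probe * keyLen], keyLen);
    if (c <= 0)
      hi = probe; // key sorts at or before this boundary
    if (c >= 0)
      lo = probe; // key sorts at or after this boundary
  }
  return lo;
}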
ExRowsetArrayScan::ExRowsetArrayScan() {}
ExRowsetArrayRowid::ExRowsetArrayRowid() {}
ExRowsetArrayInto::ExRowsetArrayInto() {}
ExRowsetArrayScan::ExRowsetArrayScan(Attributes **attr,
Space *space,
Lng32 maxNumElem,
Lng32 elemSize,
NABoolean elemNullInd)
: maxNumElem_(maxNumElem),
elemSize_(elemSize),
elemNullInd_(elemNullInd),
ex_function_clause(ITM_ROWSETARRAY_SCAN, 3, attr, space)
{
}
ExRowsetArrayRowid::ExRowsetArrayRowid(Attributes **attr,
Space *space,
Lng32 maxNumElem)
: maxNumElem_(maxNumElem),
ex_function_clause(ITM_ROWSETARRAY_ROWID, 3, attr, space)
{
}
ExRowsetArrayInto::ExRowsetArrayInto(Attributes **attr,
Space *space,
Lng32 maxNumElem,
Lng32 elemSize,
NABoolean elemNullInd)
: maxNumElem_(maxNumElem),
numElem_(0),
elemSize_(elemSize),
elemNullInd_(elemNullInd),
ex_function_clause(ITM_ROWSETARRAY_INTO, 3, attr, space)
{
}
// ExRowsetArrayScan::eval() ------------------------------------------
// The ExRowsetArrayScan clause extracts an element of the Rowset array
// The size of the element is known at compile time, but the index is a
// run time variable.
ex_expr::exp_return_type
ExRowsetArrayScan::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to the result
// op_data[1] points to the array
// op_data[2] points to the index
Lng32 index = *(Lng32 *)op_data[2];
if (index < 0 || index >= maxNumElem_)
{
// The index cannot be greater than the dimension of the array
// It is likely that there was an item expression evaluated at
// execution time to obtain the rowsetSize which is greater than
// the maximum allowed.
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_INDEX_OUTOF_RANGE);
**diagsArea << DgSqlCode(-EXE_ROWSET_INDEX_OUTOF_RANGE);
return ex_expr::EXPR_ERROR;
}
Attributes *ResultAttr = getOperand(0);
Attributes *SourceAttr = getOperand(1);
Lng32 size = ResultAttr->getStorageLength();
char *SourceElemPtr = &op_data[1][(index * size) + sizeof(Lng32)];
// NULL Processing...
if(elemNullInd_) {
// A pointer to the null indicators of the operands.
char **ResultNullData = &op_data[-2 * ex_clause::MAX_OPERANDS];
char *SourceElemIndPtr = SourceElemPtr;
SourceElemPtr += SourceAttr->getNullIndicatorLength();
// Set the indicator
if (ResultAttr->getNullFlag()) {
str_cpy_all(ResultNullData[0], SourceElemIndPtr,
SourceAttr->getNullIndicatorLength());
}
if ( ExpTupleDesc::isNullValue( SourceElemIndPtr,
SourceAttr->getNullBitIndex(),
SourceAttr->getTupleFormat() ) )
{
// The value is NULL, return since we do not need to extract the data.
return ex_expr::EXPR_NULL;
}
}
// For SQLVarChars, we have to copy both length and value fields.
// op_data[-ex_clause::MAX_OPERANDS] points to the length field of the
// SQLVarChar;
// The size of the field is sizeof(short) for rowset SQLVarChars.
if(SourceAttr->getVCIndicatorLength() > 0){
str_cpy_all((char*)op_data[-ex_clause::MAX_OPERANDS],
(char*)(&op_data[-ex_clause::MAX_OPERANDS+1][index*size]),
SourceAttr->getVCIndicatorLength()); //sizeof(short));
SourceElemPtr += SourceAttr->getVCIndicatorLength();
str_cpy_all(op_data[0], SourceElemPtr, size - SourceAttr->getVCIndicatorLength());
}
else {
// Note we do not have variable length for host variables. But we may not
// need to copy the whole length for strings.
str_cpy_all(op_data[0], SourceElemPtr, size);
}
return ex_expr::EXPR_OK;
}
Long ExRowsetArrayScan::pack(void * space)
{
return packClause(space, sizeof(ExRowsetArrayScan));
}
Long ExRowsetArrayRowid::pack(void * space)
{
return packClause(space, sizeof(ExRowsetArrayRowid));
}
Long ExRowsetArrayInto::pack(void * space)
{
return packClause(space, sizeof(ExRowsetArrayInto));
}
// ExRowsetArrayRowid::eval() ------------------------------------------
// The ExRowsetArrayRowid clause returns the value of the current index
ex_expr::exp_return_type
ExRowsetArrayRowid::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to the result
// op_data[1] points to the array
// op_data[2] points to the index
// The width of each data item in bytes
Lng32 index = *(Lng32 *)op_data[2];
if (index < 0 || index >= maxNumElem_)
{
// The index cannot be greater than the dimension of the array
// It is likely that there was an item expression evaluated at
// execution time to obtain the rowsetSize which is greater than
// the maximum allowed.
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_INDEX_OUTOF_RANGE);
**diagsArea << DgSqlCode(-EXE_ROWSET_INDEX_OUTOF_RANGE);
return ex_expr::EXPR_ERROR;
}
// Note we do not have variable length for host variables. But we may not
// need to copy the whole length for strings.
str_cpy_all(op_data[0], (char *)&index, sizeof(index));
return ex_expr::EXPR_OK;
}
// ExRowsetArrayInto::eval() ------------------------------------------
// The ExRowsetArrayInto clause appends a value into the Rowset array
// The size of the element is known at compile time
ex_expr::exp_return_type
ExRowsetArrayInto::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to the array (Result)
// op_data[1] points to the value to insert
// op_data[2] points to the rowset size expression
Lng32 runtimeMaxNumElem = *(Lng32 *)op_data[2];
if (numElem_ >= runtimeMaxNumElem || numElem_ >= maxNumElem_) {
// Overflow, we cannot add more elements to this rowset array
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_OVERFLOW);
**diagsArea << DgSqlCode(-EXE_ROWSET_OVERFLOW);
return ex_expr::EXPR_ERROR;
}
// Get number of rows stored in the array
Lng32 nrows;
str_cpy_all((char*)&nrows,op_data[0],sizeof(Lng32));
if (nrows >= runtimeMaxNumElem || nrows >= maxNumElem_) {
// Overflow, we cannot add more elements to this rowset array
ExRaiseSqlError(heap, diagsArea, EXE_ROWSET_OVERFLOW);
**diagsArea << DgSqlCode(-EXE_ROWSET_OVERFLOW);
return ex_expr::EXPR_ERROR;
}
Attributes *resultAttr = getOperand(0);
NABoolean resultIsNull = FALSE;
char *sourceNullData = op_data[-2 * ex_clause::MAX_OPERANDS + 1];
Attributes *sourceAttr = getOperand(1);
Lng32 elementSize = ((SimpleType *) resultAttr)->getStorageLength();
char *resultElemPtr = &op_data[0][(nrows * elementSize) +
sizeof (Lng32)];
// NULL Processing...
if (elemNullInd_) {
char *resultElemIndPtr = resultElemPtr;
// Set the indicator
if (sourceAttr->getNullFlag() && sourceNullData == 0) {
ExpTupleDesc::setNullValue(resultElemIndPtr,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat());
resultIsNull = TRUE;
} else {
ExpTupleDesc::clearNullValue(resultElemIndPtr,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat());
}
} else if (sourceAttr->getNullFlag() && sourceNullData == 0) {
// Source is null, but we do not have a way to express it
ExRaiseSqlError(heap, diagsArea, EXE_MISSING_INDICATOR_VARIABLE);
**diagsArea << DgSqlCode(-EXE_MISSING_INDICATOR_VARIABLE);
return ex_expr::EXPR_ERROR;
}
// Copy the result if not null
// For SQLVarChars, copy both val and len fields.
if (resultIsNull == FALSE){
if (DFS2REC::isSQLVarChar(resultAttr->getDatatype())) {
unsigned short VCLen = 0;
str_cpy_all((char *) &VCLen,
(char*)op_data[-ex_clause::MAX_OPERANDS + 1],
resultAttr->getVCIndicatorLength());
str_cpy_all( resultElemPtr+resultAttr->getNullIndicatorLength(),
(char *) &VCLen,
resultAttr->getVCIndicatorLength());
str_cpy_all(
resultElemPtr+resultAttr->getNullIndicatorLength()+
resultAttr->getVCIndicatorLength(),
op_data[1], VCLen);
}
else {
str_cpy_all(resultElemPtr + resultAttr->getNullIndicatorLength(),
op_data[1], resultAttr->getLength());
} // if isSQLVarChar
} // if resultIsNULL
// Update the number of elements in the object associated with the array
// and the array itself
nrows++;
str_cpy_all(op_data[0],(char*)&nrows,sizeof(Lng32));
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_nullifzero::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Attributes *tgtOp = getOperand(0);
char * tgt = op_data[0];
char * tgtNull = op_data[-2 * MAX_OPERANDS];
char * src = op_data[1];
Lng32 srcLen = getOperand(1)->getLength();
NABoolean resultIsNull = TRUE;
for (Int32 i = 0; i < srcLen; i++)
{
tgt[i] = src[i];
if (src[i] != 0)
{
resultIsNull = FALSE;
}
}
if (resultIsNull)
{
ExpTupleDesc::setNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
else
{
ExpTupleDesc::clearNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
return ex_expr::EXPR_OK;
}
//
// NVL(e1, e2) returns e2 if e1 is NULL otherwise e1. NVL(e1, e2) is
// equivalent to ANSI/ISO
// COALESCE(e1, e2)
// or,
// CASE WHEN e1 IS NULL THEN e2 ELSE e1 END
// Both arguments can be nullable and actually null; they both can
// be constants as well.
// NVL() on CHAR type expressions is mapped to CASE. ISNULL(e1, e2) is
// mapped into NVL(e1, e2)
// Datatypes of e1 and e2 must be comparable/compatible.
//
ex_expr::exp_return_type ex_function_nvl::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
// Common index into op_data[] to access Null Indicators
Int32 opNullIdx = -2 * MAX_OPERANDS;
Attributes *tgtOp = getOperand(0);
Attributes *arg1 = getOperand(1);
Attributes *arg2 = getOperand(2);
char * tgt = op_data[0];
char * tgtNull = op_data[opNullIdx];
char * src;
UInt32 srcLen;
NABoolean resultIsNull = TRUE;
// As of today, NVL() on CHAR types becomes CASE. So make sure we are
// not dealing with any CHAR types
assert(!DFS2REC::isAnyCharacter(arg1->getDatatype()) &&
!DFS2REC::isAnyCharacter(arg2->getDatatype()));
// Locate the operand that is not null: if both are null
// resultIsNull will still be TRUE and we will just set the
// NULL flag of the result. If any operand is NOT NULL we copy
// that value into result and clear NULL flag of the result.
if (!arg1->getNullFlag() || op_data[opNullIdx + 1])
{
// First operand is either NOT NULLABLE or NON NULL Value.
// This is the result.
src = op_data[1];
srcLen = arg1->getLength();
resultIsNull = FALSE;
}
else
{
// Second operand could be the result, if it is not null.
src = op_data[2];
srcLen = arg2->getLength();
// Second operand is either NOT NULLABLE or NON NULL Value.
// This is the result.
if (!arg2->getNullFlag() || op_data[opNullIdx + 2])
resultIsNull = FALSE;
}
if (resultIsNull)
{
// Result must be nullable
assert(tgtOp->getNullFlag());
ExpTupleDesc::setNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
else
{
// clear nullflag of result if it is nullable
if (tgtOp->getNullFlag())
ExpTupleDesc::clearNullValue(tgtNull,
tgtOp->getNullBitIndex(),
tgtOp->getTupleFormat());
}
// Copy src to result: this could be NULL
assert((UInt32)(tgtOp->getLength()) >= srcLen);
str_cpy_all(tgt, src, srcLen);
return ex_expr::EXPR_OK;
}
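// A minimal standalone sketch of the NVL decision above: an operand supplies
// the result as soon as it is either declared NOT NULL or carries a non-null
// indicator (in this file a null indicator pointer of 0 means the value is
// null); only when both operands are null does the nullable result become
// NULL. The name nvlPickSketch is illustrative; this helper is not called by
// the executor.
static char * nvlPickSketch(NABoolean arg1Nullable, char *arg1Ind, char *arg1Val,
                            NABoolean arg2Nullable, char *arg2Ind, char *arg2Val)
{
  if (!arg1Nullable || arg1Ind)
    return arg1Val;   // e1 is not null: result is e1
  if (!arg2Nullable || arg2Ind)
    return arg2Val;   // e2 is not null: result is e2
  return 0;           // both null: result is NULL
}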
ex_expr::exp_return_type ex_function_json_object_field_text::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
// search for operand 1
Lng32 len1 = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
if ( cs == CharInfo::UTF8 )
{
Int32 prec1 = ((SimpleType *)getOperand(1))->getPrecision();
len1 = Attributes::trimFillerSpaces( op_data[1], prec1, len1, cs );
}
// in operand 2
Lng32 len2 = getOperand(2)->getLength(op_data[-MAX_OPERANDS+2]);
if ( cs == CharInfo::UTF8 )
{
Int32 prec2 = ((SimpleType *)getOperand(2))->getPrecision();
len2 = Attributes::trimFillerSpaces( op_data[2], prec2, len2, cs );
}
char *rltStr = NULL;
JsonReturnType ret = json_extract_path_text(&rltStr, op_data[1], 1, op_data[2]);
if (ret != JSON_OK)
{
ExRaiseJSONError(heap, diagsArea, ret);
return ex_expr::EXPR_ERROR;
}
if (rltStr != NULL)
{
Lng32 rltLen = str_len(rltStr)+1;
str_cpy_all(op_data[0], rltStr, rltLen);
free(rltStr);
// If result is a varchar, store the length of substring
// in the varlen indicator.
if (getOperand(0)->getVCIndicatorLength() > 0)
getOperand(0)->setVarLength(rltLen, op_data[-MAX_OPERANDS]);
}
else
getOperand(0)->setVarLength(0, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
//
// Clause used to clear header bytes for both disk formats
// SQLMX_FORMAT and SQLMX_ALIGNED_FORMAT. The number of bytes to clear
// differs between the two formats.
// This clause is only generated for insert expressions and update expressions
// (updates that are non-optimized since olt optimized updates do a strcpy
// of the old image and then update the specific columns).
ex_expr::exp_return_type ExHeaderClause::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
char *tgtData = op_data[0];
Attributes *tgtOp = getOperand(0);
// Clear the entire header (not the VOA area)
str_pad( tgtData, (Int32)adminSz_, '\0' );
if ( bitmapOffset_ > 0 )
((ExpAlignedFormat *)tgtData)->setBitmapOffset( bitmapOffset_ );
// Cannot use the tgt attribute's offset value here: for the aligned
// format this may not be the first fixed field, because the fixed
// fields are re-ordered.
if ( isSQLMXAlignedFormat() )
((ExpAlignedFormat *)tgtData)->setFirstFixedOffset( firstFixedOffset_ );
else
ExpTupleDesc::setFirstFixedOffset( tgtData,
firstFixedOffset_,
tgtOp->getTupleFormat() );
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ex_function_queryid_extract::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 retcode = 0;
char * qidStr = op_data[1];
char * attrStr = op_data[2];
Lng32 qidLen = getOperand(1)->getLength();
Lng32 attrLen = getOperand(2)->getLength();
Lng32 attr = -999;
NABoolean isNumeric = FALSE;
// remove trailing blanks from attrStr
while (attrLen && attrStr[attrLen-1] == ' ')
attrLen--;
if (strncmp(attrStr, "SEGMENTNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SEGMENTNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "CPU", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_CPUNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "CPUNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_CPUNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "PIN", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_PIN;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "EXESTARTTIME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_EXESTARTTIME;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "SESSIONID", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SESSIONID;
}
else if (strncmp(attrStr, "SESSIONNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SESSIONNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "USERNAME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_USERNAME;
}
else if (strncmp(attrStr, "SESSIONNAME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_SESSIONNAME;
}
else if (strncmp(attrStr, "QUERYNUM", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_QUERYNUM;
isNumeric = TRUE;
}
else if (strncmp(attrStr, "STMTNAME", attrLen) == 0)
{
attr = ComSqlId::SQLQUERYID_STMTNAME;
}
Int64 value;
if (!isNumeric)
value = 99; // set max valueStr length
char valueStr[100];
retcode = ComSqlId::getSqlQueryIdAttr(
attr, qidStr, qidLen, value, valueStr);
if (retcode < 0)
{
ExRaiseFunctionSqlError(heap, diagsArea, (ExeErrorCode)(-retcode),
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
char * valPtr;
short datatype;
Lng32 length;
if (isNumeric)
{
valPtr = (char*)&value;
datatype = REC_BIN64_SIGNED;
length = 8;
}
else
{
valPtr = valueStr;
datatype = REC_BYTE_V_ANSI;
length = (Lng32)value + 1; // include null terminator
}
if (convDoIt(valPtr, length, datatype, 0, 0,
op_data[0],
getOperand(0)->getLength(),
getOperand(0)->getDatatype(),
getOperand(0)->getPrecision(),
getOperand(0)->getScale(),
op_data[-MAX_OPERANDS],
getOperand(0)->getVCIndicatorLength(),
heap, diagsArea))
return ex_expr::EXPR_ERROR;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionUniqueId::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
Lng32 retcode = 0;
char * result = op_data[0];
Int64 uniqueUID;
ComUID comUID;
comUID.make_UID();
#if defined( NA_LITTLE_ENDIAN )
uniqueUID = reversebytes(comUID.get_value());
#else
uniqueUID = comUID.get_value();
#endif
str_cpy_all(result, (char*)&uniqueUID, sizeof(Int64));
str_pad(&result[sizeof(Int64)], sizeof(Int64), '\0');
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionRowNum::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
char * result = op_data[0];
Int64 rowNum = getExeGlobals()->rowNum();
str_cpy_all(result, (char*)&rowNum, sizeof(Int64));
str_pad(&result[sizeof(Int64)], sizeof(Int64), '\0');
return ex_expr::EXPR_OK;
}
short ExFunctionHbaseColumnLookup::extractColFamilyAndName(
const char * input,
short len,
NABoolean isVarchar,
std::string &colFam, std::string &colName)
{
if (! input)
return -1;
Lng32 i = 0;
Lng32 startPos = 0;
if (isVarchar)
{
len = *(short*)input;
startPos = sizeof(len);
}
else if (len == -1)
{
len = strlen(input);
startPos = 0;
}
else
{
startPos = 0;
}
Lng32 j = 0;
i = startPos;
NABoolean colonFound = FALSE;
while ((j < len) && (not colonFound))
{
if (input[i] != ':')
{
i++;
}
else
{
colonFound = TRUE;
}
j++;
}
if (colonFound) // ":" found
{
colFam.assign(&input[startPos], i - startPos);
i++;
if (i < (startPos + len))
{
colName.assign(&input[i], (startPos + len) - i);
}
}
else
{
colName.assign(&input[startPos], i - startPos);
}
return 0;
}
ex_expr::exp_return_type
ExFunctionHbaseColumnLookup::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result. The result is a varchar.
Attributes *resultAttr = getOperand(0);
Attributes *colDetailAttr = getOperand(1);
char * resultStart = op_data[0];
char * resultNull = op_data[-2 * MAX_OPERANDS];
char * result = resultStart;
char * colDetail = op_data[1];
Lng32 sourceLen = 0;
if (colDetailAttr->getVCIndicatorLength() == sizeof(Lng32))
str_cpy_all((char*)&sourceLen, op_data[-MAX_OPERANDS+1], sizeof(Lng32));
else
{
short tempLen = 0;
str_cpy_all((char*)&tempLen, op_data[-MAX_OPERANDS+1], sizeof(short));
sourceLen = tempLen;
}
char * pos = colDetail;
NABoolean done = FALSE;
NABoolean colFound = FALSE;
while (NOT done)
{
short colNameLen = 0;
Lng32 colValueLen = 0;
memcpy((char*)&colNameLen, pos, sizeof(short));
pos += sizeof(short);
if ((colNameLen == strlen(colName_)) &&
(str_cmp(colName_, pos, colNameLen) == 0))
{
pos += colNameLen;
memcpy((char*)&colValueLen, pos, sizeof(Lng32));
pos += sizeof(Lng32);
NABoolean charType = DFS2REC::isAnyCharacter(resultAttr->getDatatype());
if (! charType)
{
// lengths must match for non-char types.
// Skip this column's value before continuing, so the scan does
// not re-read the value bytes as the next column name.
if (colValueLen != resultAttr->getLength())
{
pos += colValueLen;
if (pos >= (colDetail + sourceLen))
done = TRUE;
continue;
}
}
UInt32 flags = 0;
ex_expr::exp_return_type rc =
convDoIt(pos,
colValueLen,
(charType ? REC_BYTE_F_ASCII : resultAttr->getDatatype()),
(charType ? 0 : resultAttr->getPrecision()),
(charType ? 0 : resultAttr->getScale()),
result,
resultAttr->getLength(),
resultAttr->getDatatype(),
resultAttr->getPrecision(),
resultAttr->getScale(),
NULL,
0,
heap,
diagsArea);
if ((rc != ex_expr::EXPR_OK) ||
((diagsArea) && (*diagsArea) && ((*diagsArea)->getNumber(DgSqlCode::WARNING_)) > 0))
{
if (rc == ex_expr::EXPR_OK)
{
(*diagsArea)->negateAllWarnings();
}
return ex_expr::EXPR_ERROR;
}
getOperand(0)->setVarLength(colValueLen, op_data[-MAX_OPERANDS]);
colFound = TRUE;
done = TRUE;
}
else
{
pos += colNameLen;
memcpy((char*)&colValueLen, pos, sizeof(Lng32));
pos += sizeof(Lng32);
pos += colValueLen;
if (pos >= (colDetail + sourceLen))
{
done = TRUE;
}
}
} // while
if (NOT colFound)
{
// move null value to result
ExpTupleDesc::setNullValue(resultNull,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat() );
}
else
{
ExpTupleDesc::clearNullValue(resultNull,
resultAttr->getNullBitIndex(),
resultAttr->getTupleFormat() );
}
return ex_expr::EXPR_OK;
}
NABoolean ExFunctionHbaseColumnsDisplay::toBeDisplayed(
char * colName, Lng32 colNameLen)
{
if ((! colNames()) || (numCols_ == 0))
return TRUE;
char * currColName = colNames();
for (Lng32 i = 0; i < numCols_; i++)
{
short currColNameLen = *(short*)currColName;
currColName += sizeof(short);
if ((colNameLen == currColNameLen) &&
(memcmp(colName, currColName, colNameLen) == 0))
return TRUE;
currColName += currColNameLen;
}
return FALSE;
}
ex_expr::exp_return_type
ExFunctionHbaseColumnsDisplay::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result. The result is a varchar.
Attributes *resultAttr = getOperand(0);
Attributes *colDetailAttr = getOperand(1);
char * resultStart = op_data[0];
char * result = resultStart;
char * colDetail = op_data[1];
Lng32 sourceLen = 0;
if (colDetailAttr->getVCIndicatorLength() == sizeof(Lng32))
str_cpy_all((char*)&sourceLen, op_data[-MAX_OPERANDS+1], sizeof(Lng32));
else
{
short tempLen = 0;
str_cpy_all((char*)&tempLen, op_data[-MAX_OPERANDS+1], sizeof(short));
sourceLen = tempLen;
}
char * pos = colDetail;
NABoolean done = FALSE;
while (NOT done)
{
short colNameLen = 0;
Lng32 colValueLen = 0;
memcpy((char*)&colNameLen, pos, sizeof(short));
pos += sizeof(short);
memcpy(result, pos, colNameLen);
pos += colNameLen;
// if this column name needs to be returned, then return it.
if (NOT toBeDisplayed(result, colNameLen))
{
goto label_continue;
}
result += colNameLen;
memcpy(result, " => ", strlen(" => "));
result += strlen(" => ");
memcpy((char*)&colValueLen, pos, sizeof(Lng32));
pos += sizeof(Lng32);
memcpy(result, pos, colValueLen);
result += colValueLen;
pos += colValueLen;
if (pos < (colDetail + sourceLen))
{
memcpy(result, ", ", strlen(", "));
result += strlen(", ");
}
label_continue:
if (pos >= (colDetail + sourceLen))
{
done = TRUE;
}
}
// store the row length in the varlen indicator.
getOperand(0)->setVarLength((result-resultStart), op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionHbaseColumnCreate::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result. The result is a varchar.
// Values in result have already been populated by clauses evaluated
// before this clause is reached.
Attributes *resultAttr = getOperand(0);
char * resultStart = op_data[0];
char * result = resultStart;
str_cpy_all(result, (char*)&numEntries_, sizeof(numEntries_));
result += sizeof(short);
str_cpy_all(result, (char*)&colNameMaxLen_, sizeof(colNameMaxLen_));
result += sizeof(short);
str_cpy_all(result, (char*)&colValVCIndLen_, sizeof(colValVCIndLen_));
result += sizeof(short);
str_cpy_all(result, (char*)&colValMaxLen_, sizeof(colValMaxLen_));
result += sizeof(Int32);
for (Lng32 i = 0; i < numEntries_; i++)
{
// validate that column name is of right format: colfam:colname
std::string colFam;
std::string colNam;
ExFunctionHbaseColumnLookup::extractColFamilyAndName(
result, -1, TRUE/*isVarchar*/, colFam, colNam);
if (colFam.empty())
{
short colNameLen;
str_cpy_all((char*)&colNameLen, result, sizeof(short));
result += sizeof(short);
std::string colNamData(result, colNameLen);
ExRaiseSqlError(heap, diagsArea, (ExeErrorCode)1426, NULL, NULL, NULL, NULL,
colNamData.data());
return ex_expr::EXPR_ERROR;
}
result += sizeof(short);
result += ROUND2(colNameMaxLen_);
// skip the nullable bytes
result += sizeof(short);
if (colValVCIndLen_ == sizeof(short))
result += sizeof(short);
else
{
result = (char*)ROUND4((Int64)result);
result += sizeof(Lng32);
}
result += ROUND2(colValMaxLen_);
}
resultAttr->setVarLength(result - resultStart, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionCastType::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
// op_data[0] points to result.
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
char * resultData = op_data[0];
char * srcData = op_data[1];
Lng32 sourceLen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 resultLen = resultAttr->getLength();
if (sourceLen < resultLen)
{
ExRaiseFunctionSqlError(heap, diagsArea, EXE_STRING_OVERFLOW,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
str_cpy_all(resultData, srcData, resultLen);
getOperand(0)->setVarLength(resultLen, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionSequenceValue::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
short rc = 0;
// op_data[0] points to result. The result is a varchar.
Attributes *resultAttr = getOperand(0);
char * result = op_data[0];
SequenceValueGenerator * seqValGen = getExeGlobals()->seqGen();
Int64 seqVal = 0;
if (isCurr())
rc = seqValGen->getCurrSeqVal(sga_, seqVal);
else
rc = seqValGen->getNextSeqVal(sga_, seqVal);
if (rc)
{
ExRaiseSqlError(heap, diagsArea, (ExeErrorCode)ABS(rc));
return ex_expr::EXPR_ERROR;
}
*(Int64*)result = seqVal;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionHbaseTimestamp::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
short rc = 0;
// op_data[0] points to result.
Attributes *resultAttr = getOperand(0);
char * result = op_data[0];
Int64 * hbaseTS = (Int64*)op_data[1];
*(Int64*)result = hbaseTS[colIndex_];
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type
ExFunctionHbaseVersion::eval(char *op_data[], CollHeap *heap,
ComDiagsArea **diagsArea)
{
short rc = 0;
// op_data[0] points to result.
Attributes *resultAttr = getOperand(0);
char * result = op_data[0];
Int64 * hbaseVersion = (Int64*)op_data[1];
*(Int64*)result = hbaseVersion[colIndex_];
return ex_expr::EXPR_OK;
}
////////////////////////////////////////////////////////////////////
//
// decodeKeyValue
//
// This routine decodes an encoded key value.
//
// Note: The target MAY point to the source to change the original
// value.
//
////////////////////////////////////////////////////////////////////
short ex_function_encode::decodeKeyValue(Attributes * attr,
NABoolean isDesc,
char *inSource,
char *varlen_ptr,
char *target,
char *target_varlen_ptr,
NABoolean handleNullability
)
{
Lng32 fsDatatype = attr->getDatatype();
Lng32 length = attr->getLength();
Lng32 precision = attr->getPrecision();
Lng32 encodedKeyLen = length;
if ((handleNullability) &&
(attr->getNullFlag()))
encodedKeyLen += attr->getNullIndicatorLength();
char * source = inSource;
if (isDesc)
{
// complement all bytes
for (Lng32 k = 0; k < encodedKeyLen; k++)
target[k] = ~(source[k]);
source = target;
}
if ((handleNullability) &&
(attr->getNullFlag()))
{
if (target != source)
str_cpy_all(target, source, attr->getNullIndicatorLength());
source += attr->getNullIndicatorLength();
target += attr->getNullIndicatorLength();
}
switch (fsDatatype) {
#if defined( NA_LITTLE_ENDIAN )
case REC_BIN8_SIGNED:
//
// Flip the sign bit.
//
*(UInt8*)target = *(UInt8*)source;
target[0] ^= 0200;
break;
case REC_BIN8_UNSIGNED:
case REC_BOOLEAN:
*(UInt8*)target = *(UInt8*)source;
break;
case REC_BIN16_SIGNED:
//
// Flip the sign bit.
//
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
target[sizeof(short)-1] ^= 0200;
break;
case REC_BPINT_UNSIGNED:
case REC_BIN16_UNSIGNED:
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
break;
case REC_BIN32_SIGNED:
//
// Flip the sign bit.
//
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
target[sizeof(Lng32)-1] ^= 0200;
break;
case REC_BIN32_UNSIGNED:
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
break;
case REC_BIN64_SIGNED:
//
// Flip the sign bit.
//
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
target[sizeof(_int64)-1] ^= 0200;
break;
case REC_BIN64_UNSIGNED:
*((UInt64 *) target) = reversebytes( *((UInt64 *) source) );
break;
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
switch(length)
{
case 2: // Signed 16 bit
*((unsigned short *) target) = reversebytes( *((unsigned short *) source) );
target[SQL_SMALL_SIZE-1] ^= 0200;
break;
case 4: // Signed 32 bit
*((ULng32 *) target) = reversebytes( *((ULng32 *) source) );
target[SQL_INT_SIZE-1] ^= 0200;
break;
case 8: // Signed 64 bit
*((_int64 *) target) = reversebytes( *((_int64 *) source) );
target[SQL_LARGE_SIZE-1] ^= 0200;
break;
default:
assert(FALSE);
break;
}; // switch(length)
break;
case REC_DATETIME: {
// This method has been modified as part of the MP Datetime
// Compatibility project. It has been made more generic so that
// it depends only on the start and end fields of the datetime type.
//
rec_datetime_field startField;
rec_datetime_field endField;
ExpDatetime *dtAttr = (ExpDatetime *)attr;
// Get the start and end fields for this Datetime type.
//
dtAttr->getDatetimeFields(dtAttr->getPrecision(),
startField,
endField);
// Copy all of the source to the destination, then reverse only
// those fields of the target that are longer than 1 byte
//
if (target != source)
str_cpy_all(target, source, length);
// Reverse the YEAR and Fractional precision fields if present.
//
char *ptr = target;
for(Int32 field = startField; field <= endField; field++) {
switch (field) {
case REC_DATE_YEAR:
// convert YYYY from little endian to big endian
//
*((unsigned short *) ptr) = reversebytes( *((unsigned short *) ptr) );
ptr += sizeof(short);
break;
case REC_DATE_MONTH:
case REC_DATE_DAY:
case REC_DATE_HOUR:
case REC_DATE_MINUTE:
// One byte fields are copied as is...
ptr++;
break;
case REC_DATE_SECOND:
ptr++;
// if there is a fraction, make it big endian
// (it is an unsigned long, beginning after the SECOND field)
//
if (dtAttr->getScale() > 0)
*((ULng32 *) ptr) = reversebytes( *((ULng32 *) ptr) );
break;
}
}
break;
}
#else
case REC_BIN8_SIGNED:
case REC_BIN16_SIGNED:
case REC_BIN32_SIGNED:
case REC_BIN64_SIGNED:
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND:
//
// Flip the sign bit.
//
if (target != source)
str_cpy_all(target, source, length);
target[0] ^= 0200;
break;
#endif
case REC_DECIMAL_LSE:
//
// If the number was negative, complement all the bytes. Otherwise, set
// the sign bit.
//
if (NOT(source[0] & 0200)) {
for (Lng32 i = 0; i < length; i++)
target[i] = ~source[i];
} else {
if (target != source)
str_cpy_all(target, source, length);
target[0] &= ~0200;
}
break;
case REC_NUM_BIG_SIGNED:
case REC_NUM_BIG_UNSIGNED: {
BigNum type(length, precision, 0, 0);
type.decode(source, target);
break;
}
case REC_IEEE_FLOAT32: {
//
// Encoded float (IEEE 754 - 1985 standard):
//
// +-+--------+-----------------------+
// | |Exponent| Mantissa |
// | |(8 bits)| (23 bits) |
// +-+--------+-----------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
// unencoded float (IEEE 754 - 1985 standard):
//
// +-+----------+---------------------+
// | | exponent | mantissa |
// | | (8 bits) | (23 bits) |
// +-+----------+---------------------+
// |
// +- Sign bit
//
// the following code is independent of the "endianness" of the
// architecture. Instead, it assumes the IEEE 754 - 1985 standard
// for representation of floats
if (source[0] & 0200)
{
// sign bit is on. Indicates this was a positive number.
// Copy to target and clear the sign bit.
if (target != source)
str_cpy_all(target, source, length);
target[0] &= 0177;
}
else
{
// this was a negative number.
// flip all bits.
for (Lng32 i = 0; i < length; i++)
target[i] = ~source[i];
}
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(ULng32 *) target = reversebytes(*(ULng32 *)target);
#endif
break;
}
case REC_IEEE_FLOAT64: {
//
// Encoded double (IEEE 754 - 1985 standard):
//
// +-+-----------+--------------------+
// | | Exponent | Mantissa |
// | | (11 bits) | (52 bits) |
// +-+-----------+--------------------+
// || |
// |+- Complemented if sign was neg.-+
// |
// +- Sign bit complement
//
// unencoded double (IEEE 754 - 1985 standard):
//
// +-+--------- -+--------------------+
// | | exponent | mantissa |
// | | (11 bits) | (52 bits) |
// +-+--------- -+--------------------+
// |
// +- Sign bit
//
// the following code is independent of the "endianness" of the
// architecture. Instead, it assumes the IEEE 754 - 1985 standard
// for representation of floats
if (source[0] & 0200)
{
// sign bit is on. Indicates this was a positive number.
// Copy to target and clear the sign bit.
if (target != source)
str_cpy_all(target, source, length);
target[0] &= 0177;
}
else
{
// this was a negative number.
// flip all bits.
for (Lng32 i = 0; i < length; i++)
target[i] = ~source[i];
}
// here comes the dependent part
#ifdef NA_LITTLE_ENDIAN
*(Int64 *) target = reversebytes(*(Int64 *)target);
#endif
break;
}
case REC_BYTE_V_ASCII:
case REC_BYTE_V_ASCII_LONG: {
//
// Copy the source to the target.
//
short vc_len;
// See bug LP 1444134, make this compatible with encoding for
// varchars and remove the VC indicator
assert(attr->getVCIndicatorLength() == sizeof(vc_len));
str_cpy_all((char *) &vc_len, varlen_ptr, attr->getVCIndicatorLength());
if (target != source)
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
str_pad(&target[vc_len], (Int32) (length - vc_len), ' ');
//
// Set the length bytes to the maximum length for this field. This
// makes all encoded varchar keys have the same length, so the
// comparison depends only on the fixed part of the varchar buffer.
//
vc_len = (short) length;
if (target_varlen_ptr)
str_cpy_all(target_varlen_ptr, (char *) &vc_len, attr->getVCIndicatorLength());
break;
}
case REC_NCHAR_V_UNICODE:
{
//
// Copy the source to the target.
//
// See bug LP 1444134, make this compatible with encoding for
// varchars and remove the VC indicator
short vc_len;
assert(attr->getVCIndicatorLength() == sizeof(vc_len));
str_cpy_all((char *) &vc_len, varlen_ptr, attr->getVCIndicatorLength());
if (target != source)
str_cpy_all(target, source, vc_len);
//
// Blankpad the target (if needed).
//
if (vc_len < length)
wc_str_pad((NAWchar*)&target[attr->getVCIndicatorLength() + vc_len],
(Int32) (length - vc_len)/sizeof(NAWchar), unicode_char_set::space_char());
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)&target[attr->getVCIndicatorLength()], length/sizeof(NAWchar));
#endif
//
// Set the length bytes to the maximum length for this field. This
// makes all encoded varchar keys have the same length, so the
// comparison depends only on the fixed part of the varchar buffer.
//
vc_len = (short) length;
if (target_varlen_ptr)
str_cpy_all(target_varlen_ptr, (char *) &vc_len, attr->getVCIndicatorLength());
break;
}
case REC_NCHAR_F_UNICODE:
{
if (target != source)
str_cpy_all(target, source, length);
#if defined( NA_LITTLE_ENDIAN )
wc_swap_bytes((NAWchar*)target, length/sizeof(NAWchar));
#endif
break;
}
default:
//
// Decoding is not needed. Just copy the source to the target.
//
if (target != source)
str_cpy_all(target, source, length);
break;
}
return 0;
}
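// Worked example of the decoding above (little-endian, REC_BIN16_SIGNED):
// the value -2 (0xFFFE) is encoded for key comparison as the big-endian
// bytes [0x7F, 0xFE] -- byte-swapped with the sign bit flipped so that
// plain unsigned byte comparison orders keys correctly. decodeKeyValue()
// undoes both steps: reversebytes() turns [0x7F, 0xFE] into [0xFE, 0x7F],
// and target[sizeof(short)-1] ^= 0200 restores [0xFE, 0xFF] = -2.
// (Illustrative walk-through only; the value was chosen for the example.)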
static Lng32 convAsciiLength(Attributes * attr)
{
Lng32 d_len = 0;
Int32 scale_len = 0;
Lng32 datatype = attr->getDatatype();
Lng32 length = attr->getLength();
Lng32 precision = attr->getPrecision();
Lng32 scale = attr->getScale();
if (scale > 0)
scale_len = 1;
switch (datatype)
{
case REC_BPINT_UNSIGNED:
// Can set the display size based on precision. For now treat it as
// unsigned smallint
d_len = SQL_USMALL_DISPLAY_SIZE;
break;
case REC_BIN16_SIGNED:
d_len = SQL_SMALL_DISPLAY_SIZE + scale_len;
break;
case REC_BIN16_UNSIGNED:
d_len = SQL_USMALL_DISPLAY_SIZE + scale_len;
break;
case REC_BIN32_SIGNED:
d_len = SQL_INT_DISPLAY_SIZE + scale_len;
break;
case REC_BIN32_UNSIGNED:
d_len = SQL_UINT_DISPLAY_SIZE + scale_len;
break;
case REC_BIN64_SIGNED:
d_len = SQL_LARGE_DISPLAY_SIZE + scale_len;
break;
case REC_BIN64_UNSIGNED:
d_len = SQL_ULARGE_DISPLAY_SIZE + scale_len;
break;
case REC_NUM_BIG_UNSIGNED:
case REC_NUM_BIG_SIGNED:
d_len = precision + 1 + scale_len; // Precision + sign + decimal point
break;
case REC_BYTE_F_ASCII:
d_len = length;
break;
case REC_NCHAR_F_UNICODE:
case REC_NCHAR_V_UNICODE:
case REC_BYTE_V_ASCII:
case REC_BYTE_V_ASCII_LONG:
d_len = length;
break;
case REC_DECIMAL_UNSIGNED:
d_len = length + scale_len;
break;
case REC_DECIMAL_LSE:
d_len = length + 1 + scale_len;
break;
case REC_FLOAT32:
d_len = SQL_REAL_DISPLAY_SIZE;
break;
case REC_FLOAT64:
d_len = SQL_DOUBLE_PRECISION_DISPLAY_SIZE;
break;
case REC_DATETIME:
switch (precision) {
// Add distinct literals for SQLDTCODE_DATE, etc. These literals
// are defined in sqlcli.h, which cannot be included in this file.
case 1 /*SQLDTCODE_DATE*/:
{
d_len = DATE_DISPLAY_SIZE;
}
break;
case 2 /*SQLDTCODE_TIME*/:
{
d_len = TIME_DISPLAY_SIZE +
(scale > 0 ? (1 + scale) : 0);
}
break;
case 3 /*SQLDTCODE_TIMESTAMP*/:
{
d_len = TIMESTAMP_DISPLAY_SIZE +
(scale > 0 ? (1 + scale) : 0);
}
break;
default:
d_len = length;
break;
}
break;
case REC_INT_YEAR:
case REC_INT_MONTH:
case REC_INT_YEAR_MONTH:
case REC_INT_DAY:
case REC_INT_HOUR:
case REC_INT_DAY_HOUR:
case REC_INT_MINUTE:
case REC_INT_HOUR_MINUTE:
case REC_INT_DAY_MINUTE:
case REC_INT_SECOND:
case REC_INT_MINUTE_SECOND:
case REC_INT_HOUR_SECOND:
case REC_INT_DAY_SECOND: {
rec_datetime_field startField;
rec_datetime_field endField;
ExpInterval::getIntervalStartField(datatype, startField);
ExpInterval::getIntervalEndField(datatype, endField);
// this code is copied from IntervalType::getStringSize in
// w:/common/IntervalType.cpp
d_len = 1 + 1 +
precision +
3/*IntervalFieldStringSize*/ * (endField - startField);
if (scale)
d_len += scale + 1; // 1 for "."
}
break;
default:
d_len = length;
break;
}
return d_len;
}
// Helper function: converts a string into an IPv4 address if valid; supports leading and trailing spaces.
static Lng32 string2ipv4(char *srcData, Lng32 slen, unsigned int *inet_addr)
{
Int16 i = 0, j = 0 , p=0, leadingspace=0;
char buf[16];
Int16 dot=0;
if(slen < MIN_IPV4_STRING_LEN )
return 0;
unsigned char *ipv4_bytes= (unsigned char *)inet_addr;
if(srcData[0] == ' ')
{
char * next = srcData;
while (*next == ' ')
{
leadingspace++;
next++;
}
}
for(i=leadingspace , j = 0; i < slen ; i++)
{
if(srcData[i] == '.')
{
buf[j]=0;
p = str_atoi(buf, j);
if( p < 0 || p > 255 || j == 0)
{
return 0;
}
else
{
if(ipv4_bytes)
ipv4_bytes[dot] = (unsigned char)p;
}
j = 0;
dot++;
if(dot > 3) return 0;
}
else if(srcData[i] == ' ')
{
break; //space is terminator
}
else
{
if(isdigit(srcData[i]) == 0)
{
return 0;
}
else
buf[j] = srcData[i];
j++;
}
}
Int16 stoppos=i;
// the last part
buf[j]=0; //null terminator
for(i = 0; i < j; i ++) //check for invalid character
{
if(isdigit(buf[i]) == 0)
{
return 0;
}
}
p = str_atoi(buf, j);
if( p < 0 || p > 255 || j == 0) // check for invalid number
{
return 0;
}
else
{
if(ipv4_bytes)
ipv4_bytes[dot] = (unsigned char)p;
}
//if terminated by space
if( stoppos < slen -1)
{
for(j = stoppos ; j < slen; j++)
{
if(srcData[j] != ' ') return 0;
}
}
if(dot != 3)
return 0;
else
return 1;
}
ex_expr::exp_return_type ExFunctionInetAton::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
char * srcData = op_data[1];
char * resultData = op_data[0];
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 rlen = resultAttr->getLength();
unsigned int addr;
int ret=string2ipv4(srcData, slen, &addr);
if(ret)
{
*(unsigned int *)op_data[0]=addr;
return ex_expr::EXPR_OK;
}
else
{
ExRaiseSqlError(heap, diags, EXE_INVALID_CHARACTER);
*(*diags) << DgString0("IP format") << DgString1("INET_ATON FUNCTION");
return ex_expr::EXPR_ERROR;
}
}
ex_expr::exp_return_type ExFunctionInetNtoa::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
char buf[16]; //big enough
unsigned long addr = *(unsigned long*)op_data[1];
char * resultData = op_data[0];
Attributes *resultAttr = getOperand(0);
const unsigned char *ipv4_bytes= (const unsigned char *) &addr;
if( addr > 4294967295 )
{
ExRaiseSqlError(heap, diags, EXE_BAD_ARG_TO_MATH_FUNC);
*(*diags) << DgString0("INET_NTOA");
return ex_expr::EXPR_ERROR;
}
str_sprintf(buf, "%d.%d.%d.%d",
ipv4_bytes[0], ipv4_bytes[1], ipv4_bytes[2], ipv4_bytes[3]);
int slen = str_len(buf);
str_cpy_all(resultData, buf, slen);
getOperand(0)->setVarLength(slen, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionCrc32::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 rlen = resultAttr->getLength();
*(ULng32*)op_data[0] = 0;
ULng32 crc = crc32(0L, Z_NULL, 0);
crc = crc32 (crc, (const Bytef*)op_data[1], slen);
*(ULng32*)op_data[0] = crc;
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionSha2::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
unsigned char sha[SHA512_DIGEST_LENGTH + 1] = {0};
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
// the length of result
Lng32 rlen = SHA512_DIGEST_LENGTH;
switch (mode) {
case 0:
case 256:
SHA256_CTX sha_ctx_256;
if (!SHA256_Init(&sha_ctx_256))
goto sha2_error;
if (!SHA256_Update(&sha_ctx_256, op_data[1], slen))
goto sha2_error;
if (!SHA256_Final((unsigned char *)sha, &sha_ctx_256))
goto sha2_error;
rlen = SHA256_DIGEST_LENGTH;
break;
case 224:
SHA256_CTX sha_ctx_224;
if (!SHA224_Init(&sha_ctx_224))
goto sha2_error;
if (!SHA224_Update(&sha_ctx_224, op_data[1], slen))
goto sha2_error;
if (!SHA224_Final((unsigned char *)sha, &sha_ctx_224))
goto sha2_error;
rlen = SHA224_DIGEST_LENGTH;
break;
case 384:
SHA512_CTX sha_ctx_384;
if (!SHA384_Init(&sha_ctx_384))
goto sha2_error;
if (!SHA384_Update(&sha_ctx_384, op_data[1], slen))
goto sha2_error;
if (!SHA384_Final((unsigned char *)sha, &sha_ctx_384))
goto sha2_error;
rlen = SHA384_DIGEST_LENGTH;
break;
case 512:
SHA512_CTX sha_ctx_512;
if (!SHA512_Init(&sha_ctx_512))
goto sha2_error;
if (!SHA512_Update(&sha_ctx_512, op_data[1], slen))
goto sha2_error;
if (!SHA512_Final((unsigned char *)sha, &sha_ctx_512))
goto sha2_error;
rlen = SHA512_DIGEST_LENGTH;
break;
default:
ExRaiseSqlError(heap, diags, EXE_BAD_ARG_TO_MATH_FUNC);
*(*diags) << DgString0("SHA2");
return ex_expr::EXPR_ERROR;
}
str_pad(op_data[0], rlen, ' ');
char tmp[3];
for(int i=0; i < rlen; i++ )
{
tmp[0]=tmp[1]=tmp[2]='0';
sprintf(tmp, "%.2x", (int)sha[i]);
str_cpy_all(op_data[0]+i*2, tmp, 2);
}
return ex_expr::EXPR_OK;
sha2_error:
ExRaiseFunctionSqlError(heap, diags, EXE_INTERNAL_ERROR,
derivedFunction(),
origFunctionOperType());
return ex_expr::EXPR_ERROR;
}
ex_expr::exp_return_type ExFunctionSha::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
unsigned char sha[SHA_DIGEST_LENGTH + 1]={0};
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 rlen = resultAttr->getLength();
str_pad(op_data[0], rlen , ' ');
SHA_CTX sha_ctx;
SHA1_Init(&sha_ctx);
SHA1_Update(&sha_ctx, op_data[1], slen);
SHA1_Final((unsigned char*) sha,&sha_ctx);
char tmp[3];
for(int i=0; i < SHA_DIGEST_LENGTH ; i++ )
{
tmp[0]=tmp[1]=tmp[2]='0';
sprintf(tmp, "%.2x", (int)sha[i]);
str_cpy_all(op_data[0]+i*2, tmp, 2);
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionMd5::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
unsigned char md5[17]={0};
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 rlen = resultAttr->getLength();
str_pad(op_data[0], rlen, ' ');
MD5_CTX md5_ctx;
MD5_Init(&md5_ctx);
MD5_Update(&md5_ctx, op_data[1], slen);
MD5_Final((unsigned char*) md5,&md5_ctx);
char tmp[3];
for(int i=0; i < 16; i++ )
{
tmp[0]=tmp[1]=tmp[2]='0';
sprintf(tmp, "%.2x", (int)md5[i]);
str_cpy_all(op_data[0]+i*2, tmp, 2);
}
return ex_expr::EXPR_OK;
}
ex_expr::exp_return_type ExFunctionIsIP::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diags)
{
char * resultData = op_data[0];
char * srcData = op_data[1];
Int16 i = 0, j = 0 , p=0;
Attributes *resultAttr = getOperand(0);
Attributes *srcAttr = getOperand(1);
Lng32 slen = srcAttr->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 rlen = resultAttr->getLength();
if(getOperType() == ITM_ISIPV4)
{
if(string2ipv4(srcData, slen, NULL) == 0)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else
{
*(Int16 *)op_data[0] = 1;
return ex_expr::EXPR_OK;
}
}
else
{
Int16 gapcounter = 0, portidx = 0;
char portion[IPV6_PARTS_NUM][MAX_IPV6_STRING_LEN + 1];
char trimdata[MAX_IPV6_STRING_LEN + 1];
str_pad(trimdata,MAX_IPV6_STRING_LEN + 1, 0);
if(slen < MIN_IPV6_STRING_LEN )
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
char *ptr= srcData;
// cannot start with a single ':'
if (*ptr == ':')
{
if (*(ptr+1) != ':')
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
}
else if (*ptr == ' ')
{
while(*ptr==' ') ptr++;
}
char * start=ptr;
if(slen - (srcData - ptr) > MAX_IPV6_STRING_LEN ) // must be padding space
{
if( start[MAX_IPV6_STRING_LEN] != ' ')
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else {
for(j = MAX_IPV6_STRING_LEN; j >=0; j--)
{
if(ptr[j] != ' ') //stop, j is the last non-space char
break;
}
str_cpy_all(trimdata,start, j);
start = trimdata;
}
}
char ipv4[MAX_IPV6_STRING_LEN + 1];
j = 0;
int ipv4idx = 0;
// Try to split the string into portions delimited by ':'.
// Also check for '::' (call it a gap); at most one gap is allowed.
// If there is a gap, the portion count can be smaller than 8;
// without a gap, the portion count must be 8.
// Each portion must be a 16-bit integer in hex format;
// leading zeros can be omitted.
for(i = 0; i< slen; i++)
{
if(start[i] == ':')
{
portion[portidx][j] = 0; //set the terminator
if(start[i+1] == ':')
{
if(j != 0) //some characters are already saved into current portion
portidx++;
gapcounter++;
j = 0; //reset temp buffer pointer
i++;
continue;
}
else
{
//new portion start
if( j == 0 )
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
portidx++;
j=0;
continue;
}
}
else if( start[i] == '.') //ipv4 mixed format
{
if( ipv4idx > 0 )
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
str_cpy_all(ipv4, portion[portidx],str_len(portion[portidx]));
if(strlen(start+i) < MAX_IPV4_STRING_LEN) //15 is the maximum IPV4 string length
str_cat((const char*)ipv4, start+i, ipv4);
else
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
if(string2ipv4(ipv4, strlen(ipv4), NULL) == 0)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else
{
ipv4idx = 2; // IPv4 uses 2 portions (32 bits)
break; // ipv4 string must be the last portion
}
}
portion[portidx][j] = start[i];
j++;
}
if(gapcounter > 1 || portidx > IPV6_PARTS_NUM - 1)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
else if(gapcounter ==0 && portidx+ipv4idx < IPV6_PARTS_NUM - 1)
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
//check each IPV6 portion
for(i =0; i < portidx ; i++)
{
int len = strlen(portion[i]);
if( len > 4) // only the IPv4 portion can be longer than 4 chars
{
if(ipv4idx == 0 || ((ipv4idx == 2) && ( i != portidx -1) ) ) // no IPV4 portion, or this is not the IPV4 portion
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
}
for(j = 0; j < len; j++)
{
if( (portion[i][j] >= 'A' && portion[i][j] <= 'F') ||
(portion[i][j] >= 'a' && portion[i][j] <= 'f') ||
(portion[i][j] >= '0' && portion[i][j] <= '9')
) //good
continue;
else
{
*(Int16 *)op_data[0] = 0;
return ex_expr::EXPR_OK;
}
}
}
//everything is good, this is IPV6
*(Int16 *)op_data[0] = 1;
return ex_expr::EXPR_OK;
}
}
// Parse json errors
static void ExRaiseJSONError(CollHeap* heap, ComDiagsArea** diagsArea, JsonReturnType type)
{
switch(type)
{
case JSON_INVALID_TOKEN:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_TOKEN);
break;
case JSON_INVALID_VALUE:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_VALUE);
break;
case JSON_INVALID_STRING:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_STRING);
break;
case JSON_INVALID_ARRAY_START:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_ARRAY_START);
break;
case JSON_INVALID_ARRAY_NEXT:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_ARRAY_NEXT);
break;
case JSON_INVALID_OBJECT_START:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_OBJECT_START);
break;
case JSON_INVALID_OBJECT_LABEL:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_OBJECT_LABEL);
break;
case JSON_INVALID_OBJECT_NEXT:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_OBJECT_NEXT);
break;
case JSON_INVALID_OBJECT_COMMA:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_OBJECT_COMMA);
break;
case JSON_INVALID_END:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_INVALID_END);
break;
case JSON_END_PREMATURELY:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_END_PREMATURELY);
break;
default:
ExRaiseSqlError(heap, diagsArea, EXE_JSON_UNEXPECTED_ERROR);
break;
}
}
/*
* SOUNDEX(str) returns a character string containing the phonetic
* representation of the input string. It lets you compare words that
* are spelled differently, but sound alike in English.
* The phonetic representation is defined in "The Art of Computer Programming",
* Volume 3: Sorting and Searching, by Donald E. Knuth, as follows:
*
* 1. Retain the first letter of the string and remove all other occurrences
* of the following letters: a, e, h, i, o, u, w, y.
*
* 2. Assign numbers to the remaining letters (after the first) as follows:
* b, f, p, v = 1
* c, g, j, k, q, s, x, z = 2
* d, t = 3
* l = 4
* m, n = 5
* r = 6
*
* 3. If two or more letters with the same number were adjacent in the original
* name (before step 1), or adjacent except for any intervening h and w, then
* omit all but the first.
*
* 4. Return the first four bytes padded with 0.
* */
ex_expr::exp_return_type ExFunctionSoundex::eval(char *op_data[],
CollHeap *heap,
ComDiagsArea** diagsArea)
{
ULng32 previous = 0;
ULng32 current = 0;
char *srcStr = op_data[1];
char *tgtStr = op_data[0];
Lng32 srcLen = getOperand(1)->getLength(op_data[-MAX_OPERANDS+1]);
Lng32 tgtLen = getOperand(0)->getLength();
CharInfo::CharSet cs = ((SimpleType *)getOperand(1))->getCharSet();
str_pad(tgtStr, tgtLen, '\0');
tgtStr[0] = toupper(srcStr[0]); // Retain the first letter, converting it to uppercase
Int16 setLen = 1; // The first character is set already
for(int i=1; i < srcLen; ++i)
{
char chr = toupper(srcStr[i]);
switch(chr)
{
case 'A':
case 'E':
case 'H':
case 'I':
case 'O':
case 'U':
case 'W':
case 'Y':
current = 0;
break;
case 'B':
case 'F':
case 'P':
case 'V':
current = 1;
break;
case 'C':
case 'G':
case 'J':
case 'K':
case 'Q':
case 'S':
case 'X':
case 'Z':
current = 2;
break;
case 'D':
case 'T':
current = 3;
break;
case 'L':
current = 4;
break;
case 'M':
case 'N':
current = 5;
break;
case 'R':
current = 6;
break;
default:
break;
}
if(current) // Only letters with a non-zero value are retained; 0 is discarded
{
if(previous != current)
{
str_itoa(current, &tgtStr[setLen]);
setLen++; // A new character is set in target
}
}
previous = current;
if(setLen == tgtLen) // Don't overrun the target string
break;
} // end of for loop
if(setLen < tgtLen)
str_pad(tgtStr+setLen, (tgtLen - setLen), '0');
return ex_expr::EXPR_OK;
}
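// Example: for input "Robert" the loop above yields "R163" -- 'R' is
// retained, 'o' and 'e' map to 0 and are dropped, and 'b', 'r', 't'
// map to 1, 6, 3 respectively. (Illustrative example for the rules
// documented above.)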
ex_expr::exp_return_type ExFunctionAESEncrypt::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
CharInfo::CharSet cs = ((SimpleType *)getOperand(0))->getCharSet();
Attributes *tgt = getOperand(0);
Lng32 source_len = getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]);
char * source = op_data[1];
Lng32 key_len = getOperand(2)->getLength(op_data[-MAX_OPERANDS + 2]);
unsigned char * key = (unsigned char *)op_data[2];
unsigned char * result = (unsigned char *)op_data[0];
unsigned char rkey[EVP_MAX_KEY_LENGTH];
int u_len, f_len;
EVP_CIPHER_CTX ctx;
const EVP_CIPHER * cipher = aes_algorithm_type[aes_mode];
int iv_len_need = EVP_CIPHER_iv_length(cipher);
unsigned char * iv = NULL;
if (iv_len_need) {
if (args_num == 3) {
Lng32 iv_len_input = getOperand(3)->getLength(op_data[-MAX_OPERANDS + 3]);
if (iv_len_input == 0 || iv_len_input < iv_len_need) {
// the length of iv is too short
ExRaiseSqlError(heap, diagsArea, EXE_AES_INVALID_IV);
*(*diagsArea) << DgInt0(iv_len_input) << DgInt1(iv_len_need);
return ex_expr::EXPR_ERROR;
}
iv = (unsigned char *)op_data[3];
}
else {
// no IV argument was supplied, but the algorithm needs an IV
ExRaiseSqlError(heap, diagsArea,EXE_ERR_PARAMCOUNT_FOR_FUNC);
*(*diagsArea) << DgString0("AES_ENCRYPT");
return ex_expr::EXPR_ERROR;
}
}
else {
if (args_num == 3) {
// the algorithm doesn't need iv, give a warning
ExRaiseSqlWarning(heap, diagsArea, EXE_OPTION_IGNORED);
*(*diagsArea) << DgString0("IV");
}
}
aes_create_key(key, key_len, rkey, aes_mode);
if (!EVP_EncryptInit(&ctx, cipher, (const unsigned char*)rkey, iv))
goto aes_encrypt_error;
if (!EVP_CIPHER_CTX_set_padding(&ctx, true))
goto aes_encrypt_error;
if (!EVP_EncryptUpdate(&ctx, result, &u_len, (const unsigned char *)source, source_len))
goto aes_encrypt_error;
if (!EVP_EncryptFinal(&ctx, result + u_len, &f_len))
goto aes_encrypt_error;
EVP_CIPHER_CTX_cleanup(&ctx);
tgt->setVarLength(u_len + f_len, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
aes_encrypt_error:
ERR_clear_error();
EVP_CIPHER_CTX_cleanup(&ctx);
ExRaiseSqlError(heap, diagsArea, EXE_OPENSSL_ERROR);
*(*diagsArea) << DgString0("AES_ENCRYPT FUNCTION");
return ex_expr::EXPR_ERROR;
}
ex_expr::exp_return_type ExFunctionAESDecrypt::eval(char * op_data[],
CollHeap *heap,
ComDiagsArea **diagsArea)
{
Attributes * tgt = getOperand(0);
Lng32 source_len = getOperand(1)->getLength(op_data[-MAX_OPERANDS + 1]);
const unsigned char * source = (unsigned char *)op_data[1];
Lng32 key_len = getOperand(2)->getLength(op_data[-MAX_OPERANDS + 2]);
const unsigned char * key = (unsigned char *)op_data[2];
Lng32 maxLength = getOperand(0)->getLength();
unsigned char * result = (unsigned char *) op_data[0];
unsigned char rkey[EVP_MAX_KEY_LENGTH] = {0};
int u_len, f_len;
EVP_CIPHER_CTX ctx;
const EVP_CIPHER * cipher = aes_algorithm_type[aes_mode];
int iv_len_need = EVP_CIPHER_iv_length(cipher);
unsigned char * iv = NULL;
if (iv_len_need) {
if (args_num == 3) {
Lng32 iv_len_input = getOperand(3)->getLength(op_data[-MAX_OPERANDS + 3]);
if (iv_len_input == 0 || iv_len_input < iv_len_need) {
// the length of iv is too short
ExRaiseSqlError(heap, diagsArea, EXE_AES_INVALID_IV);
*(*diagsArea) << DgInt0(iv_len_input) << DgInt1(iv_len_need);
return ex_expr::EXPR_ERROR;
}
iv = (unsigned char *)op_data[3];
}
else {
// no IV argument was supplied, but the algorithm needs an IV
ExRaiseSqlError(heap, diagsArea, EXE_ERR_PARAMCOUNT_FOR_FUNC);
*(*diagsArea) << DgString0("AES_DECRYPT");
return ex_expr::EXPR_ERROR;
}
}
else {
if (args_num == 3) {
// the algorithm doesn't need iv, give a warning
ExRaiseSqlWarning(heap, diagsArea, EXE_OPTION_IGNORED);
*(*diagsArea) << DgString0("IV");
}
}
aes_create_key(key, key_len, rkey, aes_mode);
if (!EVP_DecryptInit(&ctx, cipher, rkey, iv))
goto aes_decrypt_error;
if (!EVP_CIPHER_CTX_set_padding(&ctx, true))
goto aes_decrypt_error;
if (!EVP_DecryptUpdate(&ctx, result, &u_len, source, source_len))
goto aes_decrypt_error;
if (!EVP_DecryptFinal_ex(&ctx, result + u_len, &f_len))
goto aes_decrypt_error;
EVP_CIPHER_CTX_cleanup(&ctx);
tgt->setVarLength(u_len + f_len, op_data[-MAX_OPERANDS]);
return ex_expr::EXPR_OK;
aes_decrypt_error:
ERR_clear_error();
EVP_CIPHER_CTX_cleanup(&ctx);
ExRaiseSqlError(heap, diagsArea, EXE_OPENSSL_ERROR);
*(*diagsArea) << DgString0("AES_DECRYPT FUNCTION");
return ex_expr::EXPR_ERROR;
}
| 1 | 19,938 | I'm wondering if we need to delete jsonStr and jsonAttr after the json_extract_path_text call to avoid unnecessary heap pressure. Though if json_extract_path_text itself does new's on the same heap, we'd get heap fragmentation. Another approach would be to allocate these on the stack instead, avoiding both concerns: char jsonStr[len1+1]; char jsonAttr[len2+1]; ... | apache-trafodion | cpp |
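The review comment above weighs heap allocation (with explicit delete) against stack allocation for the two short-lived JSON buffers. A minimal sketch of both options, using the hypothetical operand lengths len1 and len2 from the comment itself (names and usage are illustrative, not the actual Trafodion change):

// Heap version: pays a new/delete pair on every eval() call and can
// fragment the heap if json_extract_path_text() also allocates from it.
char *jsonStr  = new char[len1 + 1];
char *jsonAttr = new char[len2 + 1];
// ... null-terminate the operands, call json_extract_path_text() ...
delete [] jsonStr;
delete [] jsonAttr;

// Stack version, as the comment suggests: variable-length arrays are a
// C99 feature that g++ accepts as an extension in C++; the storage is
// released automatically on scope exit with no heap traffic. Only safe
// when len1 and len2 are known to be small and bounded.
{
    char jsonStr[len1 + 1];
    char jsonAttr[len2 + 1];
    // ... same usage ...
}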
@@ -67,14 +67,14 @@ def lambda_handler(request):
user_size = request.args.get('size', DEFAULT_SIZE)
user_source = request.args.get('_source', [])
# 0-indexed starting position (for pagination)
- user_from = request.args.get('from', 0)
+ user_from = int(request.args.get('from', 0))
terminate_after = None # see if we can skip os.getenv('MAX_DOCUMENTS_PER_SHARD')
if not user_indexes or not isinstance(user_indexes, str):
raise ValueError("Request must include index=<comma-separated string of indices>")
- if not isinstance(user_from, int) or user_from < 0:
- raise ValueError("'from' must be a positive integer")
+ if user_from < 0:
+ raise ValueError("'from' must be a non-negative integer")
if action == 'packages':
query = request.args.get('query', '') | 1 | """
Sends the request to ElasticSearch.
TODO: Implement a higher-level search API.
"""
import os
from copy import deepcopy
from itertools import filterfalse, tee
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth
from elasticsearch import Elasticsearch, RequestsHttpConnection
from t4_lambda_shared.decorator import api
from t4_lambda_shared.utils import get_default_origins, make_json_response
DEFAULT_SIZE = 1_000
MAX_QUERY_DURATION = 27 # Just shy of 29s API Gateway limit
NUM_PREVIEW_IMAGES = 100
NUM_PREVIEW_FILES = 20
COMPRESSION_EXTS = ['.gz']
IMG_EXTS = [
'*.jpg',
'*.jpeg',
'*.png',
'*.gif',
'*.webp',
'*.bmp',
'*.tiff',
'*.tif',
]
SAMPLE_EXTS = [
'*.parquet',
'*.parquet.gz',
'*.csv',
'*.csv.gz',
'*.tsv',
'*.tsv.gz',
'*.txt',
'*.txt.gz',
'*.vcf',
'*.vcf.gz',
'*.xls',
'*.xls.gz',
'*.xlsx',
'*.xlsx.gz',
'*.ipynb',
'*.md',
'*.pdf',
'*.pdf.gz',
'*.json',
'*.json.gz',
]
README_KEYS = ['README.md', 'README.txt', 'README.ipynb']
SUMMARIZE_KEY = 'quilt_summarize.json'
@api(cors_origins=get_default_origins())
def lambda_handler(request):
"""
Proxy the request to the elastic search.
"""
action = request.args.get('action')
user_body = request.args.get('body', {})
user_fields = request.args.get('fields', [])
user_indexes = request.args.get('index', "")
user_size = request.args.get('size', DEFAULT_SIZE)
user_source = request.args.get('_source', [])
# 0-indexed starting position (for pagination)
user_from = request.args.get('from', 0)
terminate_after = None # see if we can skip os.getenv('MAX_DOCUMENTS_PER_SHARD')
if not user_indexes or not isinstance(user_indexes, str):
raise ValueError("Request must include index=<comma-separated string of indices>")
if not isinstance(user_from, int) or user_from < 0:
raise ValueError("'from' must be a positive integer")
if action == 'packages':
query = request.args.get('query', '')
body = user_body or {
"query": {
"query_string": {
"analyze_wildcard": True,
"lenient": True,
"query": query,
# see enterprise/**/bucket.py for mappings
"fields": user_fields or [
# package
'comment', 'handle', 'handle_text^2', 'metadata', 'tags'
]
}
}
}
if not all(i.endswith('_packages') for i in user_indexes.split(',')):
raise ValueError("'packages' action searching indexes that don't end in '_packages'")
_source = user_source
size = user_size
elif action == 'search':
query = request.args.get('query', '')
body = {
"query": {
"query_string": {
"analyze_wildcard": True,
"lenient": True,
"query": query,
# see enterprise/**/bucket.py for mappings
"fields": user_fields or [
# object
'content', 'comment^2', 'ext^2', 'key_text^2', 'meta_text',
# package, and boost the fields
'handle^2', 'handle_text^2', 'metadata^2', 'tags^2'
]
}
}
}
_source = user_source or [
'key', 'version_id', 'updated', 'last_modified', 'size', 'user_meta',
'comment', 'handle', 'hash', 'tags', 'metadata', 'pointer_file'
]
size = DEFAULT_SIZE
elif action == 'stats':
body = {
"query": {"match_all": {}},
"aggs": {
"totalBytes": {"sum": {"field": 'size'}},
"exts": {
"terms": {"field": 'ext'},
"aggs": {"size": {"sum": {"field": 'size'}}},
},
"totalPackageHandles": {"value_count": {"field": "handle"}},
}
}
size = 0 # We still get all aggregates, just don't need the results
_source = False
# Consider all documents when computing counts, etc.
terminate_after = None
elif action == 'images':
body = {
'query': {'terms': {'ext': IMG_EXTS}},
'collapse': {
'field': 'key',
'inner_hits': {
'name': 'latest',
'size': 1,
'sort': [{'last_modified': 'desc'}],
'_source': ['key', 'version_id'],
},
},
}
size = NUM_PREVIEW_IMAGES
_source = False
elif action == 'sample':
body = {
'query': {
'bool': {
'must': [{'terms': {'ext': SAMPLE_EXTS}}],
'must_not': [
{'terms': {'key': README_KEYS + [SUMMARIZE_KEY]}},
{'wildcard': {'key': '*/' + SUMMARIZE_KEY}},
],
},
},
'collapse': {
'field': 'key',
'inner_hits': {
'name': 'latest',
'size': 1,
'sort': [{'last_modified': 'desc'}],
'_source': ['key', 'version_id'],
},
},
}
size = NUM_PREVIEW_FILES
_source = False
else:
return make_json_response(400, {"title": "Invalid action"})
es_host = os.environ['ES_HOST']
region = os.environ['AWS_REGION']
index_overrides = os.getenv('INDEX_OVERRIDES', '')
auth = BotoAWSRequestsAuth(
aws_host=es_host,
aws_region=region,
aws_service='es'
)
es_client = Elasticsearch(
hosts=[{'host': es_host, 'port': 443}],
http_auth=auth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection,
timeout=MAX_QUERY_DURATION,
)
to_search = f"{user_indexes},{index_overrides}" if index_overrides else user_indexes
result = es_client.search(
index=to_search,
body=body,
_source=_source,
size=size,
from_=user_from,
# try turning this off to consider all documents
terminate_after=terminate_after,
)
return make_json_response(200, post_process(result, action))
def post_process(result: dict, action: str) -> dict:
"""post process result from elastic conditional on action
"""
if action == "stats":
# don't modify the original to avoid side-effects
result = deepcopy(result)
counts = result["aggregations"]["exts"]["buckets"]
non_gz, gz = partition(
lambda c: any(c.get("key", "").lower().endswith(ext) for ext in COMPRESSION_EXTS),
counts
)
ext_counts = {}
# ES reports double extensions e.g. file.foo.ext, get down to just .ext
# for any .ext that is not .gz
# populate ext_counts
for record in non_gz:
_, ext = os.path.splitext(f"fakename{record['key']}")
if ext not in ext_counts:
ext_counts[ext] = {'doc_count': 0, 'size': 0}
ext_counts[ext]['doc_count'] += record.get('doc_count', 0)
ext_counts[ext]['size'] += record.get('size', {}).get('value', 0)
corrected = [
{
'key': ext,
'doc_count': val['doc_count'],
'size': {'value': val['size']}
}
for ext, val in ext_counts.items()
]
# rewrite aggregation buckets so gz aggregates use two-level extensions
# and all other extensions are single-level
corrected.extend(gz)
result["aggregations"]["exts"]["buckets"] = corrected
return result
def partition(pred, iterable):
"""Use a predicate to partition entries into false entries and true entries
partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
from https://docs.python.org/dev/library/itertools.html#itertools-recipes
"""
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2)
| 1 | 19,421 | Does it come as `str`? | quiltdata-quilt | py |
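The question above ("Does it come as `str`?") is what the patch addresses: API Gateway query-string parameters reach the handler as strings, so request.args.get('from', 0) returns a str whenever the client actually supplies ?from=..., and the old isinstance(user_from, int) check rejected every explicit value. A minimal sketch of the fixed parsing; note it adds a try/except that the patch itself does not include, so non-numeric input fails with the same message instead of an unhandled ValueError:

def parse_from(args):
    # Query-string values arrive as strings; only the default is an int.
    raw = args.get('from', 0)
    try:
        user_from = int(raw)
    except (TypeError, ValueError):
        raise ValueError("'from' must be a non-negative integer")
    if user_from < 0:
        raise ValueError("'from' must be a non-negative integer")
    return user_from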
@@ -290,7 +290,7 @@ public class IngredientsProductFragment extends BaseFragment implements IIngredi
substanceProduct.append(" ");
String allergen;
- for (int i = 0; i < allergens.size() - 1; i++) {
+ for (int i = 0; i <= allergens.size() - 1; i++) {
allergen = allergens.get(i);
substanceProduct.append(Utils.getClickableText(allergen, allergen, SearchType.ALLERGEN, getActivity(), customTabsIntent));
substanceProduct.append(", "); | 1 | package openfoodfacts.github.scrachx.openfood.views.product.ingredients;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.graphics.Bitmap;
import android.graphics.Typeface;
import android.net.Uri;
import android.os.Build;
import android.os.Bundle;
import android.provider.Settings;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.support.customtabs.CustomTabsIntent;
import android.support.v4.app.ActivityCompat;
import android.support.v4.app.ActivityOptionsCompat;
import android.support.v4.content.ContextCompat;
import android.support.v7.preference.PreferenceManager;
import android.support.v7.widget.CardView;
import android.text.SpannableStringBuilder;
import android.text.method.LinkMovementMethod;
import android.text.style.ClickableSpan;
import android.text.style.StyleSpan;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.ImageView;
import android.widget.TextView;
import com.afollestad.materialdialogs.MaterialDialog;
import com.squareup.picasso.Picasso;
import com.theartofdev.edmodo.cropper.CropImage;
import org.json.JSONObject;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import butterknife.BindView;
import butterknife.OnClick;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.fragments.BaseFragment;
import openfoodfacts.github.scrachx.openfood.models.AdditiveDao;
import openfoodfacts.github.scrachx.openfood.models.AdditiveName;
import openfoodfacts.github.scrachx.openfood.models.Product;
import openfoodfacts.github.scrachx.openfood.models.ProductImage;
import openfoodfacts.github.scrachx.openfood.models.SendProduct;
import openfoodfacts.github.scrachx.openfood.models.State;
import openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIClient;
import openfoodfacts.github.scrachx.openfood.network.WikidataApiClient;
import openfoodfacts.github.scrachx.openfood.repositories.IProductRepository;
import openfoodfacts.github.scrachx.openfood.repositories.ProductRepository;
import openfoodfacts.github.scrachx.openfood.utils.SearchType;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import openfoodfacts.github.scrachx.openfood.views.FullScreenImage;
import openfoodfacts.github.scrachx.openfood.views.ProductBrowsingListActivity;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabActivityHelper;
import openfoodfacts.github.scrachx.openfood.views.customtabs.CustomTabsHelper;
import openfoodfacts.github.scrachx.openfood.views.product.ProductActivity;
import pl.aprilapps.easyphotopicker.DefaultCallback;
import pl.aprilapps.easyphotopicker.EasyImage;
import static android.Manifest.permission.CAMERA;
import static android.app.Activity.RESULT_OK;
import static android.content.pm.PackageManager.PERMISSION_GRANTED;
import static android.text.Spanned.SPAN_EXCLUSIVE_EXCLUSIVE;
import static openfoodfacts.github.scrachx.openfood.models.ProductImageField.INGREDIENTS;
import static openfoodfacts.github.scrachx.openfood.utils.ProductInfoState.EMPTY;
import static openfoodfacts.github.scrachx.openfood.utils.ProductInfoState.LOADING;
import static openfoodfacts.github.scrachx.openfood.utils.Utils.MY_PERMISSIONS_REQUEST_CAMERA;
import static openfoodfacts.github.scrachx.openfood.utils.Utils.bold;
import static org.apache.commons.lang3.StringUtils.isNotBlank;
import static org.jsoup.helper.StringUtil.isBlank;
public class IngredientsProductFragment extends BaseFragment implements IIngredientsProductPresenter.View {
public static final Pattern INGREDIENT_PATTERN = Pattern.compile("[\\p{L}\\p{Nd}(),.-]+");
public static final Pattern ALLERGEN_PATTERN = Pattern.compile("[\\p{L}\\p{Nd}]+");
@BindView(R.id.textIngredientProduct)
TextView ingredientsProduct;
@BindView(R.id.textSubstanceProduct)
TextView substanceProduct;
@BindView(R.id.textTraceProduct)
TextView traceProduct;
@BindView(R.id.textAdditiveProduct)
TextView additiveProduct;
@BindView(R.id.textPalmOilProduct)
TextView palmOilProduct;
@BindView(R.id.textMayBeFromPalmOilProduct)
TextView mayBeFromPalmOilProduct;
@BindView(R.id.imageViewIngredients)
ImageView mImageIngredients;
@BindView(R.id.addPhotoLabel)
TextView addPhotoLabel;
@BindView(R.id.vitaminsTagsText)
TextView vitaminTagsTextView;
@BindView(R.id.mineralTagsText)
TextView mineralTagsTextView;
@BindView(R.id.aminoAcidTagsText)
TextView aminoAcidTagsTextView;
@BindView(R.id.otherNutritionTags)
TextView otherNutritionTagTextView;
@BindView(R.id.cvTextIngredientProduct)
CardView textIngredientProductCardView;
@BindView(R.id.cvTextSubstanceProduct)
CardView textSubstanceProductCardView;
@BindView(R.id.cvTextTraceProduct)
CardView textTraceProductCardView;
@BindView(R.id.cvTextAdditiveProduct)
CardView textAdditiveProductCardView;
@BindView(R.id.cvTextPalmOilProduct)
CardView textPalmOilProductCardView;
@BindView(R.id.cvVitaminsTagsText)
CardView vitaminsTagsTextCardView;
@BindView(R.id.cvAminoAcidTagsText)
CardView aminoAcidTagsTextCardView;
@BindView(R.id.cvMineralTagsText)
CardView mineralTagsTextCardView;
@BindView(R.id.cvOtherNutritionTags)
CardView otherNutritionTagsCardView;
private Product product;
private OpenFoodAPIClient api;
private String mUrlImage;
private State mState;
private String barcode;
private AdditiveDao mAdditiveDao;
private IProductRepository productRepository;
private IngredientsProductFragment mFragment;
private SendProduct mSendProduct;
private WikidataApiClient apiClientForWikiData;
private CustomTabActivityHelper customTabActivityHelper;
private CustomTabsIntent customTabsIntent;
private IIngredientsProductPresenter.Actions presenter;
//boolean to determine if image should be loaded or not
private boolean isLowBatteryMode = false;
@Override
public void onAttach(Context context) {
super.onAttach(context);
productRepository = ProductRepository.getInstance();
customTabActivityHelper = new CustomTabActivityHelper();
customTabsIntent = CustomTabsHelper.getCustomTabsIntent(getContext(), customTabActivityHelper.getSession());
Intent intent = getActivity().getIntent();
mState = (State) intent.getExtras().getSerializable("state");
product = mState.getProduct();
presenter = new IngredientsProductPresenter(product, this);
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
api = new OpenFoodAPIClient(getActivity());
apiClientForWikiData = new WikidataApiClient();
mFragment = this;
return createView(inflater, container, R.layout.fragment_ingredients_product);
}
@Override
public void onViewCreated(View view, @Nullable Bundle savedInstanceState) {
super.onViewCreated(view, savedInstanceState);
Intent intent = getActivity().getIntent();
mState = (State) intent.getExtras().getSerializable("state");
refreshView(mState);
}
@Override
public void refreshView(State state) {
super.refreshView(state);
mState = state;
if(getArguments()!=null){
mSendProduct = (SendProduct) getArguments().getSerializable("sendProduct");
}
mAdditiveDao = Utils.getAppDaoSession(getActivity()).getAdditiveDao();
// If the battery level is low and the user has checked "Disable Image" in Preferences, set isLowBatteryMode to true
SharedPreferences preferences = PreferenceManager.getDefaultSharedPreferences(getContext());
Utils.DISABLE_IMAGE_LOAD = preferences.getBoolean("disableImageLoad", false);
if (Utils.DISABLE_IMAGE_LOAD && Utils.getBatteryLevel(getContext())) {
isLowBatteryMode = true;
}
final Product product = mState.getProduct();
barcode = product.getCode();
List<String> vitaminTagsList = product.getVitaminTags();
List<String> aminoAcidTagsList = product.getAminoAcidTags();
List<String> mineralTags = product.getMineralTags();
List<String> otherNutritionTags = product.getOtherNutritionTags();
String prefix = " ";
if (!vitaminTagsList.isEmpty()) {
StringBuilder vitaminStringBuilder = new StringBuilder();
vitaminsTagsTextCardView.setVisibility(View.VISIBLE);
vitaminTagsTextView.setText(bold(getString(R.string.vitamin_tags_text)));
for (String vitamins : vitaminTagsList) {
vitaminStringBuilder.append(prefix);
prefix = ", ";
vitaminStringBuilder.append(trimLanguagePartFromString(vitamins));
}
vitaminTagsTextView.append(vitaminStringBuilder.toString());
}
if (!aminoAcidTagsList.isEmpty()) {
String aminoPrefix = " ";
StringBuilder aminoAcidStringBuilder = new StringBuilder();
aminoAcidTagsTextCardView.setVisibility(View.VISIBLE);
aminoAcidTagsTextView.setText(bold(getString(R.string.amino_acid_tags_text)));
for (String aminoAcid : aminoAcidTagsList) {
aminoAcidStringBuilder.append(aminoPrefix);
aminoPrefix = ", ";
aminoAcidStringBuilder.append(trimLanguagePartFromString(aminoAcid));
}
aminoAcidTagsTextView.append(aminoAcidStringBuilder.toString());
}
if (!mineralTags.isEmpty()) {
String mineralPrefix = " ";
StringBuilder mineralsStringBuilder = new StringBuilder();
mineralTagsTextCardView.setVisibility(View.VISIBLE);
mineralTagsTextView.setText(bold(getString(R.string.mineral_tags_text)));
for (String mineral : mineralTags) {
mineralsStringBuilder.append(mineralPrefix);
mineralPrefix = ", ";
mineralsStringBuilder.append(trimLanguagePartFromString(mineral));
}
mineralTagsTextView.append(mineralsStringBuilder);
}
if (!otherNutritionTags.isEmpty()) {
String otherNutritionPrefix = " ";
StringBuilder otherNutritionStringBuilder = new StringBuilder();
otherNutritionTagTextView.setVisibility(View.VISIBLE);
otherNutritionTagTextView.setText(bold(getString(R.string.other_tags_text)));
for (String otherSubstance : otherNutritionTags) {
otherNutritionStringBuilder.append(otherNutritionPrefix);
otherNutritionPrefix = ", ";
otherNutritionStringBuilder.append(trimLanguagePartFromString(otherSubstance));
}
otherNutritionTagTextView.append(otherNutritionStringBuilder.toString());
}
additiveProduct.setText(bold(getString(R.string.txtAdditives)));
presenter.loadAdditives();
if (isNotBlank(product.getImageIngredientsUrl())) {
addPhotoLabel.setVisibility(View.GONE);
// Load Image if isLowBatteryMode is false
if (!isLowBatteryMode) {
Picasso.with(getContext())
.load(product.getImageIngredientsUrl())
.into(mImageIngredients);
} else {
mImageIngredients.setVisibility(View.GONE);
}
mUrlImage = product.getImageIngredientsUrl();
}
//useful when this fragment is used in offline saving
if (mSendProduct != null && isNotBlank(mSendProduct.getImgupload_ingredients())) {
addPhotoLabel.setVisibility(View.GONE);
mUrlImage = mSendProduct.getImgupload_ingredients();
Picasso.with(getContext()).load("file://" + mUrlImage).config(Bitmap.Config.RGB_565).into(mImageIngredients);
}
List<String> allergens = getAllergens();
if (mState != null && product.getIngredientsText() != null) {
textIngredientProductCardView.setVisibility(View.VISIBLE);
SpannableStringBuilder txtIngredients = new SpannableStringBuilder(product.getIngredientsText().replace("_", ""));
txtIngredients = setSpanBoldBetweenTokens(txtIngredients, allergens);
int ingredientsListAt = Math.max(0, txtIngredients.toString().indexOf(":"));
if (!txtIngredients.toString().substring(ingredientsListAt).trim().isEmpty()) {
ingredientsProduct.setText(txtIngredients);
}
}
if (!allergens.isEmpty()) {
textSubstanceProductCardView.setVisibility(View.VISIBLE);
substanceProduct.setMovementMethod(LinkMovementMethod.getInstance());
substanceProduct.setText(bold(getString(R.string.txtSubstances)));
substanceProduct.append(" ");
String allergen;
for (int i = 0; i < allergens.size() - 1; i++) {
allergen = allergens.get(i);
substanceProduct.append(Utils.getClickableText(allergen, allergen, SearchType.ALLERGEN, getActivity(), customTabsIntent));
substanceProduct.append(", ");
}
allergen = allergens.get(allergens.size() - 1);
substanceProduct.append(Utils.getClickableText(allergen, allergen, SearchType.ALLERGEN, getActivity(), customTabsIntent));
}
if (!isBlank(product.getTraces())) {
textTraceProductCardView.setVisibility(View.VISIBLE);
traceProduct.setMovementMethod(LinkMovementMethod.getInstance());
traceProduct.setText(bold(getString(R.string.txtTraces)));
traceProduct.append(" ");
String trace;
String traces[] = product.getTraces().split(",");
for (int i = 0; i < traces.length - 1; i++) {
trace = traces[i];
traceProduct.append(Utils.getClickableText(trace, trace, SearchType.TRACE, getActivity(), customTabsIntent));
traceProduct.append(", ");
}
trace = traces[traces.length - 1];
traceProduct.append(Utils.getClickableText(trace, trace, SearchType.TRACE, getActivity(), customTabsIntent));
}
if (!(product.getIngredientsFromPalmOilN() == 0 && product.getIngredientsFromOrThatMayBeFromPalmOilN() == 0)) {
textPalmOilProductCardView.setVisibility(View.VISIBLE);
mayBeFromPalmOilProduct.setVisibility(View.VISIBLE);
if (!product.getIngredientsFromPalmOilTags().isEmpty()) {
palmOilProduct.setText(bold(getString(R.string.txtPalmOilProduct)));
palmOilProduct.append(" ");
palmOilProduct.append(product.getIngredientsFromPalmOilTags().toString().replaceAll("[\\[,\\]]", ""));
} else {
palmOilProduct.setVisibility(View.GONE);
}
if (!product.getIngredientsThatMayBeFromPalmOilTags().isEmpty()) {
mayBeFromPalmOilProduct.setText(bold(getString(R.string.txtMayBeFromPalmOilProduct)));
mayBeFromPalmOilProduct.append(" ");
mayBeFromPalmOilProduct.append(product.getIngredientsThatMayBeFromPalmOilTags().toString().replaceAll("[\\[,\\]]", ""));
} else {
mayBeFromPalmOilProduct.setVisibility(View.GONE);
}
}
}
private CharSequence getAdditiveTag(AdditiveName additive) {
SpannableStringBuilder spannableStringBuilder = new SpannableStringBuilder();
ClickableSpan clickableSpan = new ClickableSpan() {
@Override
public void onClick(View view) {
if (additive.getIsWikiDataIdPresent()) {
apiClientForWikiData.doSomeThing(additive.getWikiDataId(), new WikidataApiClient.OnWikiResponse() {
@Override
public void onresponse(boolean value, JSONObject result) {
if (value) {
ProductActivity productActivity = (ProductActivity) getActivity();
productActivity.showBottomScreen(result, additive.getWikiDataId(), 3, additive.getName());
} else {
ProductBrowsingListActivity.startActivity(getContext(), additive.getName(), SearchType.ADDITIVE);
}
}
});
} else {
ProductBrowsingListActivity.startActivity(getContext(), additive.getName(), SearchType.ADDITIVE);
}
}
};
spannableStringBuilder.append(additive.getName());
spannableStringBuilder.setSpan(clickableSpan, 0, spannableStringBuilder.length(), SPAN_EXCLUSIVE_EXCLUSIVE);
return spannableStringBuilder;
}
/**
* @return the string after trimming the language code from the tag,
* e.g. returns folic-acid for en:folic-acid
*/
private String trimLanguagePartFromString(String string) {
return string.substring(3);
}
private SpannableStringBuilder setSpanBoldBetweenTokens(CharSequence text, List<String> allergens) {
final SpannableStringBuilder ssb = new SpannableStringBuilder(text);
Matcher m = INGREDIENT_PATTERN.matcher(ssb);
while (m.find()) {
final String tm = m.group();
final String allergenValue = tm.replaceAll("[(),.-]+", "");
for (String allergen : allergens) {
if (allergen.equalsIgnoreCase(allergenValue)) {
int start = m.start();
int end = m.end();
if (tm.contains("(")) {
start += 1;
} else if (tm.contains(")")) {
end -= 1;
}
ssb.setSpan(new StyleSpan(Typeface.BOLD), start, end, SPAN_EXCLUSIVE_EXCLUSIVE);
}
}
}
ssb.insert(0, Utils.bold(getString(R.string.txtIngredients) + ' '));
return ssb;
}
@Override
public void showAdditives(List<AdditiveName> additives) {
additiveProduct.setText(bold(getString(R.string.txtAdditives)));
additiveProduct.setMovementMethod(LinkMovementMethod.getInstance());
additiveProduct.append(" ");
additiveProduct.append("\n");
additiveProduct.setClickable(true);
additiveProduct.setMovementMethod(LinkMovementMethod.getInstance());
for (int i = 0; i < additives.size() - 1; i++) {
additiveProduct.append(getAdditiveTag(additives.get(i)));
additiveProduct.append("\n");
}
additiveProduct.append(getAdditiveTag((additives.get(additives.size() - 1))));
}
@Override
public void showAdditivesState(String state) {
switch (state) {
case LOADING: {
textAdditiveProductCardView.setVisibility(View.VISIBLE);
additiveProduct.append(getString(R.string.txtLoading));
break;
}
case EMPTY: {
textAdditiveProductCardView.setVisibility(View.GONE);
break;
}
}
}
private List<String> getAllergens() {
if (mState.getProduct() == null || mState.getProduct().getAllergens() == null) {
return Collections.emptyList();
}
List<String> list = new ArrayList<>();
Matcher m = ALLERGEN_PATTERN.matcher(mState.getProduct().getAllergens().replace(",", ""));
while (m.find()) {
final String tma = m.group();
boolean canAdd = true;
for (String allergen : list) {
if (tma.equalsIgnoreCase(allergen)) {
canAdd = false;
break;
}
}
if (canAdd) {
list.add(tma);
}
}
return list;
}
@OnClick(R.id.imageViewIngredients)
public void openFullScreen(View v) {
if (mUrlImage != null) {
Intent intent = new Intent(v.getContext(), FullScreenImage.class);
Bundle bundle = new Bundle();
bundle.putString("imageurl", mUrlImage);
intent.putExtras(bundle);
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
ActivityOptionsCompat options = ActivityOptionsCompat.
makeSceneTransitionAnimation(getActivity(), (View) mImageIngredients,
getActivity().getString(R.string.product_transition));
startActivity(intent, options.toBundle());
} else {
startActivity(intent);
}
} else {
// take a picture
if (ContextCompat.checkSelfPermission(getActivity(), CAMERA) != PERMISSION_GRANTED) {
ActivityCompat.requestPermissions(getActivity(), new String[]{CAMERA}, MY_PERMISSIONS_REQUEST_CAMERA);
} else {
EasyImage.openCamera(this, 0);
// EasyImage.openGallery(this);
}
}
}
private void onPhotoReturned(File photoFile) {
ProductImage image = new ProductImage(barcode, INGREDIENTS, photoFile);
image.setFilePath(photoFile.getAbsolutePath());
api.postImg(getContext(), image);
addPhotoLabel.setVisibility(View.GONE);
mUrlImage = photoFile.getAbsolutePath();
Picasso.with(getContext())
.load(photoFile)
.fit()
.into(mImageIngredients);
}
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
super.onActivityResult(requestCode, resultCode, data);
if (requestCode == CropImage.CROP_IMAGE_ACTIVITY_REQUEST_CODE) {
CropImage.ActivityResult result = CropImage.getActivityResult(data);
if (resultCode == RESULT_OK) {
Uri resultUri = result.getUri();
onPhotoReturned(new File(resultUri.getPath()));
} else if (resultCode == CropImage.CROP_IMAGE_ACTIVITY_RESULT_ERROR_CODE) {
Exception error = result.getError();
}
}
EasyImage.handleActivityResult(requestCode, resultCode, data, getActivity(), new DefaultCallback() {
@Override
public void onImagePickerError(Exception e, EasyImage.ImageSource source, int type) {
//Some error handling
}
@Override
public void onImagesPicked(List<File> imageFiles, EasyImage.ImageSource source, int type) {
CropImage.activity(Uri.fromFile(imageFiles.get(0)))
.setCropMenuCropButtonIcon(R.drawable.ic_check_white_24dp)
.setAllowFlipping(false)
.setOutputUri(Utils.getOutputPicUri(getContext()))
.start(getContext(), mFragment);
}
@Override
public void onCanceled(EasyImage.ImageSource source, int type) {
// Cancel handling; you might want to remove the taken photo if it was canceled
if (source == EasyImage.ImageSource.CAMERA) {
File photoFile = EasyImage.lastlyTakenButCanceledPhoto(getContext());
if (photoFile != null) photoFile.delete();
}
}
});
}
@Override
public void onRequestPermissionsResult(int requestCode, @NonNull String permissions[], @NonNull int[] grantResults) {
switch (requestCode) {
case MY_PERMISSIONS_REQUEST_CAMERA: {
if (grantResults.length <= 0 || grantResults[0] != PERMISSION_GRANTED) {
new MaterialDialog.Builder(getActivity())
.title(R.string.permission_title)
.content(R.string.permission_denied)
.negativeText(R.string.txtNo)
.positiveText(R.string.txtYes)
.onPositive((dialog, which) -> {
Intent intent = new Intent();
intent.setAction(Settings.ACTION_APPLICATION_DETAILS_SETTINGS);
Uri uri = Uri.fromParts("package", getActivity().getPackageName(), null);
intent.setData(uri);
startActivity(intent);
})
.show();
} else {
EasyImage.openCamera(this, 0);
}
}
}
}
public String getIngredients() {
return mUrlImage;
}
@Override
public void onDestroyView() {
presenter.dispose();
super.onDestroyView();
}
}
 | 1 | 65,709 | This should actually read as follows: `for (int i = 0; i < allergens.size(); i++)` | openfoodfacts-openfoodfacts-androidapp | java
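As the comment notes, `i <= allergens.size() - 1` is just a roundabout `i < allergens.size()`; and since the fragment still appends the last allergen separately after the loop, the patched bound would emit the final allergen twice, with a trailing ", ". A sketch of the separator-first idiom that needs no special-cased last element (same calls as the fragment, assuming the post-loop append is removed):

for (int i = 0; i < allergens.size(); i++) {
    if (i > 0) {
        substanceProduct.append(", ");
    }
    String allergen = allergens.get(i);
    substanceProduct.append(Utils.getClickableText(allergen, allergen,
            SearchType.ALLERGEN, getActivity(), customTabsIntent));
}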
@@ -1192,6 +1192,10 @@ func (b *ColListTableBuilder) SetValue(i, j int, v values.Value) error {
}
func (b *ColListTableBuilder) AppendValue(j int, v values.Value) error {
+ if v.IsNull() {
+ return b.AppendNil(j)
+ }
+
switch v.Type() {
case semantic.Bool:
return b.AppendBool(j, v.Bool()) | 1 | package execute
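The hunk above short-circuits null values to AppendNil before the type switch, so a null values.Value never reaches the typed accessors. A compact standalone sketch of the same guard-first dispatch (simplified stand-in types, not the actual flux API):

package main

import "fmt"

// value is a toy stand-in for values.Value with just the calls used here.
type value struct {
    null bool
    b    bool
}

func (v value) IsNull() bool { return v.null }
func (v value) Bool() bool   { return v.b }

// builder is a toy stand-in for ColListTableBuilder with one column.
type builder struct{ col []interface{} }

func (b *builder) appendNil(j int) error          { b.col = append(b.col, nil); return nil }
func (b *builder) appendBool(j int, v bool) error { b.col = append(b.col, v); return nil }

// appendValue mirrors the patched AppendValue: nulls are dispatched
// before any typed accessor is touched.
func (b *builder) appendValue(j int, v value) error {
    if v.IsNull() {
        return b.appendNil(j)
    }
    return b.appendBool(j, v.Bool())
}

func main() {
    var b builder
    _ = b.appendValue(0, value{null: true})
    _ = b.appendValue(0, value{b: true})
    fmt.Println(b.col) // prints [<nil> true]
}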
import (
"errors"
"fmt"
"sort"
"sync/atomic"
"github.com/apache/arrow/go/arrow/array"
"github.com/google/go-cmp/cmp"
"github.com/influxdata/flux"
"github.com/influxdata/flux/arrow"
"github.com/influxdata/flux/memory"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
const (
DefaultStartColLabel = "_start"
DefaultStopColLabel = "_stop"
DefaultTimeColLabel = "_time"
DefaultValueColLabel = "_value"
)
func GroupKeyForRowOn(i int, cr flux.ColReader, on map[string]bool) flux.GroupKey {
cols := make([]flux.ColMeta, 0, len(on))
vs := make([]values.Value, 0, len(on))
for j, c := range cr.Cols() {
if !on[c.Label] {
continue
}
cols = append(cols, c)
vs = append(vs, ValueForRow(cr, i, j))
}
return NewGroupKey(cols, vs)
}
func GroupKeyForRowOnArrow(i int, cr flux.ArrowColReader, on map[string]bool) flux.GroupKey {
cols := make([]flux.ColMeta, 0, len(on))
vs := make([]values.Value, 0, len(on))
for j, c := range cr.Cols() {
if !on[c.Label] {
continue
}
cols = append(cols, c)
vs = append(vs, ValueForRowArrow(cr, i, j))
}
return NewGroupKey(cols, vs)
}
// OneTimeTable is a Table that permits reading data only once.
// Specifically the ValueIterator may only be consumed once from any of the columns.
type OneTimeTable interface {
flux.Table
onetime()
}
// CacheOneTimeTable returns a table that can be read multiple times.
// If the table is not a OneTimeTable it is returned directly.
// Otherwise its contents are read into a new table.
func CacheOneTimeTable(t flux.Table, a *memory.Allocator) (flux.Table, error) {
_, ok := t.(OneTimeTable)
if !ok {
return t, nil
}
return CopyTable(t, a)
}
// CopyTable returns a copy of the table and is OneTimeTable safe.
func CopyTable(t flux.Table, a *memory.Allocator) (flux.Table, error) {
builder := NewColListTableBuilder(t.Key(), a)
cols := t.Cols()
colMap := make([]int, len(cols))
for j, c := range cols {
colMap[j] = j
if _, err := builder.AddCol(c); err != nil {
return nil, err
}
}
if err := AppendMappedTable(t, builder, colMap); err != nil {
return nil, err
}
// ColListTableBuilders do not error
nb, _ := builder.Table()
return nb, nil
}
// AddTableCols adds the columns of t onto builder.
func AddTableCols(t flux.Table, builder TableBuilder) error {
cols := t.Cols()
for _, c := range cols {
if _, err := builder.AddCol(c); err != nil {
return err
}
}
return nil
}
func AddTableKeyCols(key flux.GroupKey, builder TableBuilder) error {
for _, c := range key.Cols() {
if _, err := builder.AddCol(c); err != nil {
return err
}
}
return nil
}
// AddNewTableCols adds the columns of t onto builder that did not already exist.
// Returns the mapping of builder cols to table cols.
func AddNewTableCols(t flux.Table, builder TableBuilder, colMap []int) ([]int, error) {
cols := t.Cols()
existing := builder.Cols()
if l := len(builder.Cols()); cap(colMap) < l {
colMap = make([]int, len(builder.Cols()))
} else {
colMap = colMap[:l]
}
for j := range colMap {
colMap[j] = -1
}
for j, c := range cols {
found := false
for ej, ec := range existing {
if c.Label == ec.Label {
colMap[ej] = j
found = true
break
}
}
if !found {
if _, err := builder.AddCol(c); err != nil {
return nil, err
}
colMap = append(colMap, j)
}
}
return colMap, nil
}
// AppendMappedTable appends data from table t onto builder.
// The colMap is a map of builder column index to table column index.
func AppendMappedTable(t flux.Table, builder TableBuilder, colMap []int) error {
if len(t.Cols()) == 0 {
return nil
}
if err := t.DoArrow(func(cr flux.ArrowColReader) error {
return AppendMappedColsArrow(cr, builder, colMap)
}); err != nil {
return err
}
return builder.LevelColumns()
}
// AppendTable appends data from table t onto builder.
// This function assumes builder and t have the same column schema.
func AppendTable(t flux.Table, builder TableBuilder) error {
if len(t.Cols()) == 0 {
return nil
}
return t.DoArrow(func(cr flux.ArrowColReader) error {
return AppendColsArrow(cr, builder)
})
}
// AppendMappedColsArrow appends all columns from cr onto builder.
// The colMap is a map of builder column index to cr column index.
func AppendMappedColsArrow(cr flux.ArrowColReader, builder TableBuilder, colMap []int) error {
if len(colMap) != len(builder.Cols()) {
return errors.New("AppendMappedCols: colMap must have an entry for each table builder column")
}
for j := range builder.Cols() {
if colMap[j] >= 0 {
if err := AppendColArrow(j, colMap[j], cr, builder); err != nil {
return err
}
}
}
return nil
}
// AppendCols appends all columns from cr onto builder.
// This function assumes that builder and cr have the same column schema.
func AppendCols(cr flux.ColReader, builder TableBuilder) error {
for j := range builder.Cols() {
if err := AppendCol(j, j, cr, builder); err != nil {
return err
}
}
return nil
}
// AppendColsArrow appends all columns from cr onto builder.
// This function assumes that builder and cr have the same column schema.
func AppendColsArrow(cr flux.ArrowColReader, builder TableBuilder) error {
for j := range builder.Cols() {
if err := AppendColArrow(j, j, cr, builder); err != nil {
return err
}
}
return nil
}
// AppendCol append a column from cr onto builder
// The indexes bj and cj are builder and col reader indexes respectively.
func AppendCol(bj, cj int, cr flux.ColReader, builder TableBuilder) error {
if cj < 0 || cj >= len(cr.Cols()) {
return errors.New("AppendCol column reader index out of bounds")
}
if bj < 0 || bj >= len(builder.Cols()) {
return errors.New("AppendCol builder index out of bounds")
}
c := cr.Cols()[cj]
switch c.Type {
case flux.TBool:
return builder.AppendBools(bj, cr.Bools(cj))
case flux.TInt:
return builder.AppendInts(bj, cr.Ints(cj))
case flux.TUInt:
return builder.AppendUInts(bj, cr.UInts(cj))
case flux.TFloat:
return builder.AppendFloats(bj, cr.Floats(cj))
case flux.TString:
return builder.AppendStrings(bj, cr.Strings(cj))
case flux.TTime:
return builder.AppendTimes(bj, cr.Times(cj))
default:
PanicUnknownType(c.Type)
}
return nil
}
// AppendColArrow append a column from cr onto builder
// The indexes bj and cj are builder and col reader indexes respectively.
func AppendColArrow(bj, cj int, cr flux.ArrowColReader, builder TableBuilder) error {
if cj < 0 || cj >= len(cr.Cols()) {
return errors.New("AppendCol column reader index out of bounds")
}
if bj < 0 || bj >= len(builder.Cols()) {
return errors.New("AppendCol builder index out of bounds")
}
c := cr.Cols()[cj]
switch c.Type {
case flux.TBool:
vs := cr.Bools(cj)
for i := 0; i < vs.Len(); i++ {
if err := builder.AppendBool(bj, vs.Value(i)); err != nil {
return err
}
}
return nil
case flux.TInt:
return builder.AppendInts(bj, cr.Ints(cj).Int64Values())
case flux.TUInt:
return builder.AppendUInts(bj, cr.UInts(cj).Uint64Values())
case flux.TFloat:
return builder.AppendFloats(bj, cr.Floats(cj).Float64Values())
case flux.TString:
vs := cr.Strings(cj)
for i := 0; i < vs.Len(); i++ {
if err := builder.AppendString(bj, vs.ValueString(i)); err != nil {
return err
}
}
return nil
case flux.TTime:
vs := cr.Times(cj)
for i := 0; i < vs.Len(); i++ {
if err := builder.AppendTime(bj, values.Time(vs.Value(i))); err != nil {
return err
}
}
return nil
default:
PanicUnknownType(c.Type)
}
return nil
}
// AppendRecord appends the record from cr onto builder assuming matching columns.
func AppendRecord(i int, cr flux.ColReader, builder TableBuilder) error {
if !BuilderColsMatchReader(builder, cr) {
return errors.New("AppendRecord column schema mismatch")
}
for j := range builder.Cols() {
if err := builder.AppendValue(j, ValueForRow(cr, i, j)); err != nil {
return err
}
}
return nil
}
// AppendRecordArrow appends the record from cr onto builder assuming matching columns.
func AppendRecordArrow(i int, cr flux.ArrowColReader, builder TableBuilder) error {
if !BuilderColsMatchReaderArrow(builder, cr) {
return errors.New("AppendRecord column schema mismatch")
}
for j := range builder.Cols() {
if err := builder.AppendValue(j, ValueForRowArrow(cr, i, j)); err != nil {
return err
}
}
return nil
}
// AppendMappedRecordWithDefaults appends the record at row i from cr onto builder, using colMap as a map of builder index to cr index.
// If an entry in the colMap indicates a mismatched column, a default value is assigned to the builder's column.
func AppendMappedRecordWithDefaults(i int, cr flux.ColReader, builder TableBuilder, colMap []int) error {
if len(colMap) != len(builder.Cols()) {
return errors.New("AppendMappedRecordWithDefaults: colMap must have an entry for each table builder column")
}
// TODO(adam): these zero values should be set to null when we have null support
for j, c := range builder.Cols() {
var err error
switch c.Type {
case flux.TBool:
var val bool
if colMap[j] >= 0 {
val = cr.Bools(colMap[j])[i]
}
err = builder.AppendBool(j, val)
case flux.TInt:
var val int64
if colMap[j] >= 0 {
val = cr.Ints(colMap[j])[i]
}
err = builder.AppendInt(j, val)
case flux.TUInt:
var val uint64
if colMap[j] >= 0 {
val = cr.UInts(colMap[j])[i]
}
err = builder.AppendUInt(j, val)
case flux.TFloat:
var val float64
if colMap[j] >= 0 {
val = cr.Floats(colMap[j])[i]
}
err = builder.AppendFloat(j, val)
case flux.TString:
var val string
if colMap[j] >= 0 {
val = cr.Strings(colMap[j])[i]
}
err = builder.AppendString(j, val)
case flux.TTime:
var val Time
if colMap[j] >= 0 {
val = cr.Times(colMap[j])[i]
}
err = builder.AppendTime(j, val)
default:
PanicUnknownType(c.Type)
}
if err != nil {
return err
}
}
return nil
}
// AppendMappedRecordWithDefaultsArrow appends the record at row i from cr onto builder, using colMap as a map of builder index to cr index.
// If an entry in the colMap indicates a mismatched column, a default value is assigned to the builder's column.
func AppendMappedRecordWithDefaultsArrow(i int, cr flux.ArrowColReader, builder TableBuilder, colMap []int) error {
if len(colMap) != len(builder.Cols()) {
return errors.New("AppendMappedRecordWithDefaultsArrow: colMap must have an entry for each table builder column")
}
// TODO(adam): these zero values should be set to null when we have null support
for j, c := range builder.Cols() {
var err error
switch c.Type {
case flux.TBool:
var val bool
if colMap[j] >= 0 {
val = cr.Bools(colMap[j]).Value(i)
}
err = builder.AppendBool(j, val)
case flux.TInt:
var val int64
if colMap[j] >= 0 {
val = cr.Ints(colMap[j]).Value(i)
}
err = builder.AppendInt(j, val)
case flux.TUInt:
var val uint64
if colMap[j] >= 0 {
val = cr.UInts(colMap[j]).Value(i)
}
err = builder.AppendUInt(j, val)
case flux.TFloat:
var val float64
if colMap[j] >= 0 {
val = cr.Floats(colMap[j]).Value(i)
}
err = builder.AppendFloat(j, val)
case flux.TString:
var val string
if colMap[j] >= 0 {
val = cr.Strings(colMap[j]).ValueString(i)
}
err = builder.AppendString(j, val)
case flux.TTime:
var val Time
if colMap[j] >= 0 {
val = values.Time(cr.Times(colMap[j]).Value(i))
}
err = builder.AppendTime(j, val)
default:
PanicUnknownType(c.Type)
}
if err != nil {
return err
}
}
return nil
}
// AppendMappedRecordExplicit appends the record at row i from cr onto builder, using colMap as a map of builder index to cr index.
// If an entry in the colMap indicates a mismatched column, no value is appended.
func AppendMappedRecordExplicit(i int, cr flux.ColReader, builder TableBuilder, colMap []int) error {
// TODO(adam): these zero values should be set to null when we have null support
for j, c := range builder.Cols() {
if colMap[j] < 0 {
continue
}
var err error
switch c.Type {
case flux.TBool:
err = builder.AppendBool(j, cr.Bools(colMap[j])[i])
case flux.TInt:
err = builder.AppendInt(j, cr.Ints(colMap[j])[i])
case flux.TUInt:
err = builder.AppendUInt(j, cr.UInts(colMap[j])[i])
case flux.TFloat:
err = builder.AppendFloat(j, cr.Floats(colMap[j])[i])
case flux.TString:
err = builder.AppendString(j, cr.Strings(colMap[j])[i])
case flux.TTime:
err = builder.AppendTime(j, cr.Times(colMap[j])[i])
default:
PanicUnknownType(c.Type)
}
if err != nil {
return err
}
}
return nil
}
// AppendMappedRecordExplicitArrow appends the record at row i from cr onto builder, using colMap as a map of builder index to cr index.
// If an entry in the colMap indicates a mismatched column, no value is appended.
func AppendMappedRecordExplicitArrow(i int, cr flux.ArrowColReader, builder TableBuilder, colMap []int) error {
// TODO(adam): these zero values should be set to null when we have null support
for j := range builder.Cols() {
if colMap[j] < 0 {
continue
}
if err := builder.AppendValue(j, ValueForRowArrow(cr, i, j)); err != nil {
return err
}
}
return nil
}
// BuilderColsMatchReader returns true if builder and cr have identical column sets (order dependent)
func BuilderColsMatchReader(builder TableBuilder, cr flux.ColReader) bool {
return colsMatch(builder.Cols(), cr.Cols())
}
// BuilderColsMatchReaderArrow returns true if builder and cr have identical column sets (order dependent)
func BuilderColsMatchReaderArrow(builder TableBuilder, cr flux.ArrowColReader) bool {
return colsMatch(builder.Cols(), cr.Cols())
}
// TablesEqual takes two flux tables and compares them. Returns false if the tables have different keys, different
// columns, or if the data in any column does not match. Returns true otherwise. This function will consume the
// ColumnReader, so if you are calling this from a Process method, you may need to copy the table if you need to
// iterate over the data for other calculations.
func TablesEqual(left, right flux.Table, alloc *memory.Allocator) (bool, error) {
if colsMatch(left.Key().Cols(), right.Key().Cols()) && colsMatch(left.Cols(), right.Cols()) {
eq := true
// Buffer both tables into ColListTableBuilders so their columns can be compared directly below.
leftBuffer := NewColListTableBuilder(left.Key(), alloc)
if err := AddTableCols(left, leftBuffer); err != nil {
return false, err
}
if err := AppendTable(left, leftBuffer); err != nil {
return false, err
}
rightBuffer := NewColListTableBuilder(right.Key(), alloc)
if err := AddTableCols(right, rightBuffer); err != nil {
return false, err
}
if err := AppendTable(right, rightBuffer); err != nil {
return false, err
}
if leftBuffer.NRows() != rightBuffer.NRows() {
return false, nil
}
for j, c := range leftBuffer.Cols() {
switch c.Type {
case flux.TBool:
eq = cmp.Equal(leftBuffer.cols[j].(*boolColumnBuilder).data,
rightBuffer.cols[j].(*boolColumnBuilder).data)
case flux.TInt:
eq = cmp.Equal(leftBuffer.cols[j].(*intColumnBuilder).data,
rightBuffer.cols[j].(*intColumnBuilder).data)
case flux.TUInt:
eq = cmp.Equal(leftBuffer.cols[j].(*uintColumnBuilder).data,
rightBuffer.cols[j].(*uintColumnBuilder).data)
case flux.TFloat:
eq = cmp.Equal(leftBuffer.cols[j].(*floatColumnBuilder).data,
rightBuffer.cols[j].(*floatColumnBuilder).data)
case flux.TString:
eq = cmp.Equal(leftBuffer.cols[j].(*stringColumnBuilder).data,
rightBuffer.cols[j].(*stringColumnBuilder).data)
case flux.TTime:
eq = cmp.Equal(leftBuffer.cols[j].(*timeColumnBuilder).data,
rightBuffer.cols[j].(*timeColumnBuilder).data)
default:
PanicUnknownType(c.Type)
}
if !eq {
return false, nil
}
}
return eq, nil
}
return false, nil
}
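// Editor's sketch (not part of the original file; the helper name is
// hypothetical): because TablesEqual consumes the tables' column readers,
// buffer a table first — for example with CopyTable, which returns a
// re-readable ColListTable — if its data is still needed after the comparison.
func compareKeepingLeft(left, right flux.Table, alloc *memory.Allocator) (bool, flux.Table, error) {
	leftCopy, err := CopyTable(left, alloc)
	if err != nil {
		return false, nil, err
	}
	eq, err := TablesEqual(leftCopy, right, alloc)
	if err != nil {
		return false, nil, err
	}
	return eq, leftCopy, nil
}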
func colsMatch(left, right []flux.ColMeta) bool {
if len(left) != len(right) {
return false
}
for j, l := range left {
if l != right[j] {
return false
}
}
return true
}
// ColMap writes a mapping of builder index to column reader index into colMap.
// When colMap does not have enough capacity a new colMap is allocated.
// The colMap is always returned.
func ColMap(colMap []int, builder TableBuilder, cr flux.ColReader) []int {
l := len(builder.Cols())
if cap(colMap) < l {
colMap = make([]int, len(builder.Cols()))
} else {
colMap = colMap[:l]
}
cols := cr.Cols()
for j, c := range builder.Cols() {
colMap[j] = ColIdx(c.Label, cols)
}
return colMap
}
// ColMapArrow writes a mapping of builder index to column reader index into colMap.
// When colMap does not have enough capacity a new colMap is allocated.
// The colMap is always returned.
func ColMapArrow(colMap []int, builder TableBuilder, cr flux.ArrowColReader) []int {
l := len(builder.Cols())
if cap(colMap) < l {
colMap = make([]int, len(builder.Cols()))
} else {
colMap = colMap[:l]
}
cols := cr.Cols()
for j, c := range builder.Cols() {
colMap[j] = ColIdx(c.Label, cols)
}
return colMap
}
// AppendRecordForCols appends only the columns provided from cr onto builder.
func AppendRecordForCols(i int, cr flux.ColReader, builder TableBuilder, cols []flux.ColMeta) error {
if len(cr.Cols()) != len(builder.Cols()) || len(cr.Cols()) != len(cols) {
return errors.New("appended records must include all columns")
}
for j := range cols {
if err := builder.AppendValue(j, ValueForRow(cr, i, j)); err != nil {
return err
}
}
return nil
}
func AppendKeyValues(key flux.GroupKey, builder TableBuilder) error {
for j, c := range key.Cols() {
idx := ColIdx(c.Label, builder.Cols())
if idx < 0 {
return fmt.Errorf("group key column %s not found in output table", c.Label)
}
if err := builder.AppendValue(idx, key.Value(j)); err != nil {
return err
}
}
return nil
}
func ContainsStr(strs []string, str string) bool {
for _, s := range strs {
if str == s {
return true
}
}
return false
}
func ColIdx(label string, cols []flux.ColMeta) int {
for j, c := range cols {
if c.Label == label {
return j
}
}
return -1
}
func HasCol(label string, cols []flux.ColMeta) bool {
return ColIdx(label, cols) >= 0
}
// ValueForRow retrieves a value from a column reader at the given index.
func ValueForRow(cr flux.ColReader, i, j int) values.Value {
t := cr.Cols()[j].Type
switch t {
case flux.TString:
return values.NewString(cr.Strings(j)[i])
case flux.TInt:
return values.NewInt(cr.Ints(j)[i])
case flux.TUInt:
return values.NewUInt(cr.UInts(j)[i])
case flux.TFloat:
return values.NewFloat(cr.Floats(j)[i])
case flux.TBool:
return values.NewBool(cr.Bools(j)[i])
case flux.TTime:
return values.NewTime(cr.Times(j)[i])
default:
PanicUnknownType(t)
return values.InvalidValue
}
}
// ValueForRowArrow retrieves a value from an arrow column reader at the given index.
func ValueForRowArrow(cr flux.ArrowColReader, i, j int) values.Value {
t := cr.Cols()[j].Type
switch t {
case flux.TString:
return values.NewString(cr.Strings(j).ValueString(i))
case flux.TInt:
return values.NewInt(cr.Ints(j).Value(i))
case flux.TUInt:
return values.NewUInt(cr.UInts(j).Value(i))
case flux.TFloat:
return values.NewFloat(cr.Floats(j).Value(i))
case flux.TBool:
return values.NewBool(cr.Bools(j).Value(i))
case flux.TTime:
return values.NewTime(values.Time(cr.Times(j).Value(i)))
default:
PanicUnknownType(t)
return values.InvalidValue
}
}
// TableBuilder builds tables that can be used multiple times
type TableBuilder interface {
Key() flux.GroupKey
NRows() int
NCols() int
Cols() []flux.ColMeta
// AddCol increases the size of the table by one column.
// The index of the column is returned.
AddCol(flux.ColMeta) (int, error)
// Set sets the value at the specified coordinates
// The rows and columns must exist before calling set, otherwise Set panics.
SetBool(i, j int, value bool) error
SetInt(i, j int, value int64) error
SetUInt(i, j int, value uint64) error
SetFloat(i, j int, value float64) error
SetString(i, j int, value string) error
SetTime(i, j int, value Time) error
SetValue(i, j int, value values.Value) error
SetNil(i, j int) error
// Append will add a single value to the end of a column. Will set the number of
// rows in the table to the size of the new column. It's the caller's job to make sure
// that the expected number of rows in each column is equal.
AppendBool(j int, value bool) error
AppendInt(j int, value int64) error
AppendUInt(j int, value uint64) error
AppendFloat(j int, value float64) error
AppendString(j int, value string) error
AppendTime(j int, value Time) error
AppendValue(j int, value values.Value) error
AppendNil(j int) error
// AppendBools and similar functions will append multiple values to column j. As above,
// it will set the number of rows in the table to the size of the new column. It's the
// caller's job to make sure that the expected number of rows in each column is equal.
AppendBools(j int, values []bool) error
AppendInts(j int, values []int64) error
AppendUInts(j int, values []uint64) error
AppendFloats(j int, values []float64) error
AppendStrings(j int, values []string) error
AppendTimes(j int, values []Time) error
// TODO(adam): determine if there's a useful API for AppendValues
// AppendValues(j int, values []values.Value)
// GrowBools and similar functions will extend column j by n zero-values for the respective type.
// If the column has enough capacity, no reallocation is necessary. If the capacity is insufficient,
// a new slice is allocated with 1.5*newCapacity. As with the Append functions, it is the
// caller's job to make sure that the expected number of rows in each column is equal.
GrowBools(j, n int) error
GrowInts(j, n int) error
GrowUInts(j, n int) error
GrowFloats(j, n int) error
GrowStrings(j, n int) error
GrowTimes(j, n int) error
// LevelColumns will check for columns that are too short and Grow them
// so that each column is of uniform size.
LevelColumns() error
// Sort the rows of the table by the values of the columns in the order listed.
Sort(cols []string, desc bool)
// Clear removes all rows, while preserving the column meta data.
ClearData()
// Table returns the table that has been built.
// Further modifications of the builder will not affect the returned table.
Table() (flux.Table, error)
}
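// Editor's sketch (not part of the original file; the key and allocator
// arguments are assumed): typical TableBuilder use per the contract above —
// add the columns, append values column by column keeping lengths equal,
// then level and materialize the table.
func buildExampleTable(key flux.GroupKey, alloc *memory.Allocator) (flux.Table, error) {
	b := NewColListTableBuilder(key, alloc)
	tIdx, err := b.AddCol(flux.ColMeta{Label: DefaultTimeColLabel, Type: flux.TTime})
	if err != nil {
		return nil, err
	}
	vIdx, err := b.AddCol(flux.ColMeta{Label: DefaultValueColLabel, Type: flux.TFloat})
	if err != nil {
		return nil, err
	}
	// One row: it is the caller's job to keep every column the same length.
	if err := b.AppendTime(tIdx, Time(0)); err != nil {
		return nil, err
	}
	if err := b.AppendFloat(vIdx, 1.5); err != nil {
		return nil, err
	}
	if err := b.LevelColumns(); err != nil {
		return nil, err
	}
	return b.Table()
}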
type ColListTableBuilder struct {
key flux.GroupKey
colMeta []flux.ColMeta
cols []columnBuilder
nrows int
alloc *Allocator
}
func NewColListTableBuilder(key flux.GroupKey, a *memory.Allocator) *ColListTableBuilder {
return &ColListTableBuilder{
key: key,
alloc: &Allocator{Allocator: a},
}
}
func (b *ColListTableBuilder) Key() flux.GroupKey {
return b.key
}
func (b *ColListTableBuilder) NRows() int {
return b.nrows
}
func (b *ColListTableBuilder) Len() int {
return b.nrows
}
func (b *ColListTableBuilder) NCols() int {
return len(b.cols)
}
func (b *ColListTableBuilder) Cols() []flux.ColMeta {
return b.colMeta
}
func (b *ColListTableBuilder) AddCol(c flux.ColMeta) (int, error) {
if ColIdx(c.Label, b.Cols()) >= 0 {
return -1, fmt.Errorf("table builder already has column with label %s", c.Label)
}
newIdx := len(b.cols)
var col columnBuilder
switch c.Type {
case flux.TBool:
col = &boolColumnBuilder{
ColMeta: c,
alloc: b.alloc,
nils: make(map[int]bool),
}
b.colMeta = append(b.colMeta, c)
b.cols = append(b.cols, col)
if b.NRows() > 0 {
if err := b.GrowBools(newIdx, b.NRows()); err != nil {
return -1, err
}
}
case flux.TInt:
col = &intColumnBuilder{
ColMeta: c,
alloc: b.alloc,
nils: make(map[int]bool),
}
b.colMeta = append(b.colMeta, c)
b.cols = append(b.cols, col)
if b.NRows() > 0 {
if err := b.GrowInts(newIdx, b.NRows()); err != nil {
return -1, err
}
}
case flux.TUInt:
col = &uintColumnBuilder{
ColMeta: c,
alloc: b.alloc,
nils: make(map[int]bool),
}
b.colMeta = append(b.colMeta, c)
b.cols = append(b.cols, col)
if b.NRows() > 0 {
if err := b.GrowUInts(newIdx, b.NRows()); err != nil {
return -1, err
}
}
case flux.TFloat:
col = &floatColumnBuilder{
ColMeta: c,
alloc: b.alloc,
nils: make(map[int]bool),
}
b.colMeta = append(b.colMeta, c)
b.cols = append(b.cols, col)
if b.NRows() > 0 {
if err := b.GrowFloats(newIdx, b.NRows()); err != nil {
return -1, err
}
}
case flux.TString:
col = &stringColumnBuilder{
ColMeta: c,
alloc: b.alloc,
nils: make(map[int]bool),
}
b.colMeta = append(b.colMeta, c)
b.cols = append(b.cols, col)
if b.NRows() > 0 {
if err := b.GrowStrings(newIdx, b.NRows()); err != nil {
return -1, err
}
}
case flux.TTime:
col = &timeColumnBuilder{
ColMeta: c,
alloc: b.alloc,
nils: make(map[int]bool),
}
b.colMeta = append(b.colMeta, c)
b.cols = append(b.cols, col)
if b.NRows() > 0 {
if err := b.GrowTimes(newIdx, b.NRows()); err != nil {
return -1, err
}
}
default:
PanicUnknownType(c.Type)
}
return newIdx, nil
}
func (b *ColListTableBuilder) LevelColumns() error {
for idx, c := range b.colMeta {
switch c.Type {
case flux.TBool:
toGrow := b.NRows() - b.cols[idx].Len()
if toGrow > 0 {
if err := b.GrowBools(idx, toGrow); err != nil {
return err
}
}
if toGrow < 0 {
return fmt.Errorf("column %s is longer than expected length of table", c.Label)
}
case flux.TInt:
toGrow := b.NRows() - b.cols[idx].Len()
if toGrow > 0 {
if err := b.GrowInts(idx, toGrow); err != nil {
return err
}
}
if toGrow < 0 {
return fmt.Errorf("column %s is longer than expected length of table", c.Label)
}
case flux.TUInt:
toGrow := b.NRows() - b.cols[idx].Len()
if toGrow > 0 {
if err := b.GrowUInts(idx, toGrow); err != nil {
return err
}
}
if toGrow < 0 {
return fmt.Errorf("column %s is longer than expected length of table", c.Label)
}
case flux.TFloat:
toGrow := b.NRows() - b.cols[idx].Len()
if toGrow > 0 {
if err := b.GrowFloats(idx, toGrow); err != nil {
return err
}
}
if toGrow < 0 {
return fmt.Errorf("column %s is longer than expected length of table", c.Label)
}
case flux.TString:
toGrow := b.NRows() - b.cols[idx].Len()
if toGrow > 0 {
if err := b.GrowStrings(idx, toGrow); err != nil {
return err
}
}
if toGrow < 0 {
return fmt.Errorf("column %s is longer than expected length of table", c.Label)
}
case flux.TTime:
toGrow := b.NRows() - b.cols[idx].Len()
if toGrow > 0 {
if err := b.GrowTimes(idx, toGrow); err != nil {
return err
}
}
if toGrow < 0 {
return fmt.Errorf("column %s is longer than expected length of table", c.Label)
}
default:
PanicUnknownType(c.Type)
}
}
return nil
}
func (b *ColListTableBuilder) SetBool(i int, j int, value bool) error {
if err := b.checkCol(j, flux.TBool); err != nil {
return err
}
b.cols[j].(*boolColumnBuilder).data[i] = value
b.cols[j].SetNil(i, false)
return nil
}
func (b *ColListTableBuilder) AppendBool(j int, value bool) error {
if err := b.checkCol(j, flux.TBool); err != nil {
return err
}
col := b.cols[j].(*boolColumnBuilder)
col.data = b.alloc.AppendBools(col.data, value)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) AppendBools(j int, values []bool) error {
if err := b.checkCol(j, flux.TBool); err != nil {
return err
}
col := b.cols[j].(*boolColumnBuilder)
col.data = b.alloc.AppendBools(col.data, values...)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) GrowBools(j, n int) error {
if err := b.checkCol(j, flux.TBool); err != nil {
return err
}
col := b.cols[j].(*boolColumnBuilder)
col.data = b.alloc.GrowBools(col.data, n)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) SetInt(i int, j int, value int64) error {
if err := b.checkCol(j, flux.TInt); err != nil {
return err
}
b.cols[j].(*intColumnBuilder).data[i] = value
b.cols[j].SetNil(i, false)
return nil
}
func (b *ColListTableBuilder) AppendInt(j int, value int64) error {
if err := b.checkCol(j, flux.TInt); err != nil {
return err
}
col := b.cols[j].(*intColumnBuilder)
col.data = b.alloc.AppendInts(col.data, value)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) AppendInts(j int, values []int64) error {
if err := b.checkCol(j, flux.TInt); err != nil {
return err
}
col := b.cols[j].(*intColumnBuilder)
col.data = b.alloc.AppendInts(col.data, values...)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) GrowInts(j, n int) error {
if err := b.checkCol(j, flux.TInt); err != nil {
return err
}
col := b.cols[j].(*intColumnBuilder)
col.data = b.alloc.GrowInts(col.data, n)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) SetUInt(i int, j int, value uint64) error {
if err := b.checkCol(j, flux.TUInt); err != nil {
return err
}
b.cols[j].(*uintColumnBuilder).data[i] = value
b.cols[j].SetNil(i, false)
return nil
}
func (b *ColListTableBuilder) AppendUInt(j int, value uint64) error {
if err := b.checkCol(j, flux.TUInt); err != nil {
return err
}
col := b.cols[j].(*uintColumnBuilder)
col.data = b.alloc.AppendUInts(col.data, value)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) AppendUInts(j int, values []uint64) error {
if err := b.checkCol(j, flux.TUInt); err != nil {
return err
}
col := b.cols[j].(*uintColumnBuilder)
col.data = b.alloc.AppendUInts(col.data, values...)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) GrowUInts(j, n int) error {
if err := b.checkCol(j, flux.TUInt); err != nil {
return err
}
col := b.cols[j].(*uintColumnBuilder)
col.data = b.alloc.GrowUInts(col.data, n)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) SetFloat(i int, j int, value float64) error {
if err := b.checkCol(j, flux.TFloat); err != nil {
return err
}
b.cols[j].(*floatColumnBuilder).data[i] = value
b.cols[j].SetNil(i, false)
return nil
}
func (b *ColListTableBuilder) AppendFloat(j int, value float64) error {
if err := b.checkCol(j, flux.TFloat); err != nil {
return err
}
col := b.cols[j].(*floatColumnBuilder)
col.data = b.alloc.AppendFloats(col.data, value)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) AppendFloats(j int, values []float64) error {
if err := b.checkCol(j, flux.TFloat); err != nil {
return err
}
col := b.cols[j].(*floatColumnBuilder)
col.data = b.alloc.AppendFloats(col.data, values...)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) GrowFloats(j, n int) error {
if err := b.checkCol(j, flux.TFloat); err != nil {
return err
}
col := b.cols[j].(*floatColumnBuilder)
col.data = b.alloc.GrowFloats(col.data, n)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) SetString(i int, j int, value string) error {
if err := b.checkCol(j, flux.TString); err != nil {
return err
}
b.cols[j].(*stringColumnBuilder).data[i] = value
b.cols[j].SetNil(i, false)
return nil
}
func (b *ColListTableBuilder) AppendString(j int, value string) error {
if err := b.checkCol(j, flux.TString); err != nil {
return err
}
col := b.cols[j].(*stringColumnBuilder)
col.data = b.alloc.AppendStrings(col.data, value)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) AppendStrings(j int, values []string) error {
if err := b.checkCol(j, flux.TString); err != nil {
return err
}
col := b.cols[j].(*stringColumnBuilder)
col.data = b.alloc.AppendStrings(col.data, values...)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) GrowStrings(j, n int) error {
if err := b.checkCol(j, flux.TString); err != nil {
return err
}
col := b.cols[j].(*stringColumnBuilder)
col.data = b.alloc.GrowStrings(col.data, n)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) SetTime(i int, j int, value Time) error {
if err := b.checkCol(j, flux.TTime); err != nil {
return err
}
b.cols[j].(*timeColumnBuilder).data[i] = value
b.cols[j].SetNil(i, false)
return nil
}
func (b *ColListTableBuilder) AppendTime(j int, value Time) error {
if err := b.checkCol(j, flux.TTime); err != nil {
return err
}
col := b.cols[j].(*timeColumnBuilder)
col.data = b.alloc.AppendTimes(col.data, value)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) AppendTimes(j int, values []Time) error {
if err := b.checkCol(j, flux.TTime); err != nil {
return err
}
col := b.cols[j].(*timeColumnBuilder)
col.data = b.alloc.AppendTimes(col.data, values...)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) GrowTimes(j, n int) error {
if err := b.checkCol(j, flux.TTime); err != nil {
return err
}
col := b.cols[j].(*timeColumnBuilder)
col.data = b.alloc.GrowTimes(col.data, n)
b.nrows = len(col.data)
return nil
}
func (b *ColListTableBuilder) SetValue(i, j int, v values.Value) error {
switch v.Type() {
case semantic.Bool:
return b.SetBool(i, j, v.Bool())
case semantic.Int:
return b.SetInt(i, j, v.Int())
case semantic.UInt:
return b.SetUInt(i, j, v.UInt())
case semantic.Float:
return b.SetFloat(i, j, v.Float())
case semantic.String:
return b.SetString(i, j, v.Str())
case semantic.Time:
return b.SetTime(i, j, v.Time())
default:
panic(fmt.Errorf("unexpected value type %v", v.Type()))
}
}
func (b *ColListTableBuilder) AppendValue(j int, v values.Value) error {
switch v.Type() {
case semantic.Bool:
return b.AppendBool(j, v.Bool())
case semantic.Int:
return b.AppendInt(j, v.Int())
case semantic.UInt:
return b.AppendUInt(j, v.UInt())
case semantic.Float:
return b.AppendFloat(j, v.Float())
case semantic.String:
return b.AppendString(j, v.Str())
case semantic.Time:
return b.AppendTime(j, v.Time())
default:
panic(fmt.Errorf("unexpected value type %v", v.Type()))
}
}
func (b *ColListTableBuilder) SetNil(i, j int) error {
if j < 0 || j >= len(b.cols) {
return fmt.Errorf("set nil: column does not exist, index out of bounds: %d", j)
}
if i < 0 || i >= b.cols[j].Len() {
return fmt.Errorf("set nil: row does not exist, index out of bounds: %d", i)
}
b.cols[j].SetNil(i, true)
return nil
}
func (b *ColListTableBuilder) AppendNil(j int) error {
if j < 0 || j >= len(b.cols) {
return fmt.Errorf("append nil: column does not exist, index out of bounds: %d", j)
}
typ := b.colMeta[j].Type
switch typ {
case flux.TBool:
if err := b.AppendBool(j, false); err != nil {
return err
}
case flux.TInt:
if err := b.AppendInt(j, 0); err != nil {
return err
}
case flux.TUInt:
if err := b.AppendUInt(j, 0); err != nil {
return err
}
case flux.TFloat:
if err := b.AppendFloat(j, 0.0); err != nil {
return err
}
case flux.TString:
if err := b.AppendString(j, ""); err != nil {
return err
}
case flux.TTime:
if err := b.AppendTime(j, 0); err != nil {
return err
}
default:
panic(fmt.Errorf("unexpected value type %v", typ))
}
return b.SetNil(b.nrows-1, j)
}
func (b *ColListTableBuilder) checkCol(j int, typ flux.ColType) error {
if j < 0 || j >= len(b.cols) {
return fmt.Errorf("column does not exist, index out of bounds: %d", j)
}
CheckColType(b.colMeta[j], typ)
return nil
}
func CheckColType(col flux.ColMeta, typ flux.ColType) {
if col.Type != typ {
panic(fmt.Errorf("column %s:%s is not of type %v", col.Label, col.Type, typ))
}
}
func PanicUnknownType(typ flux.ColType) {
panic(fmt.Errorf("unknown type %v", typ))
}
func (b *ColListTableBuilder) Bools(j int) []bool {
CheckColType(b.colMeta[j], flux.TBool)
return b.cols[j].(*boolColumnBuilder).data
}
func (b *ColListTableBuilder) Ints(j int) []int64 {
CheckColType(b.colMeta[j], flux.TInt)
return b.cols[j].(*intColumnBuilder).data
}
func (b *ColListTableBuilder) UInts(j int) []uint64 {
CheckColType(b.colMeta[j], flux.TUInt)
return b.cols[j].(*uintColumnBuilder).data
}
func (b *ColListTableBuilder) Floats(j int) []float64 {
CheckColType(b.colMeta[j], flux.TFloat)
return b.cols[j].(*floatColumnBuilder).data
}
func (b *ColListTableBuilder) Strings(j int) []string {
meta := b.colMeta[j]
CheckColType(meta, flux.TString)
return b.cols[j].(*stringColumnBuilder).data
}
func (b *ColListTableBuilder) Times(j int) []values.Time {
CheckColType(b.colMeta[j], flux.TTime)
return b.cols[j].(*timeColumnBuilder).data
}
// GetRow takes a row index and returns the record located at that index in the cache
func (b *ColListTableBuilder) GetRow(row int) values.Object {
record := values.NewObject()
var val values.Value
for j, col := range b.colMeta {
switch col.Type {
case flux.TBool:
val = values.NewBool(b.cols[j].(*boolColumnBuilder).data[row])
case flux.TInt:
val = values.NewInt(b.cols[j].(*intColumnBuilder).data[row])
case flux.TUInt:
val = values.NewUInt(b.cols[j].(*uintColumnBuilder).data[row])
case flux.TFloat:
val = values.NewFloat(b.cols[j].(*floatColumnBuilder).data[row])
case flux.TString:
val = values.NewString(b.cols[j].(*stringColumnBuilder).data[row])
case flux.TTime:
val = values.NewTime(b.cols[j].(*timeColumnBuilder).data[row])
}
record.Set(col.Label, val)
}
return record
}
func (b *ColListTableBuilder) Table() (flux.Table, error) {
// Create copy in mutable state
cols := make([]column, len(b.cols))
for i, cb := range b.cols {
cols[i] = cb.Copy()
}
return &ColListTable{
key: b.key,
colMeta: b.colMeta,
cols: cols,
nrows: b.nrows,
}, nil
}
// SliceColumns iterates over each column of b and re-slices them to the range
// [start:stop].
func (b *ColListTableBuilder) SliceColumns(start, stop int) error {
if start < 0 || start > stop || stop > b.nrows {
return fmt.Errorf("invalid start/stop parameters: %d/%d", start, stop)
}
for i, c := range b.cols {
switch c.Meta().Type {
case flux.TBool:
col := b.cols[i].(*boolColumnBuilder)
col.data = col.data[start:stop]
case flux.TInt:
col := b.cols[i].(*intColumnBuilder)
col.data = col.data[start:stop]
case flux.TUInt:
col := b.cols[i].(*uintColumnBuilder)
col.data = col.data[start:stop]
case flux.TFloat:
col := b.cols[i].(*floatColumnBuilder)
col.data = col.data[start:stop]
case flux.TString:
col := b.cols[i].(*stringColumnBuilder)
col.data = col.data[start:stop]
case flux.TTime:
col := b.cols[i].(*timeColumnBuilder)
col.data = col.data[start:stop]
default:
panic(fmt.Errorf("unexpected column type %v", c.Meta().Type))
}
}
b.nrows = stop - start
return nil
}
func (b *ColListTableBuilder) ClearData() {
for _, c := range b.cols {
c.Clear()
}
b.nrows = 0
}
func (b *ColListTableBuilder) Sort(cols []string, desc bool) {
colIdxs := make([]int, len(cols))
for i, label := range cols {
for j, c := range b.colMeta {
if c.Label == label {
colIdxs[i] = j
break
}
}
}
s := colListTableSorter{cols: colIdxs, desc: desc, b: b}
sort.Sort(s)
}
// ColListTable implements Table using list of columns.
// All data for the table is stored in RAM.
// As a result At* methods are provided directly on the table for easy access.
type ColListTable struct {
key flux.GroupKey
colMeta []flux.ColMeta
cols []column
nrows int
refCount int32
}
func (t *ColListTable) RefCount(n int) {
c := atomic.AddInt32(&t.refCount, int32(n))
if c == 0 {
for _, c := range t.cols {
c.Clear()
}
}
}
func (t *ColListTable) Key() flux.GroupKey {
return t.key
}
func (t *ColListTable) Cols() []flux.ColMeta {
return t.colMeta
}
func (t *ColListTable) Empty() bool {
return t.nrows == 0
}
func (t *ColListTable) NRows() int {
return t.nrows
}
func (t *ColListTable) Statistics() flux.Statistics {
return flux.Statistics{}
}
func (t *ColListTable) Len() int {
return t.nrows
}
func (t *ColListTable) Do(f func(flux.ColReader) error) error {
return t.DoArrow(func(cr flux.ArrowColReader) error {
return f(arrow.ColReader(cr))
})
}
func (t *ColListTable) DoArrow(f func(flux.ArrowColReader) error) error {
return f(t)
}
func (t *ColListTable) Bools(j int) *array.Boolean {
CheckColType(t.colMeta[j], flux.TBool)
return t.cols[j].(*boolColumn).data
}
func (t *ColListTable) Ints(j int) *array.Int64 {
CheckColType(t.colMeta[j], flux.TInt)
return t.cols[j].(*intColumn).data
}
func (t *ColListTable) UInts(j int) *array.Uint64 {
CheckColType(t.colMeta[j], flux.TUInt)
return t.cols[j].(*uintColumn).data
}
func (t *ColListTable) Floats(j int) *array.Float64 {
CheckColType(t.colMeta[j], flux.TFloat)
return t.cols[j].(*floatColumn).data
}
func (t *ColListTable) Strings(j int) *array.Binary {
meta := t.colMeta[j]
CheckColType(meta, flux.TString)
return t.cols[j].(*stringColumn).data
}
func (t *ColListTable) Times(j int) *array.Int64 {
CheckColType(t.colMeta[j], flux.TTime)
return t.cols[j].(*timeColumn).data
}
func (t *ColListTable) Copy() *ColListTable {
cpy := new(ColListTable)
cpy.key = t.key
cpy.nrows = t.nrows
cpy.colMeta = make([]flux.ColMeta, len(t.colMeta))
copy(cpy.colMeta, t.colMeta)
cpy.cols = make([]column, len(t.cols))
for i, c := range t.cols {
cpy.cols[i] = c.Copy()
}
return cpy
}
// GetRow takes a row index and returns the record located at that index in the cache
func (t *ColListTable) GetRow(row int) values.Object {
record := values.NewObject()
var val values.Value
for j, col := range t.colMeta {
switch col.Type {
case flux.TBool:
val = values.NewBool(t.cols[j].(*boolColumn).data.Value(row))
case flux.TInt:
val = values.NewInt(t.cols[j].(*intColumn).data.Value(row))
case flux.TUInt:
val = values.NewUInt(t.cols[j].(*uintColumn).data.Value(row))
case flux.TFloat:
val = values.NewFloat(t.cols[j].(*floatColumn).data.Value(row))
case flux.TString:
val = values.NewString(t.cols[j].(*stringColumn).data.ValueString(row))
case flux.TTime:
val = values.NewTime(values.Time(t.cols[j].(*timeColumn).data.Value(row)))
}
record.Set(col.Label, val)
}
return record
}
type colListTableSorter struct {
cols []int
desc bool
b *ColListTableBuilder
}
func (c colListTableSorter) Len() int {
return c.b.nrows
}
func (c colListTableSorter) Less(x int, y int) (less bool) {
for _, j := range c.cols {
if !c.b.cols[j].Equal(x, y) {
less = c.b.cols[j].Less(x, y)
break
}
}
if c.desc {
less = !less
}
return
}
func (c colListTableSorter) Swap(x int, y int) {
for _, col := range c.b.cols {
col.Swap(x, y)
}
}
type column interface {
Meta() flux.ColMeta
Clear()
Copy() column
}
type columnBuilder interface {
Meta() flux.ColMeta
Clear()
Copy() column
Len() int
SetNil(i int, isNil bool)
Equal(i, j int) bool
Less(i, j int) bool
Swap(i, j int)
}
type boolColumn struct {
flux.ColMeta
data *array.Boolean
}
func (c *boolColumn) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *boolColumn) Clear() {
if c.data != nil {
c.data.Release()
c.data = nil
}
}
func (c *boolColumn) Copy() column {
c.data.Retain()
return &boolColumn{
ColMeta: c.ColMeta,
data: c.data,
}
}
type boolColumnBuilder struct {
flux.ColMeta
data []bool
nils map[int]bool
alloc *Allocator
}
func (c *boolColumnBuilder) SetNil(i int, isNil bool) {
if isNil {
c.nils[i] = isNil
} else {
delete(c.nils, i)
}
}
func (c *boolColumnBuilder) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *boolColumnBuilder) Clear() {
c.alloc.Free(len(c.data), boolSize)
c.data = c.data[0:0]
}
func (c *boolColumnBuilder) Copy() column {
var data *array.Boolean
if len(c.nils) > 0 {
b := arrow.NewBoolBuilder(c.alloc.Allocator)
b.Reserve(len(c.data))
for i, v := range c.data {
if c.nils[i] {
b.UnsafeAppendBoolToBitmap(false)
continue
}
b.UnsafeAppend(v)
}
data = b.NewBooleanArray()
b.Release()
} else {
data = arrow.NewBool(c.data, c.alloc.Allocator)
}
col := &boolColumn{
ColMeta: c.ColMeta,
data: data,
}
return col
}
func (c *boolColumnBuilder) Len() int {
return len(c.data)
}
func (c *boolColumnBuilder) Equal(i, j int) bool {
return c.data[i] == c.data[j]
}
func (c *boolColumnBuilder) Less(i, j int) bool {
if c.data[i] == c.data[j] {
return false
}
return c.data[i]
}
func (c *boolColumnBuilder) Swap(i, j int) {
c.data[i], c.data[j] = c.data[j], c.data[i]
}
type intColumn struct {
flux.ColMeta
data *array.Int64
}
func (c *intColumn) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *intColumn) Clear() {
if c.data != nil {
c.data.Release()
c.data = nil
}
}
func (c *intColumn) Copy() column {
c.data.Retain()
return &intColumn{
ColMeta: c.ColMeta,
data: c.data,
}
}
type intColumnBuilder struct {
flux.ColMeta
data []int64
nils map[int]bool
alloc *Allocator
}
func (c *intColumnBuilder) SetNil(i int, isNil bool) {
if isNil {
c.nils[i] = isNil
} else {
delete(c.nils, i)
}
}
func (c *intColumnBuilder) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *intColumnBuilder) Clear() {
c.alloc.Free(len(c.data), int64Size)
c.data = c.data[0:0]
}
func (c *intColumnBuilder) Copy() column {
var data *array.Int64
if len(c.nils) > 0 {
b := arrow.NewIntBuilder(c.alloc.Allocator)
b.Reserve(len(c.data))
for i, v := range c.data {
if c.nils[i] {
b.UnsafeAppendBoolToBitmap(false)
continue
}
b.UnsafeAppend(v)
}
data = b.NewInt64Array()
b.Release()
} else {
data = arrow.NewInt(c.data, c.alloc.Allocator)
}
col := &intColumn{
ColMeta: c.ColMeta,
data: data,
}
return col
}
func (c *intColumnBuilder) Len() int {
return len(c.data)
}
func (c *intColumnBuilder) Equal(i, j int) bool {
return c.data[i] == c.data[j]
}
func (c *intColumnBuilder) Less(i, j int) bool {
return c.data[i] < c.data[j]
}
func (c *intColumnBuilder) Swap(i, j int) {
c.data[i], c.data[j] = c.data[j], c.data[i]
}
type uintColumn struct {
flux.ColMeta
data *array.Uint64
}
func (c *uintColumn) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *uintColumn) Clear() {
if c.data != nil {
c.data.Release()
c.data = nil
}
}
func (c *uintColumn) Copy() column {
c.data.Retain()
return &uintColumn{
ColMeta: c.ColMeta,
data: c.data,
}
}
type uintColumnBuilder struct {
flux.ColMeta
data []uint64
nils map[int]bool
alloc *Allocator
}
func (c *uintColumnBuilder) SetNil(i int, isNil bool) {
if isNil {
c.nils[i] = isNil
} else {
delete(c.nils, i)
}
}
func (c *uintColumnBuilder) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *uintColumnBuilder) Clear() {
c.alloc.Free(len(c.data), uint64Size)
c.data = c.data[0:0]
}
func (c *uintColumnBuilder) Copy() column {
var data *array.Uint64
if len(c.nils) > 0 {
b := arrow.NewUintBuilder(c.alloc.Allocator)
b.Reserve(len(c.data))
for i, v := range c.data {
if c.nils[i] {
b.UnsafeAppendBoolToBitmap(false)
continue
}
b.UnsafeAppend(v)
}
data = b.NewUint64Array()
b.Release()
} else {
data = arrow.NewUint(c.data, c.alloc.Allocator)
}
col := &uintColumn{
ColMeta: c.ColMeta,
data: data,
}
return col
}
func (c *uintColumnBuilder) Len() int {
return len(c.data)
}
func (c *uintColumnBuilder) Equal(i, j int) bool {
return c.data[i] == c.data[j]
}
func (c *uintColumnBuilder) Less(i, j int) bool {
return c.data[i] < c.data[j]
}
func (c *uintColumnBuilder) Swap(i, j int) {
c.data[i], c.data[j] = c.data[j], c.data[i]
}
type floatColumn struct {
flux.ColMeta
data *array.Float64
}
func (c *floatColumn) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *floatColumn) Clear() {
if c.data != nil {
c.data.Release()
c.data = nil
}
}
func (c *floatColumn) Copy() column {
c.data.Retain()
return &floatColumn{
ColMeta: c.ColMeta,
data: c.data,
}
}
type floatColumnBuilder struct {
flux.ColMeta
data []float64
nils map[int]bool
alloc *Allocator
}
func (c *floatColumnBuilder) SetNil(i int, isNil bool) {
if isNil {
c.nils[i] = isNil
} else {
delete(c.nils, i)
}
}
func (c *floatColumnBuilder) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *floatColumnBuilder) Clear() {
c.alloc.Free(len(c.data), float64Size)
c.data = c.data[0:0]
}
func (c *floatColumnBuilder) Copy() column {
var data *array.Float64
if len(c.nils) > 0 {
b := arrow.NewFloatBuilder(c.alloc.Allocator)
b.Reserve(len(c.data))
for i, v := range c.data {
if c.nils[i] {
b.UnsafeAppendBoolToBitmap(false)
continue
}
b.UnsafeAppend(v)
}
data = b.NewFloat64Array()
b.Release()
} else {
data = arrow.NewFloat(c.data, c.alloc.Allocator)
}
col := &floatColumn{
ColMeta: c.ColMeta,
data: data,
}
return col
}
func (c *floatColumnBuilder) Len() int {
return len(c.data)
}
func (c *floatColumnBuilder) Equal(i, j int) bool {
return c.data[i] == c.data[j]
}
func (c *floatColumnBuilder) Less(i, j int) bool {
return c.data[i] < c.data[j]
}
func (c *floatColumnBuilder) Swap(i, j int) {
c.data[i], c.data[j] = c.data[j], c.data[i]
}
type stringColumn struct {
flux.ColMeta
data *array.Binary
}
func (c *stringColumn) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *stringColumn) Clear() {
if c.data != nil {
c.data.Release()
c.data = nil
}
}
func (c *stringColumn) Copy() column {
c.data.Retain()
return &stringColumn{
ColMeta: c.ColMeta,
data: c.data,
}
}
type stringColumnBuilder struct {
flux.ColMeta
data []string
nils map[int]bool
alloc *Allocator
}
func (c *stringColumnBuilder) SetNil(i int, isNil bool) {
if isNil {
c.nils[i] = isNil
} else {
delete(c.nils, i)
}
}
func (c *stringColumnBuilder) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *stringColumnBuilder) Clear() {
c.alloc.Free(len(c.data), stringSize)
c.data = c.data[0:0]
}
func (c *stringColumnBuilder) Copy() column {
var data *array.Binary
if len(c.nils) > 0 {
b := arrow.NewStringBuilder(c.alloc.Allocator)
b.Reserve(len(c.data))
for i, v := range c.data {
if c.nils[i] {
b.UnsafeAppendBoolToBitmap(false)
continue
}
b.AppendString(v)
}
data = b.NewBinaryArray()
b.Release()
} else {
data = arrow.NewString(c.data, c.alloc.Allocator)
}
col := &stringColumn{
ColMeta: c.ColMeta,
data: data,
}
return col
}
func (c *stringColumnBuilder) Len() int {
return len(c.data)
}
func (c *stringColumnBuilder) Equal(i, j int) bool {
return c.data[i] == c.data[j]
}
func (c *stringColumnBuilder) Less(i, j int) bool {
return c.data[i] < c.data[j]
}
func (c *stringColumnBuilder) Swap(i, j int) {
c.data[i], c.data[j] = c.data[j], c.data[i]
}
type timeColumn struct {
flux.ColMeta
data *array.Int64
}
func (c *timeColumn) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *timeColumn) Clear() {
if c.data != nil {
c.data.Release()
c.data = nil
}
}
func (c *timeColumn) Copy() column {
c.data.Retain()
return &timeColumn{
ColMeta: c.ColMeta,
data: c.data,
}
}
type timeColumnBuilder struct {
flux.ColMeta
data []Time
nils map[int]bool
alloc *Allocator
}
func (c *timeColumnBuilder) SetNil(i int, isNil bool) {
if isNil {
c.nils[i] = isNil
} else {
delete(c.nils, i)
}
}
func (c *timeColumnBuilder) Meta() flux.ColMeta {
return c.ColMeta
}
func (c *timeColumnBuilder) Clear() {
c.alloc.Free(len(c.data), timeSize)
c.data = c.data[0:0]
}
func (c *timeColumnBuilder) Copy() column {
b := arrow.NewIntBuilder(c.alloc.Allocator)
b.Reserve(len(c.data))
for i, v := range c.data {
if c.nils[i] {
b.UnsafeAppendBoolToBitmap(false)
continue
}
b.UnsafeAppend(int64(v))
}
col := &timeColumn{
ColMeta: c.ColMeta,
data: b.NewInt64Array(),
}
b.Release()
return col
}
func (c *timeColumnBuilder) Len() int {
return len(c.data)
}
func (c *timeColumnBuilder) Equal(i, j int) bool {
return c.data[i] == c.data[j]
}
func (c *timeColumnBuilder) Less(i, j int) bool {
return c.data[i] < c.data[j]
}
func (c *timeColumnBuilder) Swap(i, j int) {
c.data[i], c.data[j] = c.data[j], c.data[i]
}
type TableBuilderCache interface {
// TableBuilder returns an existing or new TableBuilder for the given meta data.
// The boolean return value indicates if TableBuilder is new.
TableBuilder(key flux.GroupKey) (TableBuilder, bool)
ForEachBuilder(f func(flux.GroupKey, TableBuilder))
}
type tableBuilderCache struct {
tables *GroupLookup
alloc *memory.Allocator
triggerSpec flux.TriggerSpec
}
func NewTableBuilderCache(a *memory.Allocator) *tableBuilderCache {
return &tableBuilderCache{
tables: NewGroupLookup(),
alloc: a,
}
}
type tableState struct {
builder TableBuilder
trigger Trigger
}
func (d *tableBuilderCache) SetTriggerSpec(ts flux.TriggerSpec) {
d.triggerSpec = ts
}
func (d *tableBuilderCache) Table(key flux.GroupKey) (flux.Table, error) {
b, ok := d.lookupState(key)
if !ok {
return nil, fmt.Errorf("table not found with key %v", key)
}
return b.builder.Table()
}
func (d *tableBuilderCache) lookupState(key flux.GroupKey) (tableState, bool) {
v, ok := d.tables.Lookup(key)
if !ok {
return tableState{}, false
}
return v.(tableState), true
}
// TableBuilder will return the builder for the specified table.
// If no builder exists, one will be created.
func (d *tableBuilderCache) TableBuilder(key flux.GroupKey) (TableBuilder, bool) {
b, ok := d.lookupState(key)
if !ok {
builder := NewColListTableBuilder(key, d.alloc)
t := NewTriggerFromSpec(d.triggerSpec)
b = tableState{
builder: builder,
trigger: t,
}
d.tables.Set(key, b)
}
return b.builder, !ok
}
func (d *tableBuilderCache) ForEachBuilder(f func(flux.GroupKey, TableBuilder)) {
d.tables.Range(func(key flux.GroupKey, value interface{}) {
f(key, value.(tableState).builder)
})
}
func (d *tableBuilderCache) DiscardTable(key flux.GroupKey) {
b, ok := d.lookupState(key)
if ok {
b.builder.ClearData()
}
}
func (d *tableBuilderCache) ExpireTable(key flux.GroupKey) {
b, ok := d.tables.Delete(key)
if ok {
b.(tableState).builder.ClearData()
}
}
func (d *tableBuilderCache) ForEach(f func(flux.GroupKey)) {
d.tables.Range(func(key flux.GroupKey, value interface{}) {
f(key)
})
}
func (d *tableBuilderCache) ForEachWithContext(f func(flux.GroupKey, Trigger, TableContext)) {
d.tables.Range(func(key flux.GroupKey, value interface{}) {
b := value.(tableState)
f(key, b.trigger, TableContext{
Key: key,
Count: b.builder.NRows(),
})
})
}
| 1 | 9,477 | This is so useful and safe; I want to propose making AppendBool, AppendInt, etc. all private functions, and forcing us to use AppendValue(j, values.New(false)), etc. | influxdata-flux | go |
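A rough sketch of what that proposal could look like at a call site (hypothetical; `values.New` wraps a plain Go value into a `values.Value`, and the wrapper name below is illustrative only):

```go
package example

import (
	"github.com/influxdata/flux/execute"
	"github.com/influxdata/flux/values"
)

// With AppendBool, AppendInt, etc. made private, callers would construct a
// values.Value and go through AppendValue, so the null check added in the
// patch above applies on every code path.
func appendCell(b execute.TableBuilder, j int, v interface{}) error {
	return b.AppendValue(j, values.New(v)) // e.g. values.New(false) for a bool cell
}
```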
@@ -140,7 +140,9 @@ export function useImperativeHandle(ref, createHandle, args) {
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._args = args;
- ref.current = createHandle();
+ if (ref) {
+ ref.current = createHandle();
+ }
}
}
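For context, a minimal sketch of why the guard matters (hypothetical component; when the parent renders it without attaching a ref, `ref` is null inside `forwardRef`, which is exactly the case the patch handles):

```js
import { h } from 'preact';
import { forwardRef } from 'preact/compat';
import { useImperativeHandle, useRef } from 'preact/hooks';

// Rendering h(FancyInput, {}) with no ref makes `ref` null here, so
// useImperativeHandle must not assign to ref.current unconditionally.
const FancyInput = forwardRef((props, ref) => {
	const inputRef = useRef(null);
	useImperativeHandle(ref, () => ({
		focus: () => inputRef.current && inputRef.current.focus()
	}), []);
	return h('input', { ref: inputRef, ...props });
});
```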
| 1 | import { options } from 'preact';
/** @type {number} */
let currentIndex;
/** @type {import('./internal').Component} */
let currentComponent;
/** @type {Array<import('./internal').Component>} */
let afterPaintEffects = [];
let oldBeforeRender = options.render;
options.render = vnode => {
if (oldBeforeRender) oldBeforeRender(vnode);
currentComponent = vnode._component;
currentIndex = 0;
if (currentComponent.__hooks) {
currentComponent.__hooks._pendingEffects = handleEffects(currentComponent.__hooks._pendingEffects);
}
};
let oldAfterDiff = options.diffed;
options.diffed = vnode => {
if (oldAfterDiff) oldAfterDiff(vnode);
const c = vnode._component;
if (!c) return;
const hooks = c.__hooks;
if (hooks) {
hooks._pendingLayoutEffects = handleEffects(hooks._pendingLayoutEffects);
}
};
let oldBeforeUnmount = options.unmount;
options.unmount = vnode => {
if (oldBeforeUnmount) oldBeforeUnmount(vnode);
const c = vnode._component;
if (!c) return;
const hooks = c.__hooks;
if (hooks) {
hooks._list.forEach(hook => hook._cleanup && hook._cleanup());
}
};
/**
* Get a hook's state from the currentComponent
* @param {number} index The index of the hook to get
* @returns {import('./internal').HookState}
*/
function getHookState(index) {
if (options.hook) options.hook(currentComponent);
// Largely inspired by:
// * https://github.com/michael-klein/funcy.js/blob/f6be73468e6ec46b0ff5aa3cc4c9baf72a29025a/src/hooks/core_hooks.mjs
// * https://github.com/michael-klein/funcy.js/blob/650beaa58c43c33a74820a3c98b3c7079cf2e333/src/renderer.mjs
// Other implementations to look at:
// * https://codesandbox.io/s/mnox05qp8
const hooks = currentComponent.__hooks || (currentComponent.__hooks = { _list: [], _pendingEffects: [], _pendingLayoutEffects: [] });
if (index >= hooks._list.length) {
hooks._list.push({});
}
return hooks._list[index];
}
export function useState(initialState) {
return useReducer(invokeOrReturn, initialState);
}
export function useReducer(reducer, initialState, init) {
/** @type {import('./internal').ReducerHookState} */
const hookState = getHookState(currentIndex++);
if (!hookState._component) {
hookState._component = currentComponent;
hookState._value = [
!init ? invokeOrReturn(null, initialState) : init(initialState),
action => {
const nextValue = reducer(hookState._value[0], action);
if (hookState._value[0]!==nextValue) {
hookState._value[0] = nextValue;
hookState._component.setState({});
}
}
];
}
return hookState._value;
}
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
currentComponent.__hooks._pendingEffects.push(state);
afterPaint(currentComponent);
}
}
/**
* @param {import('./internal').Effect} callback
* @param {any[]} args
*/
export function useLayoutEffect(callback, args) {
/** @type {import('./internal').EffectHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._value = callback;
state._args = args;
currentComponent.__hooks._pendingLayoutEffects.push(state);
}
}
export function useRef(initialValue) {
const state = getHookState(currentIndex++);
if (!state._value) {
state._value = { current: initialValue };
}
return state._value;
}
export function useImperativeHandle(ref, createHandle, args) {
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._args = args;
ref.current = createHandle();
}
}
/**
* @param {() => any} callback
* @param {any[]} args
*/
export function useMemo(callback, args) {
/** @type {import('./internal').MemoHookState} */
const state = getHookState(currentIndex++);
if (argsChanged(state._args, args)) {
state._args = args;
state._callback = callback;
return state._value = callback();
}
return state._value;
}
/**
* @param {() => void} callback
* @param {any[]} args
*/
export function useCallback(callback, args) {
return useMemo(() => callback, args);
}
/**
* @param {import('./internal').PreactContext} context
*/
export function useContext(context) {
const provider = currentComponent.context[context._id];
if (!provider) return context._defaultValue;
const state = getHookState(currentIndex++);
// This is probably not safe to convert to "!"
if (state._value == null) {
state._value = true;
provider.sub(currentComponent);
}
return provider.props.value;
}
/**
* Display a custom label for a custom hook for the devtools panel
* @type {<T>(value: T, cb?: (value: T) => string | number) => void}
*/
export function useDebugValue(value, formatter) {
if (options.useDebugValue) {
options.useDebugValue(formatter ? formatter(value) : value);
}
}
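// Example (editor's note, hypothetical custom hook): label the value shown
// in the devtools panel, optionally passing a formatter:
//
//   useDebugValue(date, d => d.toDateString());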
// Note: if someone used Component.debounce = requestAnimationFrame,
// then effects will ALWAYS run on the NEXT frame instead of the current one, incurring a ~16ms delay.
// Perhaps this is not such a big deal.
/**
* Invoke a component's pending effects after the next frame renders
* @type {(component: import('./internal').Component) => void}
*/
/* istanbul ignore next */
let afterPaint = () => {};
/**
* After paint effects consumer.
*/
function flushAfterPaintEffects() {
afterPaintEffects.some(component => {
component._afterPaintQueued = false;
if (component._parentDom) {
component.__hooks._pendingEffects = handleEffects(component.__hooks._pendingEffects);
}
});
afterPaintEffects = [];
}
function scheduleFlushAfterPaint() {
setTimeout(flushAfterPaintEffects);
}
/* istanbul ignore else */
if (typeof window !== 'undefined') {
afterPaint = (component) => {
if (!component._afterPaintQueued && (component._afterPaintQueued = true) && afterPaintEffects.push(component) === 1) {
/* istanbul ignore next */
if (options.requestAnimationFrame) {
options.requestAnimationFrame(flushAfterPaintEffects);
}
else {
requestAnimationFrame(scheduleFlushAfterPaint);
}
}
};
}
function handleEffects(effects) {
effects.forEach(invokeCleanup);
effects.forEach(invokeEffect);
return [];
}
function invokeCleanup(hook) {
if (hook._cleanup) hook._cleanup();
}
/**
* Invoke a Hook's effect
* @param {import('./internal').EffectHookState} hook
*/
function invokeEffect(hook) {
const result = hook._value();
if (typeof result === 'function') hook._cleanup = result;
}
function argsChanged(oldArgs, newArgs) {
return !oldArgs || newArgs.some((arg, index) => arg !== oldArgs[index]);
}
function invokeOrReturn(arg, f) {
return typeof f === 'function' ? f(arg) : f;
}
| 1 | 13,582 | Really, really small nit: I believe there are 3x tabs in here? Should it be just 2x? | preactjs-preact | js |
@@ -748,6 +748,17 @@ Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
Default: false,
Advanced: true,
+ }, {
+ Name: "leave_parts_on_error",
+ Provider: "AWS",
+ Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery.
+
+It should be set to true for resuming uploads across different sessions.
+
+WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up.
+`,
+ Default: false,
+ Advanced: true,
}},
})
} | 1 | // Package s3 provides an interface to Amazon S3 object storage
package s3
// FIXME need to prevent anything but ListDir working for s3://
/*
Progress of port to aws-sdk
* Don't really need o.meta at all?
What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/
import (
"context"
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/swift"
"github.com/pkg/errors"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your S3 provider.",
Examples: []fs.OptionExample{{
Value: "AWS",
Help: "Amazon Web Services (AWS) S3",
}, {
Value: "Alibaba",
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
}, {
Value: "Ceph",
Help: "Ceph Object Storage",
}, {
Value: "DigitalOcean",
Help: "Digital Ocean Spaces",
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
}, {
Value: "IBMCOS",
Help: "IBM COS S3",
}, {
Value: "Minio",
Help: "Minio Object Storage",
}, {
Value: "Netease",
Help: "Netease Object Storage (NOS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
}, {
Value: "Other",
Help: "Any other S3 compatible provider",
}},
}, {
Name: "env_auth",
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter AWS credentials in the next step",
}, {
Value: "true",
Help: "Get AWS credentials from the environment (env vars or IAM)",
}},
}, {
Name: "access_key_id",
Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "region",
Help: "Region to connect to.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
}, {
Value: "eu-west-1",
Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
}, {
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}},
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
}, {
Value: "other-v2-signature",
Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
Provider: "AWS",
}, {
Name: "endpoint",
Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
Provider: "IBMCOS",
Examples: []fs.OptionExample{{
Value: "s3-api.us-geo.objectstorage.softlayer.net",
Help: "US Cross Region Endpoint",
}, {
Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
Help: "US Cross Region Dallas Endpoint",
}, {
Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
Help: "US Cross Region Washington DC Endpoint",
}, {
Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
Help: "US Cross Region San Jose Endpoint",
}, {
Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region Private Endpoint",
}, {
Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region Dallas Private Endpoint",
}, {
Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region Washington DC Private Endpoint",
}, {
Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region San Jose Private Endpoint",
}, {
Value: "s3.us-east.objectstorage.softlayer.net",
Help: "US Region East Endpoint",
}, {
Value: "s3.us-east.objectstorage.service.networklayer.com",
Help: "US Region East Private Endpoint",
}, {
Value: "s3.us-south.objectstorage.softlayer.net",
Help: "US Region South Endpoint",
}, {
Value: "s3.us-south.objectstorage.service.networklayer.com",
Help: "US Region South Private Endpoint",
}, {
Value: "s3.eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Endpoint",
}, {
Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Frankfurt Endpoint",
}, {
Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Milan Endpoint",
}, {
Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Amsterdam Endpoint",
}, {
Value: "s3.eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Private Endpoint",
}, {
Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Frankfurt Private Endpoint",
}, {
Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Milan Private Endpoint",
}, {
Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Amsterdam Private Endpoint",
}, {
Value: "s3.eu-gb.objectstorage.softlayer.net",
Help: "Great Britain Endpoint",
}, {
Value: "s3.eu-gb.objectstorage.service.networklayer.com",
Help: "Great Britain Private Endpoint",
}, {
Value: "s3.ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional Endpoint",
}, {
Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional Tokyo Endpoint",
}, {
Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional HongKong Endpoint",
}, {
Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional Seoul Endpoint",
}, {
Value: "s3.ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional Private Endpoint",
}, {
Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional Tokyo Private Endpoint",
}, {
Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional HongKong Private Endpoint",
}, {
Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional Seoul Private Endpoint",
}, {
Value: "s3.mel01.objectstorage.softlayer.net",
Help: "Melbourne Single Site Endpoint",
}, {
Value: "s3.mel01.objectstorage.service.networklayer.com",
Help: "Melbourne Single Site Private Endpoint",
}, {
Value: "s3.tor01.objectstorage.softlayer.net",
Help: "Toronto Single Site Endpoint",
}, {
Value: "s3.tor01.objectstorage.service.networklayer.com",
Help: "Toronto Single Site Private Endpoint",
}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
Help: "Endpoint for OSS API.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "oss-cn-hangzhou.aliyuncs.com",
Help: "East China 1 (Hangzhou)",
}, {
Value: "oss-cn-shanghai.aliyuncs.com",
Help: "East China 2 (Shanghai)",
}, {
Value: "oss-cn-qingdao.aliyuncs.com",
Help: "North China 1 (Qingdao)",
}, {
Value: "oss-cn-beijing.aliyuncs.com",
Help: "North China 2 (Beijing)",
}, {
Value: "oss-cn-zhangjiakou.aliyuncs.com",
Help: "North China 3 (Zhangjiakou)",
}, {
Value: "oss-cn-huhehaote.aliyuncs.com",
Help: "North China 5 (Huhehaote)",
}, {
Value: "oss-cn-shenzhen.aliyuncs.com",
Help: "South China 1 (Shenzhen)",
}, {
Value: "oss-cn-hongkong.aliyuncs.com",
Help: "Hong Kong (Hong Kong)",
}, {
Value: "oss-us-west-1.aliyuncs.com",
Help: "US West 1 (Silicon Valley)",
}, {
Value: "oss-us-east-1.aliyuncs.com",
Help: "US East 1 (Virginia)",
}, {
Value: "oss-ap-southeast-1.aliyuncs.com",
Help: "Southeast Asia Southeast 1 (Singapore)",
}, {
Value: "oss-ap-southeast-2.aliyuncs.com",
Help: "Asia Pacific Southeast 2 (Sydney)",
}, {
Value: "oss-ap-southeast-3.aliyuncs.com",
Help: "Southeast Asia Southeast 3 (Kuala Lumpur)",
}, {
Value: "oss-ap-southeast-5.aliyuncs.com",
Help: "Asia Pacific Southeast 5 (Jakarta)",
}, {
Value: "oss-ap-northeast-1.aliyuncs.com",
Help: "Asia Pacific Northeast 1 (Japan)",
}, {
Value: "oss-ap-south-1.aliyuncs.com",
Help: "Asia Pacific South 1 (Mumbai)",
}, {
Value: "oss-eu-central-1.aliyuncs.com",
Help: "Central Europe 1 (Frankfurt)",
}, {
Value: "oss-eu-west-1.aliyuncs.com",
Help: "West Europe (London)",
}, {
Value: "oss-me-east-1.aliyuncs.com",
Help: "Middle East 1 (Dubai)",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
Provider: "Dreamhost",
}, {
Value: "nyc3.digitaloceanspaces.com",
Help: "Digital Ocean Spaces New York 3",
Provider: "DigitalOcean",
}, {
Value: "ams3.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Amsterdam 3",
Provider: "DigitalOcean",
}, {
Value: "sgp1.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
Provider: "Wasabi",
}, {
Value: "s3.us-west-1.wasabisys.com",
Help: "Wasabi US West endpoint",
Provider: "Wasabi",
}, {
Value: "s3.eu-central-1.wasabisys.com",
Help: "Wasabi EU Central endpoint",
Provider: "Wasabi",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region.",
}, {
Value: "eu-west-1",
Help: "EU (Ireland) Region.",
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
}, {
Value: "EU",
Help: "EU Region.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region.",
}, {
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region.",
}, {
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
Provider: "IBMCOS",
Examples: []fs.OptionExample{{
Value: "us-standard",
Help: "US Cross Region Standard",
}, {
Value: "us-vault",
Help: "US Cross Region Vault",
}, {
Value: "us-cold",
Help: "US Cross Region Cold",
}, {
Value: "us-flex",
Help: "US Cross Region Flex",
}, {
Value: "us-east-standard",
Help: "US East Region Standard",
}, {
Value: "us-east-vault",
Help: "US East Region Vault",
}, {
Value: "us-east-cold",
Help: "US East Region Cold",
}, {
Value: "us-east-flex",
Help: "US East Region Flex",
}, {
Value: "us-south-standard",
Help: "US South Region Standard",
}, {
Value: "us-south-vault",
Help: "US South Region Vault",
}, {
Value: "us-south-cold",
Help: "US South Region Cold",
}, {
Value: "us-south-flex",
Help: "US South Region Flex",
}, {
Value: "eu-standard",
Help: "EU Cross Region Standard",
}, {
Value: "eu-vault",
Help: "EU Cross Region Vault",
}, {
Value: "eu-cold",
Help: "EU Cross Region Cold",
}, {
Value: "eu-flex",
Help: "EU Cross Region Flex",
}, {
Value: "eu-gb-standard",
Help: "Great Britain Standard",
}, {
Value: "eu-gb-vault",
Help: "Great Britain Vault",
}, {
Value: "eu-gb-cold",
Help: "Great Britain Cold",
}, {
Value: "eu-gb-flex",
Help: "Great Britain Flex",
}, {
Value: "ap-standard",
Help: "APAC Standard",
}, {
Value: "ap-vault",
Help: "APAC Vault",
}, {
Value: "ap-cold",
Help: "APAC Cold",
}, {
Value: "ap-flex",
Help: "APAC Flex",
}, {
Value: "mel01-standard",
Help: "Melbourne Standard",
}, {
Value: "mel01-vault",
Help: "Melbourne Vault",
}, {
Value: "mel01-cold",
Help: "Melbourne Cold",
}, {
Value: "mel01-flex",
Help: "Melbourne Flex",
}, {
Value: "tor01-standard",
Help: "Toronto Standard",
}, {
Value: "tor01-vault",
Help: "Toronto Vault",
}, {
Value: "tor01-cold",
Help: "Toronto Cold",
}, {
Value: "tor01-flex",
Help: "Toronto Flex",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
Provider: "!IBMCOS",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
Provider: "!IBMCOS",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-read",
Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-full-control",
Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "!IBMCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
Provider: "IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
Provider: "IBMCOS",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
Provider: "IBMCOS",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
Provider: "IBMCOS",
}},
}, {
Name: "bucket_acl",
Help: `Canned ACL used when creating buckets.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
}},
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "AES256",
}, {
Value: "aws:kms",
Help: "aws:kms",
}},
}, {
Name: "sse_kms_key_id",
Help: "If using KMS ID you must provide the ARN of Key.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "REDUCED_REDUNDANCY",
Help: "Reduced redundancy storage class",
}, {
Value: "STANDARD_IA",
Help: "Standard Infrequent Access storage class",
}, {
Value: "ONEZONE_IA",
Help: "One Zone Infrequent Access storage class",
}, {
Value: "GLACIER",
Help: "Glacier storage class",
}, {
Value: "DEEP_ARCHIVE",
Help: "Glacier Deep Archive storage class",
}, {
Value: "INTELLIGENT_TIERING",
Help: "Intelligent-Tiering storage class",
}},
}, {
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
Name: "storage_class",
Help: "The storage class to use when storing new objects in OSS.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "GLACIER",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
Default: false,
Advanced: true,
}, {
Name: "session_token",
Help: "An AWS session token",
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 4,
Advanced: true,
}, {
Name: "force_path_style",
Help: `If true use path style access; if false use virtual hosted style.
If this is true (the default) then rclone will use path style access,
if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
Default: true,
Advanced: true,
}, {
Name: "v2_auth",
Help: `If true use v2 authentication.
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
Default: false,
Advanced: true,
}, {
Name: "use_accelerate_endpoint",
Provider: "AWS",
Help: `If true use the AWS S3 accelerated endpoint.
See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`,
Default: false,
Advanced: true,
}},
})
}
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
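// Illustrative arithmetic for these limits (a sketch, assuming the SDK
// defaults s3manager.MinUploadPartSize = 5 MiB and s3manager.MaxUploadParts =
// 10000): at the minimum chunk size a multipart upload can address at most
// 5 MiB * 10000 ~= 48.8 GiB, so for anything larger Update below grows the
// part size, rounded up to a whole MiB, until the object fits - up to
// maxFileSize (5 TiB).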
// Options defines the configuration for this backend
type Options struct {
Provider string `config:"provider"`
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
UploadConcurrency int `config:"upload_concurrency"`
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"`
}
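// A minimal sketch (not part of the original backend) of how the `config`
// struct tags above are consumed: NewFs below passes a configmap.Mapper to
// configstruct.Set, which matches each tag against a config key. Assuming
// configmap.Simple is the usual map[string]string implementation of Mapper:
//
//	m := configmap.Simple{"provider": "AWS", "chunk_size": "64M"}
//	opt := new(Options)
//	err := configstruct.Set(m, opt)
//	// on success: opt.Provider == "AWS", opt.ChunkSize == 64 MiB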
// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
}
// Object describes a s3 object
type Object struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta & mimeType - to fill
// that in you need to call readMetaData
fs *Fs // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
mimeType string // MimeType of object - may be ""
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("S3 bucket %s", f.bucket)
}
return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
// 409, // Conflict - various states that could be resolved on a retry
503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
// S3 is pretty resilient, and the built-in retry handling is probably sufficient
// as it should notice closed connections and timeouts which are the most likely
// sort of failure modes
func (f *Fs) shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retryable
if fserrors.ShouldRetry(awsError.OrigErr()) {
return true, err
}
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket()
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
}
return true, err
}
for _, e := range retryErrorCodes {
if reqErr.StatusCode() == e {
return true, err
}
}
}
}
// Ok, not an awserr, check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
// Pattern to match a s3 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
// s3ParsePath parses an s3 'url'
func s3ParsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
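// For example (illustrative): s3ParsePath("bucket/path/to/dir") yields
// ("bucket", "path/to/dir", nil), s3ParsePath("/bucket/dir/") strips the
// leading and trailing slashes to ("bucket", "dir", nil), and a bare
// s3ParsePath("bucket") yields ("bucket", "", nil).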
// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
v := credentials.Value{
AccessKeyID: opt.AccessKeyID,
SecretAccessKey: opt.SecretAccessKey,
SessionToken: opt.SessionToken,
}
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient
// first provider to supply a credential set "wins"
providers := []credentials.Provider{
// use static credentials if they're present (checked by provider)
&credentials.StaticProvider{Value: v},
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
&credentials.EnvProvider{},
// A SharedCredentialsProvider retrieves credentials
// from the current user's home directory. It checks
// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
&credentials.SharedCredentialsProvider{},
// Pick up IAM role if we're in an ECS task
defaults.RemoteCredProvider(*def.Config, def.Handlers),
// Pick up IAM role in case we're on EC2
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3 * time.Minute, // refresh credentials a few minutes before expiry
},
}
cred := credentials.NewChainCredentials(providers)
switch {
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case v.AccessKeyID == "" && v.SecretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
cred = credentials.AnonymousCredentials
case v.AccessKeyID == "":
return nil, nil, errors.New("access_key_id not found")
case v.SecretAccessKey == "":
return nil, nil, errors.New("secret_access_key not found")
}
if opt.Region == "" && opt.Endpoint == "" {
opt.Endpoint = "https://s3.amazonaws.com/"
}
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithMaxRetries(maxRetries).
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle).
WithS3UseAccelerate(opt.UseAccelerateEndpoint)
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}
if opt.Endpoint != "" {
awsConfig.WithEndpoint(opt.Endpoint)
}
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
awsSessionOpts := session.Options{
Config: *awsConfig,
}
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
awsSessionOpts.Config.Credentials = nil
}
ses, err := session.NewSessionWithOptions(awsSessionOpts)
if err != nil {
return nil, nil, err
}
c := s3.New(ses)
if opt.V2Auth || opt.Region == "other-v2-signature" {
fs.Debugf(nil, "Using v2 auth")
signer := func(req *request.Request) {
// Ignore AnonymousCredentials object
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
c.Handlers.Sign.PushBack(signer)
}
return c, ses, nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "s3: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "s3: upload cutoff")
}
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
c, ses, err := s3Connection(opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: directory,
opt: *opt,
c: c,
bucket: bucket,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
srv: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.root != "" {
f.root += "/"
// Check to see if the object exists
req := s3.HeadObjectInput{
Bucket: &f.bucket,
Key: &directory,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
return f.shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
// f.listMultipartUploads()
return f, nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info but not meta
if info.LastModified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = *info.LastModified
}
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
} else {
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// Gets the bucket location
func (f *Fs) getBucketLocation() (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &f.bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(err)
})
if err != nil {
return "", err
}
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
}
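// For example (illustrative, assuming the SDK's documented behaviour):
// GetBucketLocation returns an empty LocationConstraint for us-east-1 and
// "EU" for some legacy eu-west-1 buckets; s3.NormalizeBucketLocation maps
// those to "us-east-1" and "eu-west-1" respectively.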
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
func (f *Fs) updateRegionForBucket() error {
region, err := f.getBucketLocation()
if err != nil {
return errors.Wrap(err, "reading bucket location failed")
}
if aws.StringValue(f.c.Config.Endpoint) != "" {
return errors.Errorf("can't set region to %q as endpoint is set", region)
}
if aws.StringValue(f.c.Config.Region) == region {
return errors.Errorf("region is already %q - not updating", region)
}
// Make a new session with the new region
oldRegion := f.opt.Region
f.opt.Region = region
c, ses, err := s3Connection(&f.opt)
if err != nil {
return errors.Wrap(err, "creating new session failed")
}
f.c = c
f.ses = ses
fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
return nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(ctx context.Context, dir string, recurse bool, fn listFn) error {
root := f.root
if dir != "" {
root += dir + "/"
}
maxKeys := int64(listChunkSize)
delimiter := ""
if !recurse {
delimiter = "/"
}
var marker *string
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &f.bucket,
Delimiter: &delimiter,
Prefix: &root,
MaxKeys: &maxKeys,
Marker: marker,
}
var resp *s3.ListObjectsOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjectsWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, &s3.Object{Key: &remote}, true)
if err != nil {
return err
}
}
}
for _, object := range resp.Contents {
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
// is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
remote = remote[:len(remote)-1]
err = fn(remote, &s3.Object{Key: &remote}, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
}
}
if !aws.BoolValue(resp.IsTruncated) {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
if len(resp.Contents) == 0 {
return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
}
marker = resp.Contents[len(resp.Contents)-1].Key
} else {
marker = resp.NextMarker
}
}
return nil
}
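// Illustrative pagination flow for the loop above (assuming standard S3
// ListObjects semantics): a response with IsTruncated=true and
// NextMarker="dir/zzz" makes the next request carry Marker="dir/zzz"; servers
// only guarantee NextMarker when a delimiter is set, so listings made without
// a delimiter fall back to the last key in Contents, and the loop ends once
// IsTruncated is false.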
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(ctx, dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := s3.ListBucketsInput{}
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBucketsWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range resp.Buckets {
d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate))
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(ctx, dir)
}
return f.listDir(ctx, dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(ctx, dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Put the Object into the bucket
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) dirExists(ctx context.Context) (bool, error) {
req := s3.HeadBucketInput{
Bucket: &f.bucket,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucketWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
}
if err, ok := err.(awserr.RequestFailure); ok {
if err.StatusCode() == http.StatusNotFound {
return false, nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
if !f.bucketDeleted {
exists, err := f.dirExists(ctx)
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.opt.BucketACL,
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.opt.LocationConstraint,
}
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucketWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil
}
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
}
return err
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucketWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
fs.Infof(f, "Bucket deleted")
}
return err
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// pathEscape escapes s as for a URL path. It uses rest.URLPathEscape
// but also escapes '+' for S3 and Digital Ocean spaces compatibility
func pathEscape(s string) string {
return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
}
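// For example (illustrative, following the rules above): pathEscape("a+b/c d")
// should produce "a%2Bb/c%20d" - the slash survives as a path separator, the
// space is percent-encoded by rest.URLPathEscape, and the '+' is additionally
// encoded so S3 and Spaces don't decode it as a space.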
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir(ctx, "")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
req := s3.CopyObjectInput{
Bucket: &f.bucket,
ACL: &f.opt.ACL,
Key: &key,
CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
}
if f.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
}
if f.opt.StorageClass != "" {
req.StorageClass = &f.opt.StorageClass
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.CopyObjectWithContext(ctx, &req)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
hash := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
if !matchMd5.MatchString(hash) {
err := o.readMetaData(ctx)
if err != nil {
return "", err
}
if md5sum, ok := o.meta[metaMD5Hash]; ok {
md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
if err != nil {
return "", err
}
hash = hex.EncodeToString(md5sumBytes)
} else {
hash = ""
}
}
return hash, nil
}
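// For example (illustrative): a single-part object usually carries an ETag
// like "d41d8cd98f00b204e9800998ecf8427e" which passes matchMd5 once the
// quotes are trimmed, while a multipart ETag such as
// "9b2cf535f27731c974343645a3985328-5" fails the regexp, so the hash falls
// back to the base64 Md5chksum metadata written at upload time, if any.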
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.meta != nil {
return nil
}
key := o.fs.root + o.remote
req := s3.HeadObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
var resp *s3.HeadObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due to their apache proxies
if resp.ContentLength != nil {
size = *resp.ContentLength
}
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = resp.Metadata
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
o.mimeType = aws.StringValue(resp.ContentType)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
if fs.Config.UseServerModTime {
return o.lastModified
}
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok || d == nil {
// fs.Debugf(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(*d)
if err != nil {
fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData(ctx)
if err != nil {
return err
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Guess the content type
mimeType := fs.MimeType(ctx, o)
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := o.fs.bucket + "/" + key
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
CopySource: aws.String(pathEscape(sourceKey)),
Metadata: o.meta,
MetadataDirective: &directive,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass == "GLACIER" || o.fs.opt.StorageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.CopyObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
return err
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
key := o.fs.root + o.remote
req := s3.GetObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var resp *s3.GetObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
}
}
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Update the Object from in with modTime and size
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir(ctx, "")
if err != nil {
return err
}
modTime := src.ModTime(ctx)
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var uploader *s3manager.Uploader
if multipart {
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
if size/u.PartSize >= s3manager.MaxUploadParts {
// Calculate partition size rounded up to the nearest MB
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
}
})
}
// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}
// read the md5sum if available: for non-multipart uploads, or when
// disable checksum isn't set (for multipart it is stored as metadata).
var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
if err == nil {
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
if multipart {
metadata[metaMD5Hash] = &md5sum
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(ctx, src)
key := o.fs.root + o.remote
if multipart {
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
//ContentLength: &size,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.UploadWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
}
} else {
req := s3.PutObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
Metadata: metadata,
}
if md5sum != "" {
req.ContentMD5 = &md5sum
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
// Create the request
putObj, _ := o.fs.c.PutObjectRequest(&req)
// Sign it so we can upload using a presigned request.
//
// Note the SDK doesn't currently support streaming to
// PutObject so we'll use this work-around.
url, headers, err := putObj.PresignRequest(15 * time.Minute)
if err != nil {
return errors.Wrap(err, "s3 upload: sign request")
}
// Set the request body to nil if empty so we don't use chunked encoding
if size == 0 {
in = nil
}
// create the vanilla http request
httpReq, err := http.NewRequest("PUT", url, in)
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
httpReq = httpReq.WithContext(ctx)
// set the headers we signed and the length
httpReq.Header = headers
httpReq.ContentLength = size
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {
return o.fs.shouldRetry(err)
}
body, err := rest.ReadBody(resp)
if err != nil {
return o.fs.shouldRetry(err)
}
if resp.StatusCode >= 200 && resp.StatusCode < 299 {
return false, nil
}
err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
})
if err != nil {
return err
}
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData(ctx)
return err
}
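// Worked example for the part size adjustment above (illustrative): for a
// known size of 100 GiB at the default 5 MiB chunk size, size/PartSize =
// 20480 >= s3manager.MaxUploadParts (10000), so PartSize becomes
// (((100 GiB / 10000) >> 20) + 1) << 20 = 11 MiB, giving roughly 9310 parts.
// For streamed uploads of unknown size the part size is fixed at
// (((5 TiB / 10000) >> 20) + 1) << 20 = 525 MiB so the S3 file size limit
// stays reachable.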
// Remove an object
func (o *Object) Remove(ctx context.Context) error {
key := o.fs.root + o.remote
req := s3.DeleteObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
return o.fs.shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)
| 1 | 9,036 | Perhaps note that rclone can't do this yet? | rclone-rclone | go |
@@ -285,3 +285,14 @@ func (c *client) doRequest(
return res, requestError{}
}
+
+// ConfigureTransports converts a net/http HTTP/1 Transport to an http3.RoundTripper.
+// It uses the original TLS config, if present, relying on the clone created
+// for each new connection.
+func ConfigureTransports(t1 *http.Transport) *RoundTripper {
+ roundtripper := &RoundTripper{
+ TLSClientConfig: t1.TLSClientConfig,
+ QuicConfig: &quic.Config{},
+ }
+ return roundtripper
+} | 1 | package http3
import (
"bytes"
"crypto/tls"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"sync"
"github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/internal/protocol"
"github.com/lucas-clemente/quic-go/internal/qtls"
"github.com/lucas-clemente/quic-go/internal/utils"
"github.com/marten-seemann/qpack"
)
// MethodGet0RTT allows a GET request to be sent using 0-RTT.
// Note that 0-RTT data doesn't provide replay protection.
const MethodGet0RTT = "GET_0RTT"
const (
defaultUserAgent = "quic-go HTTP/3"
defaultMaxResponseHeaderBytes = 10 * 1 << 20 // 10 MB
)
var defaultQuicConfig = &quic.Config{
MaxIncomingStreams: -1, // don't allow the server to create bidirectional streams
KeepAlive: true,
Versions: []protocol.VersionNumber{protocol.VersionTLS},
}
var dialAddr = quic.DialAddrEarly
type roundTripperOpts struct {
DisableCompression bool
MaxHeaderBytes int64
}
// client is an HTTP/3 client doing requests
type client struct {
tlsConf *tls.Config
config *quic.Config
opts *roundTripperOpts
dialOnce sync.Once
dialer func(network, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlySession, error)
handshakeErr error
requestWriter *requestWriter
decoder *qpack.Decoder
hostname string
session quic.EarlySession
logger utils.Logger
}
func newClient(
hostname string,
tlsConf *tls.Config,
opts *roundTripperOpts,
quicConfig *quic.Config,
dialer func(network, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlySession, error),
) (*client, error) {
if quicConfig == nil {
quicConfig = defaultQuicConfig
} else if len(quicConfig.Versions) == 0 {
quicConfig = quicConfig.Clone()
quicConfig.Versions = []quic.VersionNumber{defaultQuicConfig.Versions[0]}
}
if len(quicConfig.Versions) != 1 {
return nil, errors.New("can only use a single QUIC version for dialing a HTTP/3 connection")
}
quicConfig.MaxIncomingStreams = -1 // don't allow any bidirectional streams
logger := utils.DefaultLogger.WithPrefix("h3 client")
if tlsConf == nil {
tlsConf = &tls.Config{}
} else {
tlsConf = tlsConf.Clone()
}
// Replace existing ALPNs by H3
tlsConf.NextProtos = []string{versionToALPN(quicConfig.Versions[0])}
return &client{
hostname: authorityAddr("https", hostname),
tlsConf: tlsConf,
requestWriter: newRequestWriter(logger),
decoder: qpack.NewDecoder(func(hf qpack.HeaderField) {}),
config: quicConfig,
opts: opts,
dialer: dialer,
logger: logger,
}, nil
}
func (c *client) dial() error {
var err error
if c.dialer != nil {
c.session, err = c.dialer("udp", c.hostname, c.tlsConf, c.config)
} else {
c.session, err = dialAddr(c.hostname, c.tlsConf, c.config)
}
if err != nil {
return err
}
// run the session setup using 0-RTT data
go func() {
if err := c.setupSession(); err != nil {
c.logger.Debugf("Setting up session failed: %s", err)
c.session.CloseWithError(quic.ErrorCode(errorInternalError), "")
}
}()
return nil
}
func (c *client) setupSession() error {
// open the control stream
str, err := c.session.OpenUniStream()
if err != nil {
return err
}
buf := &bytes.Buffer{}
// write the type byte
buf.Write([]byte{0x0})
// send the SETTINGS frame
(&settingsFrame{}).Write(buf)
if _, err := str.Write(buf.Bytes()); err != nil {
return err
}
return nil
}
func (c *client) Close() error {
if c.session == nil {
return nil
}
return c.session.CloseWithError(quic.ErrorCode(errorNoError), "")
}
func (c *client) maxHeaderBytes() uint64 {
if c.opts.MaxHeaderBytes <= 0 {
return defaultMaxResponseHeaderBytes
}
return uint64(c.opts.MaxHeaderBytes)
}
// RoundTrip executes a request and returns a response
func (c *client) RoundTrip(req *http.Request) (*http.Response, error) {
if req.URL.Scheme != "https" {
return nil, errors.New("http3: unsupported scheme")
}
if authorityAddr("https", hostnameFromRequest(req)) != c.hostname {
return nil, fmt.Errorf("http3 client BUG: RoundTrip called for the wrong client (expected %s, got %s)", c.hostname, req.Host)
}
c.dialOnce.Do(func() {
c.handshakeErr = c.dial()
})
if c.handshakeErr != nil {
return nil, c.handshakeErr
}
// Immediately send out this request, if this is a 0-RTT request.
if req.Method == MethodGet0RTT {
req.Method = http.MethodGet
} else {
// wait for the handshake to complete
select {
case <-c.session.HandshakeComplete().Done():
case <-req.Context().Done():
return nil, req.Context().Err()
}
}
str, err := c.session.OpenStreamSync(req.Context())
if err != nil {
return nil, err
}
// Request Cancellation:
// This go routine keeps running even after RoundTrip() returns.
// It is shut down when the application is done processing the body.
reqDone := make(chan struct{})
go func() {
select {
case <-req.Context().Done():
str.CancelWrite(quic.ErrorCode(errorRequestCanceled))
str.CancelRead(quic.ErrorCode(errorRequestCanceled))
case <-reqDone:
}
}()
rsp, rerr := c.doRequest(req, str, reqDone)
if rerr.err != nil { // if any error occurred
close(reqDone)
if rerr.streamErr != 0 { // if it was a stream error
str.CancelWrite(quic.ErrorCode(rerr.streamErr))
}
if rerr.connErr != 0 { // if it was a connection error
var reason string
if rerr.err != nil {
reason = rerr.err.Error()
}
c.session.CloseWithError(quic.ErrorCode(rerr.connErr), reason)
}
}
return rsp, rerr.err
}
func (c *client) doRequest(
req *http.Request,
str quic.Stream,
reqDone chan struct{},
) (*http.Response, requestError) {
var requestGzip bool
if !c.opts.DisableCompression && req.Method != "HEAD" && req.Header.Get("Accept-Encoding") == "" && req.Header.Get("Range") == "" {
requestGzip = true
}
if err := c.requestWriter.WriteRequest(str, req, requestGzip); err != nil {
return nil, newStreamError(errorInternalError, err)
}
frame, err := parseNextFrame(str)
if err != nil {
return nil, newStreamError(errorFrameError, err)
}
hf, ok := frame.(*headersFrame)
if !ok {
return nil, newConnError(errorFrameUnexpected, errors.New("expected first frame to be a HEADERS frame"))
}
if hf.Length > c.maxHeaderBytes() {
return nil, newStreamError(errorFrameError, fmt.Errorf("HEADERS frame too large: %d bytes (max: %d)", hf.Length, c.maxHeaderBytes()))
}
headerBlock := make([]byte, hf.Length)
if _, err := io.ReadFull(str, headerBlock); err != nil {
return nil, newStreamError(errorRequestIncomplete, err)
}
hfs, err := c.decoder.DecodeFull(headerBlock)
if err != nil {
// TODO: use the right error code
return nil, newConnError(errorGeneralProtocolError, err)
}
connState := qtls.ToTLSConnectionState(c.session.ConnectionState())
res := &http.Response{
Proto: "HTTP/3",
ProtoMajor: 3,
Header: http.Header{},
TLS: &connState,
}
for _, hf := range hfs {
switch hf.Name {
case ":status":
status, err := strconv.Atoi(hf.Value)
if err != nil {
return nil, newStreamError(errorGeneralProtocolError, errors.New("malformed non-numeric status pseudo header"))
}
res.StatusCode = status
res.Status = hf.Value + " " + http.StatusText(status)
default:
res.Header.Add(hf.Name, hf.Value)
}
}
respBody := newResponseBody(str, reqDone, func() {
c.session.CloseWithError(quic.ErrorCode(errorFrameUnexpected), "")
})
if requestGzip && res.Header.Get("Content-Encoding") == "gzip" {
res.Header.Del("Content-Encoding")
res.Header.Del("Content-Length")
res.ContentLength = -1
res.Body = newGzipReader(respBody)
res.Uncompressed = true
} else {
res.Body = respBody
}
return res, requestError{}
}
| 1 | 9,372 | You probably don't need to set an empty config here. | lucas-clemente-quic-go | go |
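
A minimal sketch of the reviewer's point, assuming the RoundTripper hands its QuicConfig through to newClient: since newClient above already substitutes defaultQuicConfig for a nil config, ConfigureTransports can leave the field unset instead of allocating an empty quic.Config.

func ConfigureTransports(t1 *http.Transport) *RoundTripper {
	// QuicConfig is intentionally left nil; newClient falls back to
	// defaultQuicConfig when it receives a nil config.
	return &RoundTripper{
		TLSClientConfig: t1.TLSClientConfig,
	}
}
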
@@ -12,6 +12,19 @@ from ..element import Element
from .grid import GridInterface
from .interface import Interface, DataError, dask_array_module
+try:
+ import cftime
+ cftime_types = (
+ cftime._cftime.DatetimeGregorian,
+ cftime._cftime.Datetime360Day,
+ cftime._cftime.DatetimeJulian,
+ cftime._cftime.DatetimeNoLeap,
+ cftime._cftime.DatetimeProlepticGregorian
+ )
+ util.datetime_types += cftime_types
+except:
+ cftime_types = ()
+
class XArrayInterface(GridInterface):
| 1 | from __future__ import absolute_import
import sys
import types
from collections import OrderedDict
import numpy as np
from .. import util
from ..dimension import Dimension, asdim, dimension_name
from ..ndmapping import NdMapping, item_check, sorted_context
from ..element import Element
from .grid import GridInterface
from .interface import Interface, DataError, dask_array_module
class XArrayInterface(GridInterface):
types = ()
datatype = 'xarray'
@classmethod
def loaded(cls):
return 'xarray' in sys.modules
@classmethod
def applies(cls, obj):
if not cls.loaded():
return False
import xarray as xr
return isinstance(obj, (xr.Dataset, xr.DataArray))
@classmethod
def dimension_type(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
return dataset.data[name].dtype.type
@classmethod
def dtype(cls, dataset, dim):
name = dataset.get_dimension(dim, strict=True).name
return dataset.data[name].dtype
@classmethod
def shape(cls, dataset, gridded=False):
array = dataset.data[dataset.vdims[0].name]
if not any(cls.irregular(dataset, kd) for kd in dataset.kdims):
names = [kd.name for kd in dataset.kdims
if kd.name in array.dims][::-1]
if not all(d in names for d in array.dims):
array = np.squeeze(array)
array = array.transpose(*names)
shape = array.shape
if gridded:
return shape
else:
return (np.product(shape, dtype=np.intp), len(dataset.dimensions()))
@classmethod
def init(cls, eltype, data, kdims, vdims):
import xarray as xr
element_params = eltype.params()
kdim_param = element_params['kdims']
vdim_param = element_params['vdims']
def retrieve_unit_and_label(dim):
if isinstance(dim, util.basestring):
dim = Dimension(dim)
dim.unit = data[dim.name].attrs.get('units')
label = data[dim.name].attrs.get('long_name')
if label is not None:
dim.label = label
return dim
if isinstance(data, xr.DataArray):
if vdims:
vdim = vdims[0]
elif data.name:
vdim = Dimension(data.name)
vdim.unit = data.attrs.get('units')
label = data.attrs.get('long_name')
if label is not None:
vdim.label = label
elif len(vdim_param.default) == 1:
vdim = vdim_param.default[0]
if vdim.name in data.dims:
raise DataError("xarray DataArray does not define a name, "
"and the default of '%s' clashes with a "
"coordinate dimension. Give the DataArray "
"a name or supply an explicit value dimension."
% vdim.name, cls)
else:
raise DataError("xarray DataArray does not define a name "
"and %s does not define a default value "
"dimension. Give the DataArray a name or "
"supply an explicit vdim." % eltype.__name__,
cls)
vdims = [vdim]
data = data.to_dataset(name=vdim.name)
if not isinstance(data, xr.Dataset):
if kdims is None:
kdims = kdim_param.default
if vdims is None:
vdims = vdim_param.default
kdims = [asdim(kd) for kd in kdims]
vdims = [asdim(vd) for vd in vdims]
if isinstance(data, np.ndarray) and data.ndim == 2 and data.shape[1] == len(kdims+vdims):
data = tuple(data)
if isinstance(data, tuple):
data = {d.name: vals for d, vals in zip(kdims + vdims, data)}
elif isinstance(data, list) and data == []:
ndims = len(kdims)
dimensions = [d.name for d in kdims + vdims]
data = {d: np.array([]) for d in dimensions[:ndims]}
data.update({d: np.empty((0,) * ndims) for d in dimensions[ndims:]})
if not isinstance(data, dict):
raise TypeError('XArrayInterface could not interpret data type')
data = {d: np.asarray(values) if d in kdims else values
for d, values in data.items()}
coord_dims = [data[kd.name].ndim for kd in kdims]
dims = tuple('dim_%d' % i for i in range(max(coord_dims)))[::-1]
coords = OrderedDict()
for kd in kdims:
coord_vals = data[kd.name]
if coord_vals.ndim > 1:
coord = (dims[:coord_vals.ndim], coord_vals)
else:
coord = coord_vals
coords[kd.name] = coord
xr_kwargs = {'dims': dims if max(coord_dims) > 1 else list(coords)[::-1]}
arrays = {}
for vdim in vdims:
arr = data[vdim.name]
if not isinstance(arr, xr.DataArray):
arr = xr.DataArray(arr, coords=coords, **xr_kwargs)
arrays[vdim.name] = arr
data = xr.Dataset(arrays)
else:
if not data.coords:
data = data.assign_coords(**{k: range(v) for k, v in data.dims.items()})
if vdims is None:
vdims = list(data.data_vars.keys())
vdims = [retrieve_unit_and_label(vd) for vd in vdims]
if kdims is None:
xrdims = list(data.dims)
xrcoords = list(data.coords)
kdims = [name for name in data.indexes.keys()
if isinstance(data[name].data, np.ndarray)]
kdims = sorted(kdims, key=lambda x: (xrcoords.index(x) if x in xrcoords else float('inf'), x))
if set(xrdims) != set(kdims):
virtual_dims = [xd for xd in xrdims if xd not in kdims]
for c in data.coords:
if c not in kdims and set(data[c].dims) == set(virtual_dims):
kdims.append(c)
kdims = [retrieve_unit_and_label(kd) for kd in kdims]
vdims = [asdim(vd) for vd in vdims]
kdims = [asdim(kd) for kd in kdims]
not_found = []
for d in kdims:
if not any(d.name == k or (isinstance(v, xr.DataArray) and d.name in v.dims)
for k, v in data.coords.items()):
not_found.append(d)
if not isinstance(data, xr.Dataset):
raise TypeError('Data must be be an xarray Dataset type.')
elif not_found:
raise DataError("xarray Dataset must define coordinates "
"for all defined kdims, %s coordinates not found."
% not_found, cls)
return data, {'kdims': kdims, 'vdims': vdims}, {}
@classmethod
def validate(cls, dataset, vdims=True):
Interface.validate(dataset, vdims)
# Check whether irregular (i.e. multi-dimensional) coordinate
# array dimensionality matches
irregular = []
for kd in dataset.kdims:
if cls.irregular(dataset, kd):
irregular.append((kd, dataset.data[kd.name].dims))
if irregular:
nonmatching = ['%s: %s' % (kd, dims) for kd, dims in irregular[1:]
if set(dims) != set(irregular[0][1])]
if nonmatching:
nonmatching = ['%s: %s' % irregular[0]] + nonmatching
raise DataError("The dimensions of coordinate arrays "
"on irregular data must match. The "
"following kdims were found to have "
"non-matching array dimensions:\n\n%s"
% ('\n'.join(nonmatching)), cls)
@classmethod
def range(cls, dataset, dimension):
dim = dataset.get_dimension(dimension, strict=True).name
if dataset._binned and dimension in dataset.kdims:
data = cls.coords(dataset, dim, edges=True)
if data.dtype.kind == 'M':
dmin, dmax = data.min(), data.max()
else:
dmin, dmax = np.nanmin(data), np.nanmax(data)
else:
data = dataset.data[dim]
if len(data):
dmin, dmax = data.min().data, data.max().data
else:
dmin, dmax = np.NaN, np.NaN
da = dask_array_module()
if da and isinstance(dmin, da.Array):
dmin, dmax = da.compute(dmin, dmax)
dmin = dmin if np.isscalar(dmin) or isinstance(dmin, util.datetime_types) else dmin.item()
dmax = dmax if np.isscalar(dmax) or isinstance(dmax, util.datetime_types) else dmax.item()
return dmin, dmax
@classmethod
def groupby(cls, dataset, dimensions, container_type, group_type, **kwargs):
index_dims = [dataset.get_dimension(d, strict=True) for d in dimensions]
element_dims = [kdim for kdim in dataset.kdims
if kdim not in index_dims]
invalid = [d for d in index_dims if dataset.data[d.name].ndim > 1]
if invalid:
if len(invalid) == 1: invalid = "'%s'" % invalid[0]
raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
% invalid)
group_kwargs = {}
if group_type != 'raw' and issubclass(group_type, Element):
group_kwargs = dict(util.get_param_values(dataset),
kdims=element_dims)
group_kwargs.update(kwargs)
drop_dim = any(d not in group_kwargs['kdims'] for d in element_dims)
# XArray 0.7.2 does not support multi-dimensional groupby
# Replace custom implementation when
# https://github.com/pydata/xarray/pull/818 is merged.
group_by = [d.name for d in index_dims]
data = []
if len(dimensions) == 1:
for k, v in dataset.data.groupby(index_dims[0].name):
if drop_dim:
v = v.to_dataframe().reset_index()
data.append((k, group_type(v, **group_kwargs)))
else:
unique_iters = [cls.values(dataset, d, False) for d in group_by]
indexes = zip(*util.cartesian_product(unique_iters))
for k in indexes:
sel = dataset.data.sel(**dict(zip(group_by, k)))
if drop_dim:
sel = sel.to_dataframe().reset_index()
data.append((k, group_type(sel, **group_kwargs)))
if issubclass(container_type, NdMapping):
with item_check(False), sorted_context(False):
return container_type(data, kdims=index_dims)
else:
return container_type(data)
@classmethod
def coords(cls, dataset, dimension, ordered=False, expanded=False, edges=False):
import xarray as xr
dim = dataset.get_dimension(dimension)
dim = dimension if dim is None else dim.name
irregular = cls.irregular(dataset, dim)
if irregular or expanded:
if irregular:
data = dataset.data[dim]
else:
data = util.expand_grid_coords(dataset, dim)
if edges:
data = cls._infer_interval_breaks(data, axis=1)
data = cls._infer_interval_breaks(data, axis=0)
return data.values if isinstance(data, xr.DataArray) else data
data = np.atleast_1d(dataset.data[dim].data)
if ordered and data.shape and np.all(data[1:] < data[:-1]):
data = data[::-1]
shape = cls.shape(dataset, True)
if dim in dataset.kdims:
idx = dataset.get_dimension_index(dim)
isedges = (dim in dataset.kdims and len(shape) == dataset.ndims
and len(data) == (shape[dataset.ndims-idx-1]+1))
else:
isedges = False
if edges and not isedges:
data = cls._infer_interval_breaks(data)
elif not edges and isedges:
data = np.convolve(data, [0.5, 0.5], 'valid')
return data.values if isinstance(data, xr.DataArray) else data
@classmethod
def values(cls, dataset, dim, expanded=True, flat=True, compute=True):
dim = dataset.get_dimension(dim, strict=True)
data = dataset.data[dim.name].data
irregular = cls.irregular(dataset, dim) if dim in dataset.kdims else False
irregular_kdims = [d for d in dataset.kdims if cls.irregular(dataset, d)]
if irregular_kdims:
virtual_coords = list(dataset.data[irregular_kdims[0].name].coords.dims)
else:
virtual_coords = []
if dim in dataset.vdims or irregular:
data_coords = list(dataset.data[dim.name].dims)
da = dask_array_module()
if compute and da and isinstance(data, da.Array):
data = data.compute()
data = cls.canonicalize(dataset, data, data_coords=data_coords,
virtual_coords=virtual_coords)
return data.T.flatten() if flat else data
elif expanded:
data = cls.coords(dataset, dim.name, expanded=True)
return data.T.flatten() if flat else data
else:
return cls.coords(dataset, dim.name, ordered=True)
@classmethod
def aggregate(cls, dataset, dimensions, function, **kwargs):
reduce_dims = [d.name for d in dataset.kdims if d not in dimensions]
return dataset.data.reduce(function, dim=reduce_dims), []
@classmethod
def unpack_scalar(cls, dataset, data):
"""
Given a dataset object and data in the appropriate format for
the interface, return a simple scalar.
"""
if (len(data.data_vars) == 1 and
len(data[dataset.vdims[0].name].shape) == 0):
return data[dataset.vdims[0].name].item()
return data
@classmethod
def ndloc(cls, dataset, indices):
kdims = [d for d in dataset.kdims[::-1]]
adjusted_indices = []
slice_dims = []
for kd, ind in zip(kdims, indices):
if cls.irregular(dataset, kd):
coords = [c for c in dataset.data.coords if c not in dataset.data.dims]
dim = dataset.data[kd.name].dims[coords.index(kd.name)]
shape = dataset.data[kd.name].shape[coords.index(kd.name)]
coords = np.arange(shape)
else:
coords = cls.coords(dataset, kd, False)
dim = kd.name
slice_dims.append(dim)
ncoords = len(coords)
if np.all(coords[1:] < coords[:-1]):
if np.isscalar(ind):
ind = ncoords-ind-1
elif isinstance(ind, slice):
start = None if ind.stop is None else ncoords-ind.stop
stop = None if ind.start is None else ncoords-ind.start
ind = slice(start, stop, ind.step)
elif isinstance(ind, np.ndarray) and ind.dtype.kind == 'b':
ind = ind[::-1]
elif isinstance(ind, (np.ndarray, list)):
ind = [ncoords-i-1 for i in ind]
if isinstance(ind, list):
ind = np.array(ind)
if isinstance(ind, np.ndarray) and ind.dtype.kind == 'b':
ind = np.where(ind)[0]
adjusted_indices.append(ind)
isel = dict(zip(slice_dims, adjusted_indices))
all_scalar = all(map(np.isscalar, indices))
if all_scalar and len(indices) == len(kdims) and len(dataset.vdims) == 1:
return dataset.data[dataset.vdims[0].name].isel(**isel).values.item()
# Detect if the indexing is selecting samples or slicing the array
sampled = (all(isinstance(ind, np.ndarray) and ind.dtype.kind != 'b'
for ind in adjusted_indices) and len(indices) == len(kdims))
if sampled or (all_scalar and len(indices) == len(kdims)):
if all_scalar: isel = {k: [v] for k, v in isel.items()}
return dataset.data.isel_points(**isel).to_dataframe().reset_index()
else:
return dataset.data.isel(**isel)
@classmethod
def concat_dim(cls, datasets, dim, vdims):
import xarray as xr
return xr.concat([ds.assign_coords(**{dim.name: c}) for c, ds in datasets.items()],
dim=dim.name)
@classmethod
def redim(cls, dataset, dimensions):
renames = {k: v.name for k, v in dimensions.items()}
return dataset.data.rename(renames)
@classmethod
def reindex(cls, dataset, kdims=None, vdims=None):
dropped_kdims = [kd for kd in dataset.kdims if kd not in kdims]
constant = {}
for kd in dropped_kdims:
vals = cls.values(dataset, kd.name, expanded=False)
if len(vals) == 1:
constant[kd.name] = vals[0]
if len(constant) == len(dropped_kdims):
return dataset.data.sel(**{k: v for k, v in constant.items()
if k in dataset.data.dims})
elif dropped_kdims:
return tuple(dataset.columns(kdims+vdims).values())
return dataset.data
@classmethod
def sort(cls, dataset, by=[], reverse=False):
return dataset
@classmethod
def select(cls, dataset, selection_mask=None, **selection):
validated = {}
for k, v in selection.items():
dim = dataset.get_dimension(k, strict=True)
if cls.irregular(dataset, dim):
return GridInterface.select(dataset, selection_mask, **selection)
dim = dim.name
if isinstance(v, slice):
v = (v.start, v.stop)
if isinstance(v, set):
validated[dim] = list(v)
elif isinstance(v, tuple):
dim_vals = dataset.data[k].values
upper = None if v[1] is None else v[1]-sys.float_info.epsilon*10
v = v[0], upper
if dim_vals.dtype.kind not in 'OSU' and np.all(dim_vals[1:] < dim_vals[:-1]):
# If coordinates are inverted invert slice
v = v[::-1]
validated[dim] = slice(*v)
elif isinstance(v, types.FunctionType):
validated[dim] = v(dataset[k])
else:
validated[dim] = v
data = dataset.data.sel(**validated)
# Restore constant dimensions
indexed = cls.indexed(dataset, selection)
dropped = {d.name: np.atleast_1d(data[d.name])
for d in dataset.kdims
if not data[d.name].data.shape}
if dropped and not indexed:
data = data.assign_coords(**dropped)
da = dask_array_module()
if (indexed and len(data.data_vars) == 1 and
len(data[dataset.vdims[0].name].shape) == 0):
value = data[dataset.vdims[0].name]
if da and isinstance(value.data, da.Array):
value = value.compute()
return value.item()
elif indexed:
values = []
for vd in dataset.vdims:
value = data[vd.name]
if da and isinstance(value.data, da.Array):
value = value.compute()
values.append(value.item())
return np.array(values)
return data
@classmethod
def length(cls, dataset):
return np.product([len(dataset.data[d.name]) for d in dataset.kdims], dtype=np.intp)
@classmethod
def dframe(cls, dataset, dimensions):
data = dataset.data.to_dataframe().reset_index()
if dimensions:
return data[dimensions]
return data
@classmethod
def sample(cls, dataset, samples=[]):
raise NotImplementedError
@classmethod
def add_dimension(cls, dataset, dimension, dim_pos, values, vdim):
import xarray as xr
if not vdim:
raise Exception("Cannot add key dimension to a dense representation.")
dim = dimension_name(dimension)
coords = {d.name: cls.coords(dataset, d.name) for d in dataset.kdims}
arr = xr.DataArray(values, coords=coords, name=dim,
dims=tuple(d.name for d in dataset.kdims[::-1]))
return dataset.data.assign(**{dim: arr})
Interface.register(XArrayInterface)
| 1 | 21,109 | I think you are missing `cftime.DatetimeAllLeap` here. That said, all of these are subclasses of `cftime.datetime`, so I think you could get away with just using `cftime.datetime` here, rather than enumerating all of the different subclasses (since `cftime_types` is only used for instance checks). | holoviz-holoviews | py |
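
A sketch of the reviewer's suggestion, assuming cftime_types is only ever used for isinstance checks: every calendar class above subclasses cftime.datetime, so a single base-class entry also covers the missing DatetimeAllLeap.

try:
    import cftime
    # one base class covers DatetimeGregorian, Datetime360Day, DatetimeAllLeap, etc.
    cftime_types = (cftime.datetime,)
    util.datetime_types += cftime_types
except ImportError:
    cftime_types = ()
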
@@ -37,6 +37,11 @@ const (
StoragePoolClaimCPK CasPoolKey = "openebs.io/storage-pool-claim"
// CStorPoolClusterCPK is the CStorPoolcluster label
CStorPoolClusterCPK CasPoolKey = "openebs.io/cstor-pool-cluster"
+ // CStorPoolInstanceCPK is the CStorPoolInstance label
+ CStorPoolInstanceCPK CasPoolKey = "openebs.io/cstor-pool-instance"
+ // PredecessorBlockDeviceCPK is the annotation on the block device claim
+ // holding previous block device name
+ PredecessorBlockDeviceCPK CasPoolKey = "openebs.io/bd-predecessor"
// NdmDiskTypeCPK is the node-disk-manager disk type e.g. 'sparse' or 'disk'
NdmDiskTypeCPK CasPoolKey = "ndm.io/disk-type"
// NdmBlockDeviceTypeCPK is the node-disk-manager blockdevice type e.g. 'blockdevice' | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
ndm "github.com/openebs/maya/pkg/apis/openebs.io/ndm/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CasPoolKey is the key for the CasPool.
type CasPoolKey string
// CasPoolValString represents the string value for a CasPoolKey.
type CasPoolValString string
// CasPoolValInt represents the integer value for a CasPoolKey
type CasPoolValInt int
const (
// HostNameCPK is the kubernetes host name label
HostNameCPK CasPoolKey = "kubernetes.io/hostname"
// StoragePoolClaimCPK is the storage pool claim label
StoragePoolClaimCPK CasPoolKey = "openebs.io/storage-pool-claim"
// CStorPoolClusterCPK is the CStorPoolcluster label
CStorPoolClusterCPK CasPoolKey = "openebs.io/cstor-pool-cluster"
// NdmDiskTypeCPK is the node-disk-manager disk type e.g. 'sparse' or 'disk'
NdmDiskTypeCPK CasPoolKey = "ndm.io/disk-type"
// NdmBlockDeviceTypeCPK is the node-disk-manager blockdevice type e.g. // 'blockdevice'
// NdmBlockDeviceTypeCPK is the node-disk-manager blockdevice type e.g. 'blockdevice'
// PoolTypeMirroredCPV is a key for mirrored for pool
PoolTypeMirroredCPV CasPoolValString = "mirrored"
// PoolTypeStripedCPV is a key for striped for pool
PoolTypeStripedCPV CasPoolValString = "striped"
// PoolTypeRaidzCPV is a key for raidz for pool
PoolTypeRaidzCPV CasPoolValString = "raidz"
// PoolTypeRaidz2CPV is a key for raidz2 for pool
PoolTypeRaidz2CPV CasPoolValString = "raidz2"
// TypeSparseCPV is a key for sparse disk pool
TypeSparseCPV CasPoolValString = "sparse"
// TypeDiskCPV is a key for physical,iscsi,virtual etc disk pool
TypeDiskCPV CasPoolValString = "disk"
// TypeBlockDeviceCPV is a key for physical,iscsi,virtual etc blockdevice pool
TypeBlockDeviceCPV CasPoolValString = "blockdevice"
// StripedBlockDeviceCountCPV is the count for striped type pool
StripedBlockDeviceCountCPV CasPoolValInt = 1
// MirroredBlockDeviceCountCPV is the count for mirrored type pool
MirroredBlockDeviceCountCPV CasPoolValInt = 2
// RaidzBlockDeviceCountCPV is the count for raidz type pool
RaidzBlockDeviceCountCPV CasPoolValInt = 3
// Raidz2BlockDeviceCountCPV is the count for raidz2 type pool
Raidz2BlockDeviceCountCPV CasPoolValInt = 6
)
// CasPool is a type which will be utilised by CAS engine to perform
// storagepool related operation.
// TODO: Restructure CasPool struct.
type CasPool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// StoragePoolClaim is the name of the storagepoolclaim object
StoragePoolClaim string
// CasCreateTemplate is the cas template that will be used for storagepool create
// operation
CasCreateTemplate string
// CasDeleteTemplate is the cas template that will be used for storagepool delete
// operation
CasDeleteTemplate string
// Namespace can be passed via storagepoolclaim as labels to decide on the
// execution of namespaced resources with respect to storagepool
Namespace string
// BlockDeviceList is the list of block devices over which a storagepool will be provisioned
BlockDeviceList []BlockDeviceGroup
// PoolType is the type of pool to be provisioned e.g. striped or mirrored
PoolType string
// PoolCacheFile is cache file which used at the time of importing a pool
PoolCacheFile string
// MaxPools is the maximum number of pools that should be provisioned
MaxPools int
// MinPools is the minimum number of pools that should be provisioned
MinPools int
// Type is the CasPool type e.g. sparse or openebs-cstor
Type string
// NodeName is the node where cstor pool will be created
NodeName string
// ReSync will decide whether the event is a reconciliation event
ReSync bool
// PendingPoolCount is the number of pools that will be tried for creation as a part of reconciliation.
PendingPoolCount int
DeviceID []string
APIBlockDeviceList ndm.BlockDeviceList
}
| 1 | 17,781 | don't we need to set this label on pool pods? I don't see them being set | openebs-maya | go |
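
A hypothetical sketch of what the reviewer asks about; the cspi variable and the point where pool pod labels are assembled are assumptions, not code from this change.

// hypothetical: stamp the new label onto the pool pod template so the
// label actually appears on running pool pods
podLabels := map[string]string{
	string(CStorPoolInstanceCPK): cspi.Name,
}
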
@@ -27,7 +27,7 @@ struct train_kernel_cpu<Float, method::brute_force> {
train_result operator()(const context_cpu& ctx,
const descriptor_base& desc,
const train_input& input) const {
- throw unimplemented_error("k-NN brute force method is not implemented for CPU!");
+ throw unimplemented_error("k-NN brute force method is not implemented for CPU");
return train_result();
}
}; | 1 | /*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "oneapi/dal/algo/knn/backend/cpu/train_kernel.hpp"
#include "oneapi/dal/backend/interop/common.hpp"
#include "oneapi/dal/backend/interop/error_converter.hpp"
namespace oneapi::dal::knn::backend {
using dal::backend::context_cpu;
template <typename Float>
struct train_kernel_cpu<Float, method::brute_force> {
train_result operator()(const context_cpu& ctx,
const descriptor_base& desc,
const train_input& input) const {
throw unimplemented_error("k-NN brute force method is not implemented for CPU!");
return train_result();
}
};
template struct train_kernel_cpu<float, method::brute_force>;
template struct train_kernel_cpu<double, method::brute_force>;
} // namespace oneapi::dal::knn::backend
| 1 | 23,205 | can remove, it isn't reachable. | oneapi-src-oneDAL | cpp |
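
With the unreachable statement dropped, as the review asks, the kernel reduces to the throw; a sketch:

template <typename Float>
struct train_kernel_cpu<Float, method::brute_force> {
    train_result operator()(const context_cpu& ctx,
                            const descriptor_base& desc,
                            const train_input& input) const {
        // the throw never falls through, so no trailing return is needed
        throw unimplemented_error("k-NN brute force method is not implemented for CPU");
    }
};
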
@@ -160,7 +160,7 @@ describe Cancellation do
subscription = build_stubbed(:subscription, plan: subscribed_plan)
cancellation = Cancellation.new(subscription)
- expect(cancellation.can_downgrade_instead?).to be_true
+ expect(cancellation.can_downgrade_instead?).to be true
end
it 'returns true if the subscribed plan is not the downgrade plan' do | 1 | require 'spec_helper'
describe Cancellation do
describe "#process" do
before :each do
subscription.stubs(:stripe_customer_id).returns("cus_1CXxPJDpw1VLvJ")
mailer = stub(deliver: true)
SubscriptionMailer.stubs(:cancellation_survey).
with(subscription.user).returns(mailer)
updater = stub(unsubscribe: true)
AnalyticsUpdater.stubs(:new).with(subscription.user).returns(updater)
end
context "with an active subscription" do
it "makes the subscription inactive and records the current date" do
cancellation.process
expect(subscription.deactivated_on).to eq Time.zone.today
end
it "sends an unsubscription survey email" do
cancellation.process
expect(SubscriptionMailer).
to have_received(:cancellation_survey).with(subscription.user)
expect(SubscriptionMailer.cancellation_survey(subscription.user)).
to have_received(:deliver)
end
it "updates intercom status for user" do
cancellation.process
expect(AnalyticsUpdater).
to have_received(:new).with(subscription.user)
expect(AnalyticsUpdater.new(subscription.user)).
to have_received(:unsubscribe)
end
end
context "with an inactive subscription" do
it "doesn't send any updates" do
subscription.stubs(:active?).returns(false)
cancellation.process
expect(SubscriptionMailer.cancellation_survey(subscription.user)).
to have_received(:deliver).never
expect(AnalyticsUpdater.new(subscription.user)).
to have_received(:unsubscribe).never
end
end
end
describe 'schedule' do
it 'schedules a cancellation with Stripe' do
subscription = create(:subscription)
cancellation = Cancellation.new(subscription)
stripe_customer = stub(
'Stripe::Customer',
cancel_subscription: nil,
subscription: stub(current_period_end: 1361234235)
)
Stripe::Customer.stubs(:retrieve).returns(stripe_customer)
cancellation.schedule
expect(stripe_customer).to have_received(:cancel_subscription).
with(at_period_end: true)
expect(subscription.scheduled_for_cancellation_on).
to eq Time.zone.at(1361234235).to_date
end
it 'retrieves the customer correctly' do
subscription = create(:subscription)
cancellation = Cancellation.new(subscription)
subscription.stubs(:stripe_customer_id).returns('cus_1CXxPJDpw1VLvJ')
stripe_customer = stub(
'Stripe::Customer',
cancel_subscription: nil,
subscription: stub(current_period_end: 1361234235)
)
Stripe::Customer.stubs(:retrieve).returns(stripe_customer)
cancellation.schedule
expect(Stripe::Customer).to have_received(:retrieve)
.with('cus_1CXxPJDpw1VLvJ')
end
it 'does not make the subscription inactive if stripe unsubscribe fails' do
subscription = create(:subscription)
cancellation = Cancellation.new(subscription)
stripe_customer = stub('Stripe::Customer')
stripe_customer.stubs(:cancel_subscription).raises(Stripe::APIError)
Stripe::Customer.stubs(:retrieve).returns(stripe_customer)
expect { cancellation.schedule }.to raise_error
expect(Subscription.find(subscription.id)).to be_active
end
it 'does not unsubscribe from stripe if deactivating the subscription failed' do
subscription = create(:subscription)
cancellation = Cancellation.new(subscription)
stripe_customer = stub('Stripe::Customer')
subscription.stubs(:destroy).raises(ActiveRecord::RecordNotSaved)
Stripe::Customer.stubs(:retrieve).returns(stripe_customer)
expect { cancellation.schedule }.to raise_error
expect(subscription).to have_received(:cancel_subscription).never
end
end
describe 'cancel_and_refund' do
it 'cancels immediately and refunds the last charge with Stripe' do
subscription = create(:subscription)
charge = stub('Stripe::Charge', id: 'charge_id', refund: nil)
subscription.stubs(:last_charge).returns(charge)
cancellation = Cancellation.new(subscription)
stripe_customer = stub(
'Stripe::Customer',
cancel_subscription: nil,
subscription: stub(current_period_end: 1361234235)
)
Stripe::Customer.stubs(:retrieve).returns(stripe_customer)
cancellation.cancel_and_refund
expect(stripe_customer).to have_received(:cancel_subscription)
.with(at_period_end: false)
expect(charge).to have_received(:refund)
expect(subscription.scheduled_for_cancellation_on).to be_nil
end
it 'does not error if the customer was not charged' do
subscription = create(:subscription)
subscription.stubs(:last_charge).returns(nil)
cancellation = Cancellation.new(subscription)
stripe_customer = stub(
'Stripe::Customer',
cancel_subscription: nil,
subscription: stub(current_period_end: 1361234235)
)
Stripe::Customer.stubs(:retrieve).returns(stripe_customer)
expect { cancellation.cancel_and_refund }.not_to raise_error
expect(stripe_customer).to have_received(:cancel_subscription)
.with(at_period_end: false)
end
end
describe '#can_downgrade_instead?' do
it 'returns false if the subscribed plan is the downgrade plan' do
stub_downgrade_plan
subscribed_plan = build_stubbed(:plan)
subscription = build_stubbed(:subscription, plan: subscribed_plan)
cancellation = Cancellation.new(subscription)
expect(cancellation.can_downgrade_instead?).to be_true
end
it 'returns true if the subscribed plan is not the downgrade plan' do
downgrade_plan = stub_downgrade_plan
subscription = build_stubbed(:subscription, plan: downgrade_plan)
cancellation = Cancellation.new(subscription)
expect(cancellation.can_downgrade_instead?).to be_false
end
end
describe '#downgrade_plan' do
it 'returns the basic plan' do
downgrade_plan = stub_downgrade_plan
subscription = build_stubbed(:subscription)
cancellation = Cancellation.new(subscription)
expect(cancellation.downgrade_plan).to eq(downgrade_plan)
end
end
describe '#subscribed_plan' do
it 'returns the plan from the subscription' do
subscription = build_stubbed(:subscription)
cancellation = Cancellation.new(subscription)
expect(cancellation.subscribed_plan).to eq(subscription.plan)
end
end
describe '#downgrade' do
it 'switches to the downgrade plan' do
downgrade_plan = stub_downgrade_plan
subscription = build_stubbed(:subscription)
subscription.stubs(:change_plan)
cancellation = Cancellation.new(subscription)
cancellation.downgrade
expect(subscription).to have_received(:change_plan).with(downgrade_plan)
end
end
def stub_downgrade_plan
build_stubbed(:plan).tap do |plan|
IndividualPlan.stubs(:basic).returns(plan)
end
end
def subscription
@subscription ||= create(:subscription, :purchased)
end
def cancellation
@cancellation ||= Cancellation.new(subscription)
end
end
| 1 | 10,554 | `expect(cancellation).to be_can_downgrade_instead` would be preferred :rainbow: :rainbow: | thoughtbot-upcase | rb |
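
The predicate-matcher form the reviewer prefers, sketched against the two assertions in this spec:

expect(cancellation).to be_can_downgrade_instead      # instead of `to be true`
expect(cancellation).not_to be_can_downgrade_instead  # instead of `to be false`
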
@@ -65,6 +65,11 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal
}
}
+ if (_socket.IsClosed)
+ {
+ break;
+ }
+
if (result.IsCancelled)
{
// Send a FIN | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Server.Kestrel.Internal.System.IO.Pipelines;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal.Networking;
namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Libuv.Internal
{
public class LibuvOutputConsumer
{
private readonly LibuvThread _thread;
private readonly UvStreamHandle _socket;
private readonly string _connectionId;
private readonly ILibuvTrace _log;
private readonly IPipeReader _pipe;
public LibuvOutputConsumer(
IPipeReader pipe,
LibuvThread thread,
UvStreamHandle socket,
string connectionId,
ILibuvTrace log)
{
_pipe = pipe;
_thread = thread;
_socket = socket;
_connectionId = connectionId;
_log = log;
}
public async Task WriteOutputAsync()
{
var pool = _thread.WriteReqPool;
while (true)
{
var result = await _pipe.ReadAsync();
var buffer = result.Buffer;
var consumed = buffer.End;
try
{
if (!buffer.IsEmpty)
{
var writeReq = pool.Allocate();
try
{
var writeResult = await writeReq.WriteAsync(_socket, buffer);
LogWriteInfo(writeResult.Status, writeResult.Error);
if (writeResult.Error != null)
{
consumed = buffer.Start;
throw writeResult.Error;
}
}
finally
{
// Make sure we return the writeReq to the pool
pool.Return(writeReq);
}
}
if (result.IsCancelled)
{
// Send a FIN
_log.ConnectionWriteFin(_connectionId);
using (var shutdownReq = new UvShutdownReq(_log))
{
shutdownReq.Init(_thread);
var shutdownResult = await shutdownReq.ShutdownAsync(_socket);
_log.ConnectionWroteFin(_connectionId, shutdownResult.Status);
}
// Ensure no data is written after uv_shutdown
break;
}
if (buffer.IsEmpty && result.IsCompleted)
{
break;
}
}
finally
{
_pipe.Advance(consumed);
}
}
}
private void LogWriteInfo(int status, Exception error)
{
if (error == null)
{
_log.ConnectionWriteCallback(_connectionId, status);
}
else
{
// Log connection resets at a lower (Debug) level.
if (status == LibuvConstants.ECONNRESET)
{
_log.ConnectionReset(_connectionId);
}
else
{
_log.ConnectionError(_connectionId, error);
}
}
}
}
}
| 1 | 13,101 | Gross. The OS should timeout writes for completely unresponsive clients to begin with. Long term, the better solution is to enforce a minimum minimum data rate for responses. This might require a way to cancel LibuvAwaitables, but it definitely not OK to immediately kill the socket and any ongoing writes just because the request processing loop completed. You could consider doing that after some kind of timeout, but at that point you might as well just enforce a minimum rate. | aspnet-KestrelHttpServer | .cs |
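
An illustrative sketch of the minimum-data-rate idea from the review; every name here is an assumption rather than Kestrel's actual API, and the point is only that an unresponsive client gets a grace period before the connection is aborted.

// hypothetical check run once per timing interval
if (bytesWrittenThisInterval < MinimumBytesPerInterval)
{
    if (++slowIntervals >= GraceIntervals)
    {
        Abort(); // client stayed below the rate floor for too long
    }
}
else
{
    slowIntervals = 0; // a healthy interval resets the counter
}
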
@@ -66,7 +66,8 @@ void mesh_reader::load() {
select_subset_of_data();
}
-bool mesh_reader::fetch_datum(CPUMat& X, int data_id, int mb_idx, int tid) {
+bool mesh_reader::fetch_datum(CPUMat& X, int data_id, int mb_idx, thread_pool& io_thread_pool) {
+ // int tid = io_thread_pool.get_local_thread_id();
if (m_random_flips) {
fast_rng_gen& gen = get_fast_generator();
std::uniform_int_distribution<int> dist(0, 1); | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// data_reader_mesh .hpp .cpp - data reader for mesh data
////////////////////////////////////////////////////////////////////////////////
#include "lbann/data_readers/data_reader_mesh.hpp"
#include "lbann/utils/glob.hpp"
#include <omp.h>
namespace lbann {
mesh_reader::mesh_reader(bool shuffle)
: generic_data_reader(shuffle) {}
void mesh_reader::load() {
if (m_data_height == 0 || m_data_width == 0) {
throw lbann_exception("mesh_reader: data shape must be non-zero");
}
// Compute total number of samples based on number of targets.
std::vector<std::string> matches = glob(
get_file_dir() + m_target_name + m_suffix + "/*.bin");
if (matches.size() == 0) {
throw lbann_exception("mesh_reader: could not find any targets");
}
m_num_samples = matches.size();
// Set up buffers to load data into.
m_load_bufs.resize(omp_get_max_threads());
for (auto&& buf : m_load_bufs) {
buf.resize(m_data_height * m_data_width);
}
// Set up the format string.
if (std::pow(10, m_index_length) <= m_num_samples) {
throw lbann_exception("mesh_reader: index length too small");
}
m_index_format_str = "%0" + std::to_string(m_index_length) + "d";
// Set up to record flipping if needed.
if (m_random_flips) {
m_flip_choices.resize(m_num_samples);
}
// Reset indices.
m_shuffled_indices.resize(m_num_samples);
std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0);
select_subset_of_data();
}
bool mesh_reader::fetch_datum(CPUMat& X, int data_id, int mb_idx, int tid) {
if (m_random_flips) {
fast_rng_gen& gen = get_fast_generator();
std::uniform_int_distribution<int> dist(0, 1);
m_flip_choices[data_id].first = dist(gen);
m_flip_choices[data_id].second = dist(gen);
}
for (size_t i = 0; i < m_channels.size(); ++i) {
Mat X_view = El::View(
X, El::IR(i*m_data_height*m_data_width, (i+1)*m_data_height*m_data_width),
El::IR(mb_idx));
load_file(data_id, m_channels[i], X_view);
}
return true;
}
bool mesh_reader::fetch_response(CPUMat& Y, int data_id, int mb_idx, int tid) {
Mat Y_view = El::View(Y, El::ALL, El::IR(mb_idx));
load_file(data_id, m_target_name, Y_view);
return true;
}
void mesh_reader::load_file(int data_id, const std::string channel, Mat& mat) {
const std::string filename = construct_filename(channel, data_id);
std::ifstream f(filename, std::ios::binary);
if (f.fail()) {
throw lbann_exception("mesh_reader: failed to open " + filename);
}
// Load into a local buffer.
float* buf = m_load_bufs[omp_get_thread_num()].data();
if (!f.read((char*) buf, m_data_height * m_data_width * sizeof(float))) {
throw lbann_exception("mesh_reader: failed to read " + filename);
}
if (std::is_same<float, DataType>::value) {
// Need to transpose from row-major to column-major order.
Mat tmp_mat(m_data_width, m_data_height, buf, m_data_width);
Mat mat_reshape(m_data_height, m_data_width, mat.Buffer(), m_data_height);
El::Transpose(tmp_mat, mat_reshape);
// Flip if needed.
if (m_random_flips) {
if (m_flip_choices[data_id].first) {
horizontal_flip(mat_reshape);
}
if (m_flip_choices[data_id].second) {
vertical_flip(mat_reshape);
}
}
} else {
// Need to transpose and convert from float. Not yet supported.
throw lbann_exception("mesh_reader: does not support DataType != float");
}
}
std::string mesh_reader::construct_filename(std::string channel, int data_id) {
std::string filename = get_file_dir() + channel + m_suffix + "/" + channel;
char idx[m_index_length + 1];
std::snprintf(idx, m_index_length + 1, m_index_format_str.c_str(), data_id);
return filename + std::string(idx) + ".bin";
}
void mesh_reader::horizontal_flip(CPUMat& mat) {
// TODO: Could probably optimize this for better locality.
const El::Int height = mat.Height();
const El::Int width = mat.Width();
for (El::Int row = 0; row < height; ++row) {
for (El::Int col = 0; col < (width / 2); ++col) {
DataType tmp = mat(row, col);
mat(row, col) = mat(row, width - col - 1);
mat(row, width - col - 1) = tmp;
}
}
}
void mesh_reader::vertical_flip(CPUMat& mat) {
// TODO: Could probably optimize this for better locality.
const El::Int height = mat.Height();
const El::Int width = mat.Width();
for (El::Int row = 0; row < (height / 2); ++row) {
for (El::Int col = 0; col < width; ++col) {
DataType tmp = mat(row, col);
mat(row, col) = mat(height - row - 1, col);
mat(height - row - 1, col) = tmp;
}
}
}
} // namespace lbann
| 1 | 12,835 | If this is not used, we should just delete the line. | LLNL-lbann | cpp |
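
Applying the review literally, the dead comment is simply deleted; a sketch of the resulting function, using only code already in this file:

bool mesh_reader::fetch_datum(CPUMat& X, int data_id, int mb_idx,
                              thread_pool& io_thread_pool) {
  if (m_random_flips) {
    fast_rng_gen& gen = get_fast_generator();
    std::uniform_int_distribution<int> dist(0, 1);
    m_flip_choices[data_id].first = dist(gen);
    m_flip_choices[data_id].second = dist(gen);
  }
  for (size_t i = 0; i < m_channels.size(); ++i) {
    Mat X_view = El::View(
        X, El::IR(i*m_data_height*m_data_width, (i+1)*m_data_height*m_data_width),
        El::IR(mb_idx));
    load_file(data_id, m_channels[i], X_view);
  }
  return true;
}
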
@@ -89,11 +89,7 @@ func defaultExec(
// for transporting shell-style streams
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
- return nil, errors.Wrapf(
- err,
- "failed to exec into pod {%s}: failed to connect to the provided server",
- name,
- )
+ return nil, err
}
// Stream initiates transport of standard shell streams | 1 | // Copyright © 2018-2019 The OpenEBS Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"bytes"
"encoding/json"
errors "github.com/openebs/maya/pkg/errors/v1alpha1"
client "github.com/openebs/maya/pkg/kubernetes/client/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
// getClientsetFn is a typed function that
// abstracts fetching of clientset
type getClientsetFn func() (*clientset.Clientset, error)
// getClientsetFromPathFn is a typed function that
// abstracts fetching of clientset from kubeConfigPath
type getClientsetForPathFn func(kubeConfigPath string) (*clientset.Clientset, error)
// getKubeConfigFn is a typed function that
// abstracts fetching of config
type getKubeConfigFn func() (*rest.Config, error)
// getKubeConfigForPathFn is a typed function that
// abstracts fetching of config from kubeConfigPath
type getKubeConfigForPathFn func(kubeConfigPath string) (*rest.Config, error)
// createFn is a typed function that abstracts
// creation of pod
type createFn func(cli *clientset.Clientset, namespace string, pod *corev1.Pod) (*corev1.Pod, error)
// listFn is a typed function that abstracts
// listing of pods
type listFn func(cli *clientset.Clientset, namespace string, opts metav1.ListOptions) (*corev1.PodList, error)
// deleteFn is a typed function that abstracts
// deleting of pod
type deleteFn func(cli *clientset.Clientset, namespace, name string, opts *metav1.DeleteOptions) error
// deleteCollectionFn is a typed function that abstracts
// deletion of pod's collection
type deleteCollectionFn func(cli *clientset.Clientset, namespace string, listOpts metav1.ListOptions, deleteOpts *metav1.DeleteOptions) error
// getFn is a typed function that abstracts
// getting of pod
type getFn func(cli *clientset.Clientset, namespace, name string, opts metav1.GetOptions) (*corev1.Pod, error)
// execFn is a typed function that abstracts
// pod exec
type execFn func(cli *clientset.Clientset, config *rest.Config, name, namespace string, opts *corev1.PodExecOptions) (*ExecOutput, error)
// defaultExec is the default implementation of execFn
func defaultExec(
cli *clientset.Clientset,
config *rest.Config,
name string,
namespace string,
opts *corev1.PodExecOptions,
) (*ExecOutput, error) {
var stdout, stderr bytes.Buffer
req := cli.CoreV1().RESTClient().Post().
Resource("pods").
Name(name).
Namespace(namespace).
SubResource("exec").
VersionedParams(opts, scheme.ParameterCodec)
// create exec executor which is an interface
// for transporting shell-style streams
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
return nil, errors.Wrapf(
err,
"failed to exec into pod {%s}: failed to connect to the provided server",
name,
)
}
// Stream initiates transport of standard shell streams
// It will transport any non-nil stream to a remote system,
// and return an error if a problem occurs
err = exec.Stream(remotecommand.StreamOptions{
Stdin: nil,
Stdout: &stdout,
Stderr: &stderr,
Tty: opts.TTY,
})
if err != nil {
return nil, errors.Wrapf(
err,
"failed to exec into pod {%s}: failed to stream",
name,
)
}
execOutput := &ExecOutput{
Stdout: stdout.String(),
Stderr: stderr.String(),
}
return execOutput, nil
}
// KubeClient enables kubernetes API operations
// on pod instance
type KubeClient struct {
// clientset refers to pod clientset
// that will be responsible to
// make kubernetes API calls
clientset *clientset.Clientset
// namespace holds the namespace on which
// KubeClient has to operate
namespace string
// kubeConfig represents kubernetes config
kubeConfig *rest.Config
// kubeconfig path to get kubernetes clientset
kubeConfigPath string
// functions useful during mocking
getKubeConfig getKubeConfigFn
getKubeConfigForPath getKubeConfigForPathFn
getClientset getClientsetFn
getClientsetForPath getClientsetForPathFn
create createFn
list listFn
del deleteFn
delCollection deleteCollectionFn
get getFn
exec execFn
}
// ExecOutput struct contains stdout and stderr
type ExecOutput struct {
Stdout string `json:"stdout"`
Stderr string `json:"stderr"`
}
// KubeClientBuildOption defines the abstraction
// to build a KubeClient instance
type KubeClientBuildOption func(*KubeClient)
// withDefaults sets the default options
// of KubeClient instance
func (k *KubeClient) withDefaults() {
if k.getKubeConfig == nil {
k.getKubeConfig = func() (config *rest.Config, err error) {
return client.New().Config()
}
}
if k.getKubeConfigForPath == nil {
k.getKubeConfigForPath = func(kubeConfigPath string) (
config *rest.Config, err error) {
return client.New(client.WithKubeConfigPath(kubeConfigPath)).
GetConfigForPathOrDirect()
}
}
if k.getClientset == nil {
k.getClientset = func() (clients *clientset.Clientset, err error) {
return client.New().Clientset()
}
}
if k.getClientsetForPath == nil {
k.getClientsetForPath = func(kubeConfigPath string) (
clients *clientset.Clientset, err error) {
return client.New(client.WithKubeConfigPath(kubeConfigPath)).Clientset()
}
}
if k.create == nil {
k.create = func(cli *clientset.Clientset,
namespace string, pod *corev1.Pod) (*corev1.Pod, error) {
return cli.CoreV1().Pods(namespace).Create(pod)
}
}
if k.list == nil {
k.list = func(cli *clientset.Clientset,
namespace string, opts metav1.ListOptions) (*corev1.PodList, error) {
return cli.CoreV1().Pods(namespace).List(opts)
}
}
if k.del == nil {
k.del = func(cli *clientset.Clientset, namespace,
name string, opts *metav1.DeleteOptions) error {
return cli.CoreV1().Pods(namespace).Delete(name, opts)
}
}
if k.get == nil {
k.get = func(cli *clientset.Clientset, namespace,
name string, opts metav1.GetOptions) (*corev1.Pod, error) {
return cli.CoreV1().Pods(namespace).Get(name, opts)
}
}
if k.delCollection == nil {
k.delCollection = func(cli *clientset.Clientset, namespace string,
listOpts metav1.ListOptions, deleteOpts *metav1.DeleteOptions) error {
return cli.CoreV1().Pods(namespace).DeleteCollection(deleteOpts, listOpts)
}
}
if k.exec == nil {
k.exec = defaultExec
}
}
// WithClientSet sets the kubernetes client against
// the KubeClient instance
func WithClientSet(c *clientset.Clientset) KubeClientBuildOption {
return func(k *KubeClient) {
k.clientset = c
}
}
// WithKubeConfigPath sets the kubeConfig path
// against client instance
func WithKubeConfigPath(path string) KubeClientBuildOption {
return func(k *KubeClient) {
k.kubeConfigPath = path
}
}
// NewKubeClient returns a new instance of KubeClient meant for
// cstor volume replica operations
func NewKubeClient(opts ...KubeClientBuildOption) *KubeClient {
k := &KubeClient{}
for _, o := range opts {
o(k)
}
k.withDefaults()
return k
}
// WithNamespace sets the kubernetes namespace against
// the provided namespace
func (k *KubeClient) WithNamespace(namespace string) *KubeClient {
k.namespace = namespace
return k
}
// WithKubeConfig sets the kubernetes config against
// the KubeClient instance
func (k *KubeClient) WithKubeConfig(config *rest.Config) *KubeClient {
k.kubeConfig = config
return k
}
func (k *KubeClient) getClientsetForPathOrDirect() (
*clientset.Clientset, error) {
if k.kubeConfigPath != "" {
return k.getClientsetForPath(k.kubeConfigPath)
}
return k.getClientset()
}
// getClientsetOrCached returns either a new instance
// of kubernetes client or its cached copy
func (k *KubeClient) getClientsetOrCached() (*clientset.Clientset, error) {
if k.clientset != nil {
return k.clientset, nil
}
cs, err := k.getClientsetForPathOrDirect()
if err != nil {
return nil, errors.Wrapf(err, "failed to get clientset")
}
k.clientset = cs
return k.clientset, nil
}
func (k *KubeClient) getKubeConfigForPathOrDirect() (*rest.Config, error) {
if k.kubeConfigPath != "" {
return k.getKubeConfigForPath(k.kubeConfigPath)
}
return k.getKubeConfig()
}
// getKubeConfigOrCached returns either a new instance
// of kubernetes config or its cached copy
func (k *KubeClient) getKubeConfigOrCached() (*rest.Config, error) {
if k.kubeConfig != nil {
return k.kubeConfig, nil
}
kc, err := k.getKubeConfigForPathOrDirect()
if err != nil {
return nil, errors.Wrapf(err, "failed to get kube config")
}
k.kubeConfig = kc
return k.kubeConfig, nil
}
// List returns a list of pod
// instances present in kubernetes cluster
func (k *KubeClient) List(opts metav1.ListOptions) (*corev1.PodList, error) {
cli, err := k.getClientsetOrCached()
if err != nil {
return nil, errors.Wrapf(err, "failed to list pods")
}
return k.list(cli, k.namespace, opts)
}
// Delete deletes a pod instance present in kubernetes cluster
func (k *KubeClient) Delete(name string, opts *metav1.DeleteOptions) error {
if len(name) == 0 {
return errors.New("failed to delete pod: missing pod name")
}
cli, err := k.getClientsetOrCached()
if err != nil {
return errors.Wrapf(
err,
"failed to delete pod {%s}: failed to get clientset",
name,
)
}
return k.del(cli, k.namespace, name, opts)
}
// Create creates a pod in specified namespace in kubernetes cluster
func (k *KubeClient) Create(pod *corev1.Pod) (*corev1.Pod, error) {
if pod == nil {
return nil, errors.New("failed to create pod: nil pod object")
}
cli, err := k.getClientsetOrCached()
if err != nil {
return nil, errors.Wrapf(
err,
"failed to create pod {%s} in namespace {%s}",
pod.Name,
pod.Namespace,
)
}
return k.create(cli, k.namespace, pod)
}
// Get gets a pod object present in kubernetes cluster
func (k *KubeClient) Get(name string,
opts metav1.GetOptions) (*corev1.Pod, error) {
if len(name) == 0 {
return nil, errors.New("failed to get pod: missing pod name")
}
cli, err := k.getClientsetOrCached()
if err != nil {
return nil, errors.Wrapf(
err,
"failed to get pod {%s}: failed to get clientset",
name,
)
}
return k.get(cli, k.namespace, name, opts)
}
// GetRaw gets pod object for a given name and namespace present
// in kubernetes cluster and returns result in raw byte.
func (k *KubeClient) GetRaw(name string,
opts metav1.GetOptions) ([]byte, error) {
p, err := k.Get(name, opts)
if err != nil {
return nil, err
}
return json.Marshal(p)
}
// Exec runs a command remotely in a container of a pod
func (k *KubeClient) Exec(name string,
opts *corev1.PodExecOptions) (*ExecOutput, error) {
cli, err := k.getClientsetOrCached()
if err != nil {
return nil, err
}
config, err := k.getKubeConfigOrCached()
if err != nil {
return nil, err
}
return k.exec(cli, config, name, k.namespace, opts)
}
// ExecRaw runs a command remotely in a container of a pod
// and returns raw output
func (k *KubeClient) ExecRaw(name string,
opts *corev1.PodExecOptions) ([]byte, error) {
execOutput, err := k.Exec(name, opts)
if err != nil {
return nil, err
}
return json.Marshal(execOutput)
}
// DeleteCollection deletes a collection of pod objects.
func (k *KubeClient) DeleteCollection(listOpts metav1.ListOptions, deleteOpts *metav1.DeleteOptions) error {
cli, err := k.getClientsetOrCached()
if err != nil {
return errors.Wrapf(err, "failed to delete the collection of pods")
}
return k.delCollection(cli, k.namespace, listOpts, deleteOpts)
}
| 1 | 17,306 | Are there other callers to this function. How will they be impacted. | openebs-maya | go |
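
One way to answer the review's question, sketched: defaultExec's known caller in this file is KubeClient.Exec, which now surfaces the bare SPDY-executor error; the caller could add the pod context itself. The wrapping shown is a suggestion, not code from this change.

// inside (k *KubeClient) Exec, after the clientset/config lookups
out, err := k.exec(cli, config, name, k.namespace, opts)
if err != nil {
	return nil, errors.Wrapf(err, "failed to exec into pod {%s}", name)
}
return out, nil
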
@@ -23,7 +23,7 @@
Defines an interface which all Auth handlers need to implement.
"""
-from plugin import Plugin
+from .plugin import Plugin
class NotReadyToAuthenticate(Exception):
pass | 1 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Defines an interface which all Auth handlers need to implement.
"""
from plugin import Plugin
class NotReadyToAuthenticate(Exception):
pass
class AuthHandler(Plugin):
capability = []
def __init__(self, host, config, provider):
"""Constructs the handlers.
:type host: string
:param host: The host to which the request is being sent.
:type config: boto.pyami.Config
:param config: Boto configuration.
:type provider: boto.provider.Provider
:param provider: Provider details.
Raises:
NotReadyToAuthenticate: if this handler is not willing to
authenticate for the given provider and config.
"""
pass
def add_auth(self, http_request):
"""Invoked to add authentication details to request.
:type http_request: boto.connection.HTTPRequest
:param http_request: HTTP request that needs to be authenticated.
"""
pass
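# Illustration only (not part of auth_handler.py): the import styles involved
# in the patch above. The bare form relies on Python 2's implicit relative
# imports and breaks under Python 3; the dotted forms are explicit.
#
#   from plugin import Plugin        # implicit relative import (Python 2 only)
#   from .plugin import Plugin       # explicit relative import (the patch)
#   from boto.plugin import Plugin   # absolute import, per the review comment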
| 1 | 10,149 | Let's be consistent. The majority of boto code does not use relative imports. Let's just stick to the existing standard of "from boto.package.subpackage import Thing". | boto-boto | py |
@@ -207,6 +207,12 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
// CLI options defined by user at runtime.
// Options parsing is done with CLI library Picocli https://picocli.info/
+ @Option(
+ names = "--identity",
+ paramLabel = "<String>",
+ description = "Identification for this node in the Client ID")
+ private final Optional<String> identityString = Optional.empty();
+
// Completely disables P2P within Besu.
@Option(
names = {"--p2p-enabled"}, | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.Runner;
import org.hyperledger.besu.RunnerBuilder;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.converter.RpcApisConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
import org.hyperledger.besu.cli.custom.JsonRPCWhitelistHostsProperty;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.error.BesuExceptionHandler;
import org.hyperledger.besu.cli.options.EthProtocolOptions;
import org.hyperledger.besu.cli.options.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.NetworkingOptions;
import org.hyperledger.besu.cli.options.SynchronizerOptions;
import org.hyperledger.besu.cli.options.TransactionPoolOptions;
import org.hyperledger.besu.cli.subcommands.PasswordSubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand.KeyLoader;
import org.hyperledger.besu.cli.subcommands.RetestethSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand.JsonBlockImporterFactory;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand.RlpBlockExporterFactory;
import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand;
import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand;
import org.hyperledger.besu.cli.util.BesuCommandCustomFactory;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.controller.BesuControllerBuilder;
import org.hyperledger.besu.controller.KeyPairUtil;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.worldstate.PruningConfiguration;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.metrics.prometheus.PrometheusMetricsSystem;
import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.PicoCLIOptions;
import org.hyperledger.besu.plugin.services.StorageService;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.services.BesuConfigurationImpl;
import org.hyperledger.besu.services.BesuEventsImpl;
import org.hyperledger.besu.services.BesuPluginContextImpl;
import org.hyperledger.besu.services.PicoCLIOptionsImpl;
import org.hyperledger.besu.services.StorageServiceImpl;
import org.hyperledger.besu.util.NetworkUtility;
import org.hyperledger.besu.util.PermissioningConfigurationValidator;
import org.hyperledger.besu.util.bytes.BytesValue;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.PositiveNumber;
import org.hyperledger.besu.util.uint.UInt256;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Resources;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.DecodeException;
import io.vertx.core.metrics.MetricsOptions;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import picocli.CommandLine;
import picocli.CommandLine.AbstractParseResultHandler;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParameterException;
@SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives
@Command(
description = "This command runs the Besu Ethereum client full node.",
abbreviateSynopsis = true,
name = "besu",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
header = "Usage:",
synopsisHeading = "%n",
descriptionHeading = "%nDescription:%n%n",
optionListHeading = "%nOptions:%n",
footerHeading = "%n",
footer = "Besu is licensed under the Apache License 2.0")
public class BesuCommand implements DefaultCommandValues, Runnable {
private final Logger logger;
private CommandLine commandLine;
private final RlpBlockImporter rlpBlockImporter;
private final JsonBlockImporterFactory jsonBlockImporterFactory;
private final RlpBlockExporterFactory rlpBlockExporterFactory;
final NetworkingOptions networkingOptions = NetworkingOptions.create();
final SynchronizerOptions synchronizerOptions = SynchronizerOptions.create();
final EthProtocolOptions ethProtocolOptions = EthProtocolOptions.create();
final MetricsCLIOptions metricsCLIOptions = MetricsCLIOptions.create();
final TransactionPoolOptions transactionPoolOptions = TransactionPoolOptions.create();
private final RunnerBuilder runnerBuilder;
private final BesuController.Builder controllerBuilderFactory;
private final BesuPluginContextImpl besuPluginContext;
private final StorageServiceImpl storageService;
private final Map<String, String> environment;
private final MetricCategoryRegistryImpl metricCategoryRegistry =
new MetricCategoryRegistryImpl();
private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter();
protected KeyLoader getKeyLoader() {
return KeyPairUtil::loadKeyPair;
}
// Public IP stored to prevent having to research it each time we need it.
private InetAddress autoDiscoveredDefaultIP = null;
// Property to indicate whether Besu has been launched via docker
private final boolean isDocker = Boolean.getBoolean("besu.docker");
// CLI options defined by user at runtime.
// Options parsing is done with CLI library Picocli https://picocli.info/
// Completely disables P2P within Besu.
@Option(
names = {"--p2p-enabled"},
description = "Enable P2P functionality (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean p2pEnabled = true;
  // Boolean option to indicate if peers should NOT be discovered; defaulting to
  // false indicates that the peers should be discovered by default.
  //
  // This negative option is required because of the nature of a boolean option,
  // which is true when added on the command line. You can't do --option=false,
  // so false is set as the default and you simply leave the option off entirely
  // if you want it to stay false.
  // This seems to be the only way it works with Picocli.
  // Also, much other software uses the same negative option scheme for false
  // defaults, meaning that it's probably the right way to handle disabling
  // options.
@Option(
names = {"--discovery-enabled"},
description = "Enable P2P peer discovery (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean peerDiscoveryEnabled = true;
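  // Illustration only (hypothetical flag, not part of this class): with a plain
  // arity-0 Picocli boolean option, presence on the command line means true,
  //   besu --some-flag   -> true
  //   besu               -> the field's default value
  // so an option meant to default to true cannot be switched off unless it
  // takes an explicit value (arity = "1", as above) or has a negative variant.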
// A list of bootstrap nodes can be passed
// and a hardcoded list will be used otherwise by the Runner.
// NOTE: we have no control over default value here.
@Option(
names = {"--bootnodes"},
paramLabel = "<enode://id@host:port>",
description =
"Comma separated enode URLs for P2P discovery bootstrap. "
+ "Default is a predefined list.",
split = ",",
arity = "0..*")
void setBootnodes(final List<String> values) {
try {
bootNodes =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::fromString)
.collect(Collectors.toList());
DiscoveryConfiguration.assertValidBootnodes(bootNodes);
} catch (final IllegalArgumentException e) {
throw new ParameterException(commandLine, e.getMessage());
}
}
private List<EnodeURL> bootNodes = null;
@Option(
names = {"--max-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum P2P peer connections that can be established (default: ${DEFAULT-VALUE})")
private final Integer maxPeers = DEFAULT_MAX_PEERS;
@Option(
names = {"--remote-connections-limit-enabled"},
description =
"Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})")
private final Boolean isLimitRemoteWireConnectionsEnabled = true;
@Option(
names = {"--remote-connections-max-percentage"},
paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
description =
"The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})",
arity = "1",
converter = PercentageConverter.class)
private final Integer maxRemoteConnectionsPercentage =
Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED)
.toPercentage()
.getValue();
@Option(
names = {"--banned-node-ids", "--banned-node-id"},
paramLabel = MANDATORY_NODE_ID_FORMAT_HELP,
description = "A list of node IDs to ban from the P2P network.",
split = ",",
arity = "1..*")
void setBannedNodeIds(final List<String> values) {
try {
bannedNodeIds =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::parseNodeId)
.collect(Collectors.toList());
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage());
}
}
private Collection<BytesValue> bannedNodeIds = new ArrayList<>();
@Option(
names = {"--sync-mode"},
paramLabel = MANDATORY_MODE_FORMAT_HELP,
description =
"Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: ${DEFAULT-VALUE})")
private final SyncMode syncMode = DEFAULT_SYNC_MODE;
@Option(
names = {"--fast-sync-min-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})")
private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT;
@Option(
names = {"--network"},
paramLabel = MANDATORY_NETWORK_FORMAT_HELP,
description =
"Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}."
+ " (default: MAINNET)")
private final NetworkName network = null;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--p2p-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--p2p-interface"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description =
"The network interface address on which this node listens for p2p communication (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pInterface = NetworkUtility.INADDR_ANY;
@Option(
names = {"--p2p-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port on which to listen for p2p communication (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT;
@Option(
names = {"--nat-method"},
description =
"Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}."
+ " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})")
private final NatMethod natMethod = DEFAULT_NAT_METHOD;
@Option(
names = {"--network-id"},
paramLabel = "<BIG INTEGER>",
description =
"P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)",
arity = "1")
private final BigInteger networkId = null;
@Option(
names = {"--graphql-http-enabled"},
description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isGraphQLHttpEnabled = false;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--graphql-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--graphql-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT;
@Option(
names = {"--graphql-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-enabled"},
description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpEnabled = false;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--rpc-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT;
// A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS)
@Option(
names = {"--rpc-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-api", "--rpc-http-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-http-authentication-enabled"},
description =
"Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpAuthenticationEnabled = false;
@Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--rpc-ws-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-ws-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@Option(
names = {"--metrics-enabled"},
description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsEnabled = false;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--metrics-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPort = DEFAULT_METRICS_PORT;
@Option(
names = {"--metrics-category", "--metrics-categories"},
paramLabel = "<category name>",
split = ",",
arity = "1..*",
description =
"Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})")
private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES;
@Option(
names = {"--metrics-push-enabled"},
description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsPushEnabled = false;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--metrics-push-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-push-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT;
@Option(
names = {"--metrics-push-interval"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushInterval = 15;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--metrics-push-prometheus-job"},
description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPrometheusJob = "besu-client";
@Option(
names = {"--host-whitelist"},
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Comma separated list of hostnames to whitelist for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})",
defaultValue = "localhost,127.0.0.1")
private final JsonRPCWhitelistHostsProperty hostsWhitelist = new JsonRPCWhitelistHostsProperty();
@Option(
names = {"--logging", "-l"},
paramLabel = "<LOG VERBOSITY LEVEL>",
description =
"Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL (default: ${DEFAULT-VALUE})")
private final Level logLevel = LogManager.getRootLogger().getLevel();
@Option(
names = {"--miner-enabled"},
description = "Set if node will perform mining (default: ${DEFAULT-VALUE})")
private final Boolean isMiningEnabled = false;
@Option(
names = {"--miner-coinbase"},
description =
"Account to which mining rewards are paid. You must specify a valid coinbase if "
+ "mining is enabled using --miner-enabled option",
arity = "1")
private final Address coinbase = null;
@Option(
names = {"--min-gas-price"},
description =
"Minimum price (in Wei) offered by a transaction for it to be included in a mined "
+ "block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE;
@Option(
names = {"--miner-extra-data"},
description =
"A hex string representing the (32) bytes to be included in the extra data "
+ "field of a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final BytesValue extraData = DEFAULT_EXTRA_DATA;
@Option(
names = {"--pruning-enabled"},
hidden = true,
description =
"Enable pruning of world state of blocks older than the retention period (default: ${DEFAULT-VALUE})")
private final Boolean isPruningEnabled = false;
@Option(
names = {"--pruning-blocks-retained"},
hidden = true,
description =
"Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})",
arity = "1")
private final Long pruningBlocksRetained = DEFAULT_PRUNING_BLOCKS_RETAINED;
@Option(
names = {"--pruning-block-confirmations"},
hidden = true,
description =
"Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})",
arity = "1")
private final Long pruningBlockConfirmations = DEFAULT_PRUNING_BLOCK_CONFIRMATIONS;
@Option(
names = {"--permissions-nodes-config-file-enabled"},
description = "Enable node level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesEnabled = false;
@Option(
names = {"--permissions-accounts-config-file-enabled"},
description = "Enable account level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsEnabled = false;
@Option(
names = {"--permissions-nodes-contract-address"},
description = "Address of the node permissioning smart contract",
arity = "1")
private final Address permissionsNodesContractAddress = null;
@Option(
names = {"--permissions-nodes-contract-enabled"},
description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesContractEnabled = false;
@Option(
names = {"--permissions-accounts-contract-address"},
description = "Address of the account permissioning smart contract",
arity = "1")
private final Address permissionsAccountsContractAddress = null;
@Option(
names = {"--permissions-accounts-contract-enabled"},
description =
"Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsContractEnabled = false;
@Option(
names = {"--privacy-enabled"},
description = "Enable private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyEnabled = false;
@Option(
names = {"--revert-reason-enabled"},
description =
"Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})")
private final Boolean isRevertReasonEnabled = false;
@Option(
names = {"--required-blocks", "--required-block"},
paramLabel = "BLOCK=HASH",
description = "Block number and hash peers are required to have.",
arity = "*",
split = ",")
private final Map<Long, Hash> requiredBlocks = new HashMap<>();
@Option(
names = {"--privacy-url"},
description = "The URL on which the enclave is running")
private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL;
@Option(
names = {"--privacy-precompiled-address"},
description =
"The address to which the privacy pre-compiled contract will be mapped to (default: ${DEFAULT-VALUE})")
private final Integer privacyPrecompiledAddress = Address.PRIVACY;
@Option(
names = {"--privacy-marker-transaction-signing-key-file"},
description =
"The name of a file containing the private key used to sign privacy marker transactions. If unset, each will be signed with a random key.")
private final Path privacyMarkerTransactionSigningKeyPath = null;
@Option(
names = {"--target-gas-limit"},
description =
"Sets target gas limit per block. If set each blocks gas limit will approach this setting over time if the current gas limit is different.")
private final Long targetGasLimit = null;
@Option(
names = {"--tx-pool-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
@Option(
names = {"--tx-pool-retention-hours"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pendingTxRetentionPeriod =
TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS;
@SuppressWarnings("FieldMayBeFinal") // Because PicoCLI requires Strings to not be final.
@Option(
names = {"--key-value-storage"},
description = "Identity for the key-value storage to be used.",
arity = "1")
private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
@Option(
names = {"--override-genesis-config"},
paramLabel = "NAME=VALUE",
description = "Overrides configuration values in the genesis file. Use with care.",
arity = "*",
hidden = true,
split = ",")
private final Map<String, String> genesisConfigOverrides =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
private EthNetworkConfig ethNetworkConfig;
private JsonRpcConfiguration jsonRpcConfiguration;
private GraphQLConfiguration graphQLConfiguration;
private WebSocketConfiguration webSocketConfiguration;
private MetricsConfiguration metricsConfiguration;
private Optional<PermissioningConfiguration> permissioningConfiguration;
private Collection<EnodeURL> staticNodes;
private BesuController<?> besuController;
private StandaloneCommand standaloneCommands;
private BesuConfiguration pluginCommonConfiguration;
private final Supplier<ObservableMetricsSystem> metricsSystem =
Suppliers.memoize(() -> PrometheusMetricsSystem.init(metricsConfiguration()));
public BesuCommand(
final Logger logger,
final RlpBlockImporter rlpBlockImporter,
final JsonBlockImporterFactory jsonBlockImporterFactory,
final RlpBlockExporterFactory rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment) {
this(
logger,
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
runnerBuilder,
controllerBuilderFactory,
besuPluginContext,
environment,
new StorageServiceImpl());
}
@VisibleForTesting
protected BesuCommand(
final Logger logger,
final RlpBlockImporter rlpBlockImporter,
final JsonBlockImporterFactory jsonBlockImporterFactory,
final RlpBlockExporterFactory rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment,
final StorageServiceImpl storageService) {
this.logger = logger;
this.rlpBlockImporter = rlpBlockImporter;
this.rlpBlockExporterFactory = rlpBlockExporterFactory;
this.jsonBlockImporterFactory = jsonBlockImporterFactory;
this.runnerBuilder = runnerBuilder;
this.controllerBuilderFactory = controllerBuilderFactory;
this.besuPluginContext = besuPluginContext;
this.environment = environment;
this.storageService = storageService;
}
public void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final InputStream in,
final String... args) {
commandLine =
new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext))
.setCaseInsensitiveEnumValuesAllowed(true);
handleStandaloneCommand()
.addSubCommands(resultHandler, in)
.registerConverters()
.handleUnstableOptions()
.preparePlugins()
.parse(resultHandler, exceptionHandler, args);
}
@Override
public void run() {
try {
prepareLogging();
logger.info("Starting Besu version: {}", BesuInfo.version());
validateOptions().configure().controller().startPlugins().startSynchronization();
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage(), e);
}
}
private void addConfigurationService() {
if (pluginCommonConfiguration == null) {
final Path dataDir = dataDir();
pluginCommonConfiguration =
new BesuConfigurationImpl(dataDir, dataDir.resolve(DATABASE_PATH));
besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration);
}
}
@VisibleForTesting
void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) {
this.pluginCommonConfiguration = pluginCommonConfiguration;
}
private BesuCommand handleStandaloneCommand() {
standaloneCommands = new StandaloneCommand();
if (isFullInstantiation()) {
commandLine.addMixin("standaloneCommands", standaloneCommands);
}
return this;
}
private BesuCommand addSubCommands(
final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) {
commandLine.addSubcommand(
BlocksSubCommand.COMMAND_NAME,
new BlocksSubCommand(
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
resultHandler.out()));
commandLine.addSubcommand(
PublicKeySubCommand.COMMAND_NAME,
new PublicKeySubCommand(resultHandler.out(), getKeyLoader()));
commandLine.addSubcommand(
PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out()));
commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand());
commandLine.addSubcommand(
RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in));
commandLine.addSubcommand(
OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out()));
return this;
}
private BesuCommand registerConverters() {
commandLine.registerConverter(Address.class, Address::fromHexStringStrict);
commandLine.registerConverter(BytesValue.class, BytesValue::fromHexString);
commandLine.registerConverter(Level.class, Level::valueOf);
commandLine.registerConverter(SyncMode.class, SyncMode::fromString);
commandLine.registerConverter(UInt256.class, (arg) -> UInt256.of(new BigInteger(arg)));
commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg)));
commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString);
commandLine.registerConverter(Hash.class, Hash::fromHexString);
metricCategoryConverter.addCategories(BesuMetricCategory.class);
metricCategoryConverter.addCategories(StandardMetricCategory.class);
commandLine.registerConverter(MetricCategory.class, metricCategoryConverter);
return this;
}
private BesuCommand handleUnstableOptions() {
// Add unstable options
final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder();
final ImmutableMap<String, Object> unstableOptions =
unstableOptionsBuild
.put("Ethereum Wire Protocol", ethProtocolOptions)
.put("Metrics", metricsCLIOptions)
.put("P2P Network", networkingOptions)
.put("Synchronizer", synchronizerOptions)
.put("TransactionPool", transactionPoolOptions)
.build();
UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions);
return this;
}
private BesuCommand preparePlugins() {
besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine));
besuPluginContext.addService(StorageService.class, storageService);
besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry);
// register built-in plugins
new RocksDBPlugin().register(besuPluginContext);
besuPluginContext.registerPlugins(pluginsDir());
metricCategoryRegistry
.getMetricCategories()
.forEach(metricCategoryConverter::addRegistryCategory);
return this;
}
private void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final String... args) {
    // Create a handler that will search for a config file option, use it for
    // default values, and then run regular parsing of the remaining options.
final ConfigOptionSearchAndRunHandler configParsingHandler =
new ConfigOptionSearchAndRunHandler(
resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment, isDocker);
commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args);
}
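  // Illustration only (hypothetical TOML content; the flag name comes from the
  // CONFIG_FILE_OPTION_NAME constant used above):
  //   besu --config-file=/etc/besu/config.toml
  // where config.toml might supply defaults such as
  //   p2p-port=30304
  //   rpc-http-enabled=true
  // Options given directly on the command line still take precedence over the file.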
private void startSynchronization() {
synchronize(
besuController,
p2pEnabled,
peerDiscoveryEnabled,
ethNetworkConfig,
maxPeers,
p2pHost,
p2pInterface,
p2pPort,
graphQLConfiguration,
jsonRpcConfiguration,
webSocketConfiguration,
metricsConfiguration,
permissioningConfiguration,
staticNodes);
}
private BesuCommand startPlugins() {
besuPluginContext.addService(
BesuEvents.class,
new BesuEventsImpl(
besuController.getProtocolManager().getBlockBroadcaster(),
besuController.getTransactionPool(),
besuController.getSyncState()));
besuPluginContext.addService(MetricsSystem.class, getMetricsSystem());
besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext);
besuPluginContext.startPlugins();
return this;
}
private void prepareLogging() {
// set log level per CLI flags
if (logLevel != null) {
System.out.println("Setting logging level to " + logLevel.name());
Configurator.setAllLevels("", logLevel);
}
}
private BesuCommand validateOptions() {
issueOptionWarnings();
validateP2PInterface(p2pInterface);
validateMiningParams();
return this;
}
private void validateMiningParams() {
// noinspection ConstantConditions
if (isMiningEnabled && coinbase == null) {
throw new ParameterException(
this.commandLine,
"Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled)"
+ "or specify the beneficiary of mining (via --miner-coinbase <Address>)");
}
}
protected void validateP2PInterface(final String p2pInterface) {
final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface;
try {
if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) {
throw new ParameterException(commandLine, failMessage);
}
} catch (final UnknownHostException | SocketException e) {
throw new ParameterException(commandLine, failMessage, e);
}
}
private void issueOptionWarnings() {
// Check that P2P options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--p2p-enabled",
!p2pEnabled,
asList(
"--bootnodes",
"--discovery-enabled",
"--max-peers",
"--banned-node-id",
"--banned-node-ids",
"--p2p-host",
"--p2p-interface",
"--p2p-port",
"--remote-connections-max-percentage"));
// Check that mining options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--miner-enabled",
!isMiningEnabled,
asList("--miner-coinbase", "--min-gas-price", "--miner-extra-data"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--sync-mode",
!SyncMode.FAST.equals(syncMode),
singletonList("--fast-sync-min-peers"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--pruning-enabled",
!isPruningEnabled,
asList("--pruning-block-confirmations", "--pruning-blocks-retained"));
}
private BesuCommand configure() throws Exception {
ethNetworkConfig = updateNetworkConfig(getNetwork());
jsonRpcConfiguration = jsonRpcConfiguration();
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration = webSocketConfiguration();
permissioningConfiguration = permissioningConfiguration();
staticNodes = loadStaticNodes();
logger.info("Connecting to {} static nodes.", staticNodes.size());
logger.trace("Static Nodes = {}", staticNodes);
final List<URI> enodeURIs =
ethNetworkConfig.getBootNodes().stream().map(EnodeURL::toURI).collect(Collectors.toList());
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInWhitelist(enodeURIs, p));
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(
p ->
ensureAllNodesAreInWhitelist(
staticNodes.stream().map(EnodeURL::toURI).collect(Collectors.toList()), p));
metricsConfiguration = metricsConfiguration();
return this;
}
private NetworkName getNetwork() {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
return network == null ? MAINNET : network;
}
private void ensureAllNodesAreInWhitelist(
final Collection<URI> enodeAddresses,
final LocalPermissioningConfiguration permissioningConfiguration) {
try {
PermissioningConfigurationValidator.areAllNodesAreInWhitelist(
enodeAddresses, permissioningConfiguration);
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage());
}
}
private BesuCommand controller() {
besuController = buildController();
return this;
}
public BesuController<?> buildController() {
try {
return getControllerBuilder().build();
} catch (final Exception e) {
throw new ExecutionException(this.commandLine, e.getMessage(), e);
}
}
public BesuControllerBuilder<?> getControllerBuilder() {
try {
addConfigurationService();
return controllerBuilderFactory
.fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides)
.synchronizerConfiguration(buildSyncConfig())
.ethProtocolConfiguration(ethProtocolOptions.toDomainObject())
.dataDirectory(dataDir())
.miningParameters(
new MiningParameters(coinbase, minTransactionGasPrice, extraData, isMiningEnabled))
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
.nodePrivateKeyFile(nodePrivateKeyFile())
.metricsSystem(metricsSystem.get())
.privacyParameters(privacyParameters())
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(keyStorageProvider(keyValueStorageName))
.isPruningEnabled(isPruningEnabled)
.pruningConfiguration(buildPruningConfiguration())
.genesisConfigOverrides(genesisConfigOverrides)
.targetGasLimit(targetGasLimit == null ? Optional.empty() : Optional.of(targetGasLimit))
.requiredBlocks(requiredBlocks);
} catch (final IOException e) {
throw new ExecutionException(this.commandLine, "Invalid path", e);
}
}
private GraphQLConfiguration graphQLConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--graphql-http-enabled",
!isRpcHttpEnabled,
asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port"));
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(isGraphQLHttpEnabled);
graphQLConfiguration.setHost(graphQLHttpHost);
graphQLConfiguration.setPort(graphQLHttpPort);
graphQLConfiguration.setHostsWhitelist(hostsWhitelist);
graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins);
return graphQLConfiguration;
}
private JsonRpcConfiguration jsonRpcConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-enabled",
!isRpcHttpEnabled,
asList(
"--rpc-http-api",
"--rpc-http-apis",
"--rpc-http-cors-origins",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-authentication-enabled",
"--rpc-http-authentication-credentials-file"));
if (isRpcHttpAuthenticationEnabled && rpcHttpAuthenticationCredentialsFile() == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file");
}
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(isRpcHttpEnabled);
jsonRpcConfiguration.setHost(rpcHttpHost);
jsonRpcConfiguration.setPort(rpcHttpPort);
jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins);
jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList()));
jsonRpcConfiguration.setHostsWhitelist(hostsWhitelist);
jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled);
jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile());
return jsonRpcConfiguration;
}
private WebSocketConfiguration webSocketConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file"));
if (isRpcWsAuthenticationEnabled && rpcWsAuthenticationCredentialsFile() == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsWhitelist(hostsWhitelist);
return webSocketConfiguration;
}
public MetricsConfiguration metricsConfiguration() {
if (isMetricsEnabled && isMetricsPushEnabled) {
throw new ParameterException(
this.commandLine,
"--metrics-enabled option and --metrics-push-enabled option can't be used at the same "
+ "time. Please refer to CLI reference for more details about this constraint.");
}
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-enabled",
!isMetricsEnabled,
asList("--metrics-host", "--metrics-port"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-push-enabled",
!isMetricsPushEnabled,
asList(
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job"));
return metricsCLIOptions
.toDomainObject()
.enabled(isMetricsEnabled)
.host(metricsHost)
.port(metricsPort)
.metricCategories(metricCategories)
.pushEnabled(isMetricsPushEnabled)
.pushHost(metricsPushHost)
.pushPort(metricsPushPort)
.pushInterval(metricsPushInterval)
.hostsWhitelist(hostsWhitelist)
.prometheusJob(metricsPrometheusJob)
.build();
}
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
return Optional.empty();
}
final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional;
if (localPermissionsEnabled()) {
final Optional<String> nodePermissioningConfigFile =
Optional.ofNullable(nodePermissionsConfigFile());
final Optional<String> accountPermissioningConfigFile =
Optional.ofNullable(accountsPermissionsConfigFile());
final LocalPermissioningConfiguration localPermissioningConfiguration =
PermissioningConfigurationBuilder.permissioningConfiguration(
permissionsNodesEnabled,
nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()),
permissionsAccountsEnabled,
accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath()));
localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration);
} else {
if (nodePermissionsConfigFile() != null && !permissionsNodesEnabled) {
logger.warn(
"Node permissioning config file set {} but no permissions enabled",
nodePermissionsConfigFile());
}
if (accountsPermissionsConfigFile() != null && !permissionsAccountsEnabled) {
logger.warn(
"Account permissioning config file set {} but no permissions enabled",
accountsPermissionsConfigFile());
}
localPermissioningConfigurationOptional = Optional.empty();
}
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
if (permissionsNodesContractEnabled) {
if (permissionsNodesContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No node permissioning contract address specified. Cannot enable smart contract based node permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractNodeWhitelistEnabled(
permissionsNodesContractEnabled);
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
permissionsNodesContractAddress);
}
} else if (permissionsNodesContractAddress != null) {
logger.warn(
"Node permissioning smart contract address set {} but smart contract node permissioning is disabled.",
permissionsNodesContractAddress);
}
if (permissionsAccountsContractEnabled) {
if (permissionsAccountsContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No account permissioning contract address specified. Cannot enable smart contract based account permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractAccountWhitelistEnabled(
permissionsAccountsContractEnabled);
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
permissionsAccountsContractAddress);
}
} else if (permissionsAccountsContractAddress != null) {
logger.warn(
"Account permissioning smart contract address set {} but smart contract account permissioning is disabled.",
permissionsAccountsContractAddress);
}
final PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
localPermissioningConfigurationOptional,
Optional.of(smartContractPermissioningConfiguration));
return Optional.of(permissioningConfiguration);
}
private boolean localPermissionsEnabled() {
return permissionsAccountsEnabled || permissionsNodesEnabled;
}
private boolean contractPermissionsEnabled() {
return permissionsNodesContractEnabled || permissionsAccountsContractEnabled;
}
private PrivacyParameters privacyParameters() throws IOException {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-enabled",
!isPrivacyEnabled,
asList("--privacy-url", "--privacy-public-key-file", "--privacy-precompiled-address"));
final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder();
if (isPrivacyEnabled) {
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
if (privacyPublicKeyFile() != null) {
privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile());
} else {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy");
}
privacyParametersBuilder.setPrivacyAddress(privacyPrecompiledAddress);
privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath);
privacyParametersBuilder.setStorageProvider(
keyStorageProvider(keyValueStorageName + "-privacy"));
}
return privacyParametersBuilder.build();
}
private KeyValueStorageProvider keyStorageProvider(final String name) {
return new KeyValueStorageProviderBuilder()
.withStorageFactory(
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private SynchronizerConfiguration buildSyncConfig() {
return synchronizerOptions
.toDomainObject()
.syncMode(syncMode)
.fastSyncMinimumPeerCount(fastSyncMinPeerCount)
.build();
}
private TransactionPoolConfiguration buildTransactionPoolConfiguration() {
return transactionPoolOptions
.toDomainObject()
.txPoolMaxSize(txPoolMaxSize)
.pendingTxRetentionPeriod(pendingTxRetentionPeriod)
.build();
}
private PruningConfiguration buildPruningConfiguration() {
return new PruningConfiguration(pruningBlockConfirmations, pruningBlocksRetained);
}
// Blockchain synchronisation from peers.
private void synchronize(
final BesuController<?> controller,
final boolean p2pEnabled,
final boolean peerDiscoveryEnabled,
final EthNetworkConfig ethNetworkConfig,
final int maxPeers,
final String p2pAdvertisedHost,
final String p2pListenInterface,
final int p2pListenPort,
final GraphQLConfiguration graphQLConfiguration,
final JsonRpcConfiguration jsonRpcConfiguration,
final WebSocketConfiguration webSocketConfiguration,
final MetricsConfiguration metricsConfiguration,
final Optional<PermissioningConfiguration> permissioningConfiguration,
final Collection<EnodeURL> staticNodes) {
checkNotNull(runnerBuilder);
permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration);
final ObservableMetricsSystem metricsSystem = this.metricsSystem.get();
final Runner runner =
runnerBuilder
.vertx(Vertx.vertx(createVertxOptions(metricsSystem)))
.besuController(controller)
.p2pEnabled(p2pEnabled)
.natMethod(natMethod)
.discovery(peerDiscoveryEnabled)
.ethNetworkConfig(ethNetworkConfig)
.p2pAdvertisedHost(p2pAdvertisedHost)
.p2pListenInterface(p2pListenInterface)
.p2pListenPort(p2pListenPort)
.maxPeers(maxPeers)
.limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled)
.fractionRemoteConnectionsAllowed(
Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue())
.networkingConfiguration(networkingOptions.toDomainObject())
.graphQLConfiguration(graphQLConfiguration)
.jsonRpcConfiguration(jsonRpcConfiguration)
.webSocketConfiguration(webSocketConfiguration)
.dataDir(dataDir())
.bannedNodeIds(bannedNodeIds)
.metricsSystem(metricsSystem)
.metricsConfiguration(metricsConfiguration)
.staticNodes(staticNodes)
.build();
addShutdownHook(runner);
runner.start();
runner.awaitStop();
}
private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) {
return new VertxOptions()
.setMetricsOptions(
new MetricsOptions()
.setEnabled(true)
.setFactory(new VertxMetricsAdapterFactory(metricsSystem)));
}
private void addShutdownHook(final Runner runner) {
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
try {
besuPluginContext.stopPlugins();
runner.close();
LogManager.shutdown();
} catch (final Exception e) {
logger.error("Failed to stop Besu");
}
}));
}
  // Used to discover the default IP of the client.
  // The loopback IP is used by default, as this is how smokeTests require it to
  // be, and it's probably good security behaviour to default to localhost only.
private InetAddress autoDiscoverDefaultIP() {
if (autoDiscoveredDefaultIP != null) {
return autoDiscoveredDefaultIP;
}
autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress();
return autoDiscoveredDefaultIP;
}
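  // Illustration only: InetAddress.getLoopbackAddress() normally resolves to
  // 127.0.0.1, so every *-host option above defaults to localhost. A node meant
  // to be reachable externally must override this explicitly, e.g. (hypothetical)
  //   besu --rpc-http-enabled --rpc-http-host=0.0.0.0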
private EthNetworkConfig updateNetworkConfig(final NetworkName network) {
final EthNetworkConfig.Builder builder =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network));
    // Use of a custom genesis file comes with specific default values, not only
    // for the genesis file itself but also for the network id and the bootnodes
    // list.
final File genesisFile = genesisFile();
if (genesisFile != null) {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
if (this.network != null) {
        // We check whether the network option was really provided by the user,
        // rather than only looking at the default value. If the user provided it
        // together with the genesis file option, a conflict error is raised.
throw new ParameterException(
this.commandLine,
"--network option and --genesis-file option can't be used at the same time. Please "
+ "refer to CLI reference for more details about this constraint.");
}
builder.setGenesisConfig(genesisConfig());
if (networkId == null) {
        // If no network id option is defined on the CLI, we have to set a
        // default value from the genesis file.
        // We do the genesis parsing only in this case, as we already have
        // network id constants for known networks to speed up the process.
        // We also have to parse the genesis because we don't have a parsed
        // version at this stage yet.
        // If no chain id is found in the genesis (it's optional), we use the
        // mainnet network id.
try {
final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig());
builder.setNetworkId(
genesisConfigFile
.getConfigOptions(genesisConfigOverrides)
.getChainId()
.orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId()));
} catch (final DecodeException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e);
} catch (final ArithmeticException e) {
throw new ParameterException(
this.commandLine,
"No networkId specified and chainId in "
+ "genesis file is too large to be used as a networkId");
}
}
if (bootNodes == null) {
// We default to an empty bootnodes list if the option is not provided on the CLI,
// because mainnet bootnodes won't work as the default for a custom genesis. It's
// better to have an empty default that forces the user to create a custom list
// than a useless one that may make users think it can work when it can't.
builder.setBootNodes(new ArrayList<>());
}
}
if (networkId != null) {
builder.setNetworkId(networkId);
}
if (bootNodes != null) {
builder.setBootNodes(bootNodes);
}
return builder.build();
}
private String genesisConfig() {
try {
return Resources.toString(genesisFile().toURI().toURL(), UTF_8);
} catch (final IOException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to load genesis file %s.", genesisFile()), e);
}
}
private File genesisFile() {
if (isFullInstantiation()) {
return standaloneCommands.genesisFile;
} else if (isDocker) {
final File genesisFile = new File(DOCKER_GENESIS_LOCATION);
if (genesisFile.exists()) {
return genesisFile;
} else {
return null;
}
} else {
return null;
}
}
public Path dataDir() {
if (isFullInstantiation()) {
return standaloneCommands.dataPath.toAbsolutePath();
} else if (isDocker) {
return Paths.get(DOCKER_DATADIR_LOCATION);
} else {
return getDefaultBesuDataPath(this);
}
}
private Path pluginsDir() {
if (isFullInstantiation()) {
final String pluginsDir = System.getProperty("besu.plugins.dir");
if (pluginsDir == null) {
return new File(System.getProperty("besu.home", "."), "plugins").toPath();
} else {
return new File(pluginsDir).toPath();
}
} else if (isDocker) {
return Paths.get(DOCKER_PLUGINSDIR_LOCATION);
} else {
return null; // null means no plugins
}
}
public File nodePrivateKeyFile() {
File nodePrivateKeyFile = null;
if (isFullInstantiation()) {
nodePrivateKeyFile = standaloneCommands.nodePrivateKeyFile;
}
return nodePrivateKeyFile != null
? nodePrivateKeyFile
: KeyPairUtil.getDefaultKeyFile(dataDir());
}
private File privacyPublicKeyFile() {
if (isDocker) {
final File keyFile = new File(DOCKER_PRIVACY_PUBLIC_KEY_FILE);
if (keyFile.exists()) {
return keyFile;
} else {
return null;
}
} else {
return standaloneCommands.privacyPublicKeyFile;
}
}
private String rpcHttpAuthenticationCredentialsFile() {
String filename = null;
if (isFullInstantiation()) {
filename = standaloneCommands.rpcHttpAuthenticationCredentialsFile;
} else if (isDocker) {
final File authFile = new File(DOCKER_RPC_HTTP_AUTHENTICATION_CREDENTIALS_FILE_LOCATION);
if (authFile.exists()) {
filename = authFile.getAbsolutePath();
}
}
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "HTTP");
}
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
String filename = null;
if (isFullInstantiation()) {
filename = standaloneCommands.rpcWsAuthenticationCredentialsFile;
} else if (isDocker) {
final File authFile = new File(DOCKER_RPC_WS_AUTHENTICATION_CREDENTIALS_FILE_LOCATION);
if (authFile.exists()) {
filename = authFile.getAbsolutePath();
}
}
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private String nodePermissionsConfigFile() {
return permissionsConfigFile(standaloneCommands.nodePermissionsConfigFile);
}
private String accountsPermissionsConfigFile() {
return permissionsConfigFile(standaloneCommands.accountPermissionsConfigFile);
}
private String permissionsConfigFile(final String permissioningFilename) {
String filename = null;
if (isFullInstantiation()) {
filename = permissioningFilename;
} else if (isDocker) {
final File file = new File(DOCKER_PERMISSIONS_CONFIG_FILE_LOCATION);
if (file.exists()) {
filename = file.getAbsolutePath();
}
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir().toAbsolutePath()
+ System.getProperty("file.separator")
+ DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION;
}
private boolean isFullInstantiation() {
return !isDocker;
}
public MetricsSystem getMetricsSystem() {
return metricsSystem.get();
}
private Set<EnodeURL> loadStaticNodes() throws IOException {
final String staticNodesFilename = "static-nodes.json";
final Path staticNodesPath = dataDir().resolve(staticNodesFilename);
return StaticNodesParser.fromPath(staticNodesPath);
}
public BesuExceptionHandler exceptionHandler() {
return new BesuExceptionHandler(this::getLogLevel);
}
private Level getLogLevel() {
return logLevel;
}
}
| 1 | 20,300 | Suggestion: specify `arity` for this option. | hyperledger-besu | java |
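A minimal sketch of the `arity` suggestion above, assuming picocli 4.x (the CLI library this command is built on). The flag name, arity value, and command class are illustrative assumptions, not the actual Besu option:

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

@Command(name = "example")
class ExampleCommand implements Runnable {
  // Hypothetical flag: arity = "1" pins the option to exactly one value,
  // so a following positional argument cannot be swallowed by mistake.
  @Option(
      names = "--static-nodes-file", // illustrative name, not a real Besu flag
      arity = "1",
      description = "Path to the static nodes file.")
  private String staticNodesFile;

  @Override public void run() {
    System.out.println("static nodes file: " + staticNodesFile);
  }

  public static void main(String[] args) {
    System.exit(new CommandLine(new ExampleCommand()).execute(args));
  }
}

Without an explicit arity, picocli infers it from the field type; stating it makes the intended contract explicit at the declaration site.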
@@ -264,6 +264,9 @@ class SpikesPlot(PathPlot):
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
+ show_legend = param.Boolean(default=True, doc="""
+ Whether to show legend for the plot.""")
+
style_opts = (['color', 'cmap', 'palette'] + line_properties)
def get_extents(self, element, ranges): | 1 | import numpy as np
from bokeh.charts import Bar, BoxPlot as BokehBoxPlot
from bokeh.models import Circle, GlyphRenderer, ColumnDataSource, Range1d
import param
from ...element import Raster, Points, Polygons, Spikes
from ...core.util import max_range
from ..util import compute_sizes, get_sideplot_ranges, match_spec
from .element import ElementPlot, line_properties, fill_properties
from .path import PathPlot, PolygonPlot
from .util import map_colors, get_cmap, mpl_to_bokeh
class PointPlot(ElementPlot):
color_index = param.Integer(default=3, doc="""
Index of the dimension from which the color will be drawn.""")
size_index = param.Integer(default=2, doc="""
Index of the dimension from which the sizes will be drawn.""")
radius_index = param.Integer(default=None, doc="""
Index of the dimension from which the radius will be drawn.""")
scaling_factor = param.Number(default=1, bounds=(1, None), doc="""
If values are supplied the area of the points is computed relative
to the marker size. It is then multiplied by scaling_factor to the power
of the ratio between the smallest point and all other points.
For values of 1 scaling by the values is disabled, a factor of 2
allows for linear scaling of the area and a factor of 4 linear
scaling of the point width.""")
size_fn = param.Callable(default=np.abs, doc="""
Function applied to size values before applying scaling,
to remove values lower than zero.""")
style_opts = (['cmap', 'palette', 'marker', 'size', 's', 'alpha', 'color',
'unselected_color'] +
line_properties + fill_properties)
_plot_method = 'scatter'
def get_data(self, element, ranges=None, empty=False):
style = self.style[self.cyclic_index]
dims = element.dimensions(label=True)
mapping = dict(x=dims[0], y=dims[1])
data = {}
cmap = style.get('palette', style.get('cmap', None))
if self.color_index < len(dims) and cmap:
map_key = 'color_' + dims[self.color_index]
mapping['color'] = map_key
if empty:
data[map_key] = []
else:
cmap = get_cmap(cmap)
colors = element.dimension_values(self.color_index)
crange = ranges.get(dims[self.color_index], None)
data[map_key] = map_colors(colors, crange, cmap)
if self.size_index < len(dims) and self.scaling_factor != 1:
map_key = 'size_' + dims[self.size_index]
mapping['size'] = map_key
if empty:
data[map_key] = []
else:
ms = style.get('size', 1)
sizes = element.dimension_values(self.size_index)
data[map_key] = compute_sizes(sizes, self.size_fn,
self.scaling_factor, ms)
data[dims[0]] = [] if empty else element.dimension_values(0)
data[dims[1]] = [] if empty else element.dimension_values(1)
if 'hover' in self.tools+self.default_tools:
for d in dims:
data[d] = [] if empty else element.dimension_values(d)
return data, mapping
def _init_glyph(self, plot, mapping, properties):
"""
Returns a Bokeh glyph object.
"""
properties = mpl_to_bokeh(properties)
unselect_color = properties.pop('unselected_color', None)
if (any(t in self.tools for t in ['box_select', 'lasso_select'])
and unselect_color is not None):
source = properties.pop('source')
color = properties.pop('color', None)
color = mapping.pop('color', color)
properties.pop('legend', None)
unselected = Circle(**dict(properties, fill_color=unselect_color, **mapping))
selected = Circle(**dict(properties, fill_color=color, **mapping))
renderer = plot.add_glyph(source, selected, selection_glyph=selected,
nonselection_glyph=unselected)
else:
renderer = getattr(plot, self._plot_method)(**dict(properties, **mapping))
return renderer, renderer.glyph
class CurvePlot(ElementPlot):
style_opts = ['color'] + line_properties
_plot_method = 'line'
def get_data(self, element, ranges=None, empty=False):
x = element.get_dimension(0).name
y = element.get_dimension(1).name
return ({x: [] if empty else element.dimension_values(0),
y: [] if empty else element.dimension_values(1)},
dict(x=x, y=y))
class AreaPlot(PolygonPlot):
def get_extents(self, element, ranges):
vdims = element.vdims
vdim = vdims[0].name
if len(vdims) > 1:
ranges[vdim] = max_range([ranges[vd.name] for vd in vdims])
else:
vdim = vdims[0].name
ranges[vdim] = (np.nanmin([0, ranges[vdim][0]]), ranges[vdim][1])
return super(AreaPlot, self).get_extents(element, ranges)
def get_data(self, element, ranges=None, empty=False):
mapping = dict(self._mapping)
if empty: return {'xs': [], 'ys': []}
xs = element.dimension_values(0)
x2 = np.hstack((xs[::-1], xs))
if len(element.vdims) > 1:
bottom = element.dimension_values(2)
else:
bottom = np.zeros(len(element))
ys = np.hstack((bottom[::-1], element.dimension_values(1)))
data = dict(xs=[x2], ys=[ys])
return data, mapping
class SpreadPlot(PolygonPlot):
style_opts = ['color'] + line_properties + fill_properties
def get_data(self, element, ranges=None, empty=None):
if empty:
return dict(xs=[], ys=[]), self._mapping
xvals = element.dimension_values(0)
mean = element.dimension_values(1)
neg_error = element.dimension_values(2)
pos_idx = 3 if len(element.dimensions()) > 3 else 2
pos_error = element.dimension_values(pos_idx)
lower = mean - neg_error
upper = mean + pos_error
band_x = np.append(xvals, xvals[::-1])
band_y = np.append(lower, upper[::-1])
return dict(xs=[band_x], ys=[band_y]), self._mapping
class HistogramPlot(ElementPlot):
style_opts = ['color'] + line_properties + fill_properties
_plot_method = 'quad'
def get_data(self, element, ranges=None, empty=None):
mapping = dict(top='top', bottom=0, left='left', right='right')
if empty:
data = dict(top=[], left=[], right=[])
else:
data = dict(top=element.values, left=element.edges[:-1],
right=element.edges[1:])
if 'hover' in self.default_tools + self.tools:
data.update({d: [] if empty else element.dimension_values(d)
for d in element.dimensions(label=True)})
return (data, mapping)
class SideHistogramPlot(HistogramPlot):
style_opts = HistogramPlot.style_opts + ['cmap']
height = param.Integer(default=125, doc="The height of the plot")
width = param.Integer(default=125, doc="The width of the plot")
show_title = param.Boolean(default=False, doc="""
Whether to display the plot title.""")
def get_data(self, element, ranges=None, empty=None):
if self.invert_axes:
mapping = dict(top='left', bottom='right', left=0, right='top')
else:
mapping = dict(top='top', bottom=0, left='left', right='right')
if empty:
data = dict(top=[], left=[], right=[])
else:
data = dict(top=element.values, left=element.edges[:-1],
right=element.edges[1:])
dim = element.get_dimension(0).name
main = self.adjoined.main
range_item, main_range, dim = get_sideplot_ranges(self, element, main, ranges)
vals = element.dimension_values(dim)
if isinstance(range_item, (Raster, Points, Polygons, Spikes)):
style = self.lookup_options(range_item, 'style')[self.cyclic_index]
else:
style = {}
if 'cmap' in style or 'palette' in style:
cmap = get_cmap(style.get('cmap', style.get('palette', None)))
data['color'] = [] if empty else map_colors(vals, main_range, cmap)
mapping['fill_color'] = 'color'
if 'hover' in self.default_tools + self.tools:
data.update({d: [] if empty else element.dimension_values(d)
for d in element.dimensions(label=True)})
return (data, mapping)
class ErrorPlot(PathPlot):
horizontal = param.Boolean(default=False)
style_opts = ['color'] + line_properties
def get_data(self, element, ranges=None, empty=False):
if empty:
return dict(xs=[], ys=[]), self._mapping
data = element.array(dimensions=element.dimensions()[0:4])
err_xs = []
err_ys = []
for row in data:
x, y = row[0:2]
if len(row) > 3:
neg, pos = row[2:]
else:
neg, pos = row[2], row[2]
if self.horizontal:
err_xs.append((x - neg, x + pos))
err_ys.append((y, y))
else:
err_xs.append((x, x))
err_ys.append((y - neg, y + pos))
return (dict(xs=err_xs, ys=err_ys), self._mapping)
class SpikesPlot(PathPlot):
color_index = param.Integer(default=1, doc="""
Index of the dimension from which the color will be drawn.""")
spike_length = param.Number(default=0.5, doc="""
The length of each spike if Spikes object is one dimensional.""")
position = param.Number(default=0., doc="""
The position of the lower end of each spike.""")
style_opts = (['color', 'cmap', 'palette'] + line_properties)
def get_extents(self, element, ranges):
l, b, r, t = super(SpikesPlot, self).get_extents(element, ranges)
if len(element.dimensions()) == 1:
b, t = self.position, self.position+self.spike_length
return l, b, r, t
def get_data(self, element, ranges=None, empty=False):
style = self.style[self.cyclic_index]
dims = element.dimensions(label=True)
pos = self.position
if empty:
xs, ys, keys = [], [], []
mapping = dict(xs=dims[0], ys=dims[1] if len(dims) > 1 else 'heights')
elif len(dims) > 1:
xs, ys = zip(*(((x, x), (pos, pos+y))
for x, y in element.array()))
mapping = dict(xs=dims[0], ys=dims[1])
keys = (dims[0], dims[1])
else:
height = self.spike_length
xs, ys = zip(*(((x[0], x[0]), (pos, pos+height))
for x in element.array()))
mapping = dict(xs=dims[0], ys='heights')
keys = (dims[0], 'heights')
if not empty and self.invert_axes: keys = keys[::-1]
data = dict(zip(keys, (xs, ys)))
cmap = style.get('palette', style.get('cmap', None))
if self.color_index < len(dims) and cmap:
cdim = dims[self.color_index]
map_key = 'color_' + cdim
mapping['color'] = map_key
if empty:
colors = []
else:
cmap = get_cmap(cmap)
cvals = element.dimension_values(cdim)
crange = ranges.get(cdim, None)
colors = map_colors(cvals, crange, cmap)
data[map_key] = colors
return data, mapping
class SideSpikesPlot(SpikesPlot):
"""
SpikesPlot with useful defaults for plotting an adjoined rug plot.
"""
xaxis = param.ObjectSelector(default='top-bare',
objects=['top', 'bottom', 'bare', 'top-bare',
'bottom-bare', None], doc="""
Whether and where to display the xaxis, bare options allow suppressing
all axis labels including ticks and xlabel. Valid options are 'top',
'bottom', 'bare', 'top-bare' and 'bottom-bare'.""")
yaxis = param.ObjectSelector(default='right-bare',
objects=['left', 'right', 'bare', 'left-bare',
'right-bare', None], doc="""
Whether and where to display the yaxis, bare options allow suppressing
all axis labels including ticks and ylabel. Valid options are 'left',
'right', 'bare', 'left-bare' and 'right-bare'.""")
height = param.Integer(default=80, doc="Height of plot")
width = param.Integer(default=80, doc="Width of plot")
class ChartPlot(ElementPlot):
"""
ChartPlot creates and updates Bokeh high-level Chart instances.
The current implementation requires creating a new Chart for each
frame and updating the existing Chart. Once Bokeh supports updating
Charts directly this workaround will no longer be required.
"""
def initialize_plot(self, ranges=None, plot=None, plots=None, source=None):
"""
Initializes a new plot object with the last available frame.
"""
# Get element key and ranges for frame
element = self.hmap.last
key = self.keys[-1]
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(element, ranges)
self.current_ranges = ranges
self.current_frame = element
self.current_key = key
# Initialize plot, source and glyph
if plot is not None:
raise Exception("Can't overlay Bokeh Charts based plot properties")
init_element = element.clone(element.interface.concat(self.hmap.values()))
plot = self._init_chart(init_element, ranges)
self.handles['plot'] = plot
self.handles['glyph_renderers'] = [r for r in plot.renderers
if isinstance(r, GlyphRenderer)]
self._update_chart(key, element, ranges)
# Update plot, source and glyph
self.drawn = True
return plot
def update_frame(self, key, ranges=None, plot=None, element=None):
"""
Updates an existing plot with data corresponding
to the key.
"""
element = self._get_frame(key)
if not element:
if self.dynamic and self.overlaid:
self.current_key = key
element = self.current_frame
else:
element = self._get_frame(key)
else:
self.current_key = key
self.current_frame = element
self.style = self.lookup_options(element, 'style')
self.set_param(**self.lookup_options(element, 'plot').options)
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = match_spec(element, ranges)
self.current_ranges = ranges
self._update_chart(key, element, ranges)
def _update_chart(self, key, element, ranges):
new_chart = self._init_chart(element, ranges)
old_chart = self.handles['plot']
old_renderers = old_chart.select(type=GlyphRenderer)
new_renderers = new_chart.select(type=GlyphRenderer)
old_chart.y_range.update(**new_chart.y_range.properties_with_values())
updated = []
for new_r in new_renderers:
for old_r in old_renderers:
if type(old_r.glyph) == type(new_r.glyph):
old_renderers.pop(old_renderers.index(old_r))
new_props = new_r.properties_with_values()
source = new_props.pop('data_source')
old_r.glyph.update(**new_r.glyph.properties_with_values())
old_r.update(**new_props)
old_r.data_source.data.update(source.data)
updated.append(old_r)
break
for old_r in old_renderers:
if old_r not in updated:
emptied = {k: [] for k in old_r.data_source.data}
old_r.data_source.data.update(emptied)
properties = self._plot_properties(key, old_chart, element)
old_chart.update(**properties)
@property
def current_handles(self):
plot = self.handles['plot']
sources = plot.select(type=ColumnDataSource)
return sources
class BoxPlot(ChartPlot):
"""
BoxPlot generates a box and whisker plot from a BoxWhisker
Element. This allows plotting the median, mean and various
percentiles. Displaying outliers is currently not supported
as they cannot be consistently updated.
"""
style_opts = ['color', 'whisker_color'] + line_properties
def _init_chart(self, element, ranges):
properties = self.style[self.cyclic_index]
dframe = element.dframe()
label = element.dimensions('key', True)
if len(element.dimensions()) == 1:
dframe[''] = ''
label = ['']
plot = BokehBoxPlot(dframe, label=label,
values=element.dimensions('value', True)[0],
**properties)
# Disable outliers for now as they cannot be consistently updated.
plot.renderers = [r for r in plot.renderers
if not (isinstance(r, GlyphRenderer) and
isinstance(r.glyph, Circle))]
return plot
class BarPlot(ChartPlot):
"""
BarPlot allows generating single- or multi-category
bar Charts, by selecting which key dimensions are
mapped onto separate groups, categories and stacks.
"""
group_index = param.Integer(default=0, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into groups.""")
category_index = param.Integer(default=1, doc="""
Index of the dimension in the supplied Bars
Element, which will be laid out into categories.""")
stack_index = param.Integer(default=2, doc="""
Index of the dimension in the supplied Bars
Element, which will be stacked.""")
style_opts = ['bar_width', 'max_height', 'color', 'fill_alpha']
def _init_chart(self, element, ranges):
kdims = element.dimensions('key', True)
vdim = element.dimensions('value', True)[0]
kwargs = self.style[self.cyclic_index]
if self.group_index < element.ndims:
kwargs['label'] = kdims[self.group_index]
if self.category_index < element.ndims:
kwargs['group'] = kdims[self.category_index]
if self.stack_index < element.ndims:
kwargs['stack'] = kdims[self.stack_index]
crange = Range1d(*ranges.get(vdim))
plot = Bar(element.dframe(), values=vdim,
continuous_range=crange, **kwargs)
return plot
| 1 | 14,252 | This is a parameter available for the matplotlib backend, IIRC, in which case it is good to see this support added to the Bokeh backend. | holoviz-holoviews | py |
@@ -641,7 +641,7 @@ public final class Span implements Serializable { // for Spark and Flink jobs
*/
public static String normalizeTraceId(String traceId) {
if (traceId == null) throw new NullPointerException("traceId == null");
- int length = traceId.length();
+ int length = traceId.trim().length();
if (length == 0) throw new IllegalArgumentException("traceId is empty");
if (length > 32) throw new IllegalArgumentException("traceId.length > 32");
int zeros = validateHexAndReturnZeroPrefix(traceId); | 1 | /*
* Copyright 2015-2019 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin2;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.io.StreamCorruptedException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.TreeMap;
import java.util.logging.Logger;
import zipkin2.codec.SpanBytesDecoder;
import zipkin2.codec.SpanBytesEncoder;
import zipkin2.internal.Nullable;
import zipkin2.internal.Platform;
import static java.lang.String.format;
import static java.util.logging.Level.FINEST;
import static zipkin2.internal.HexCodec.HEX_DIGITS;
/**
* A span is a single-host view of an operation. A trace is a series of spans (often RPC calls)
* which nest to form a latency tree. Spans are in the same trace when they share the same trace ID.
* The {@link #parentId} field establishes the position of one span in the tree.
*
* <p>The root span is where {@link #parentId} is null and usually has the longest {@link
* #duration} in the trace. However, nested asynchronous work can materialize as child spans whose
* duration exceeds that of the root span.
*
* <p>Spans usually represent remote activity such as RPC calls, or messaging producers and
* consumers. However, they can also represent in-process activity in any position of the trace. For
* example, a root span could represent a server receiving an initial client request. A root span
* could also represent a scheduled job that has no remote context.
*
* <p>While span identifiers are packed into longs, they should be treated opaquely. ID encoding is
* 16 or 32 character lower-hex, to avoid signed interpretation.
*
* <h3>Relationship to {@code zipkin.Span}</h3>
*
* <p>This type is intended to replace use of {@code zipkin.Span}. Particularly, tracers represent
* a single-host view of an operation. By making one endpoint implicit for all data, this type does
* not need to repeat endpoints on each data like {@code zipkin.Span} does. This results in simpler
* and smaller data.
*/
//@Immutable
public final class Span implements Serializable { // for Spark and Flink jobs
static final Charset UTF_8 = Charset.forName("UTF-8");
static final Endpoint EMPTY_ENDPOINT = Endpoint.newBuilder().build();
static final int FLAG_DEBUG = 1 << 1;
static final int FLAG_DEBUG_SET = 1 << 2;
static final int FLAG_SHARED = 1 << 3;
static final int FLAG_SHARED_SET = 1 << 4;
private static final long serialVersionUID = 0L;
/**
* Trace identifier, set on all spans within it.
*
* <p>Encoded as 16 or 32 lowercase hex characters corresponding to 64 or 128 bits. For example,
* a 128bit trace ID looks like {@code 4e441824ec2b6a44ffdc9bb9a6453df3}.
*
* <p>Some systems downgrade trace identifiers to 64bit by dropping the left-most 16 characters.
* For example, {@code 4e441824ec2b6a44ffdc9bb9a6453df3} becomes {@code ffdc9bb9a6453df3}.
*/
public String traceId() {
return traceId;
}
/**
* The parent's {@link #id} or null if this is the root span in a trace.
*
* <p>This is the same encoding as {@link #id}. For example {@code ffdc9bb9a6453df3}
*/
@Nullable public String parentId() {
return parentId;
}
/**
* Unique 64bit identifier for this operation within the trace.
*
* <p>Encoded as 16 lowercase hex characters. For example {@code ffdc9bb9a6453df3}
*
* <p>A span is uniquely identified in storage by ({@linkplain #traceId}, {@linkplain #id()}).
*/
public String id() {
return id;
}
/** Indicates the primary span type. */
public enum Kind {
CLIENT,
SERVER,
/**
* When present, {@link #timestamp()} is the moment a producer sent a message to a destination.
* {@link #duration()} represents delay sending the message, such as batching, while {@link
* #remoteEndpoint()} indicates the destination, such as a broker.
*
* <p>Unlike {@link #CLIENT}, messaging spans never share a span ID. For example, the {@link
* #CONSUMER} of the same message has {@link #parentId()} set to this span's {@link #id()}.
*/
PRODUCER,
/**
* When present, {@link #timestamp()} is the moment a consumer received a message from an
* origin. {@link #duration()} represents delay consuming the message, such as from backlog,
* while {@link #remoteEndpoint()} indicates the origin, such as a broker.
*
* <p>Unlike {@link #SERVER}, messaging spans never share a span ID. For example, the {@link
* #PRODUCER} of this message is the {@link #parentId()} of this span.
*/
CONSUMER
}
/** When present, used to interpret {@link #remoteEndpoint} */
@Nullable public Kind kind() {
return kind;
}
/**
* Span name in lowercase, rpc method for example.
*
* <p>Conventionally, when the span name isn't known, name = "unknown".
*/
@Nullable public String name() {
return name;
}
/**
* Epoch microseconds of the start of this span, possibly absent if this is an incomplete span.
*
* <p>This value should be set directly by instrumentation, using the most precise value
* possible. For example, {@code gettimeofday} or multiplying {@link System#currentTimeMillis} by
* 1000.
*
* <p>There are three known edge-cases where this could be reported absent:
*
* <pre><ul>
* <li>A span was allocated but never started (ex not yet received a timestamp)</li>
* <li>The span's start event was lost</li>
* <li>Data about a completed span (ex tags) were sent after the fact</li>
* </ul></pre>
*
* <p>Note: timestamps at or before epoch (0L == 1970) are invalid
*
* @see #duration()
* @see #timestampAsLong()
*/
@Nullable public Long timestamp() {
return timestamp > 0 ? timestamp : null;
}
/**
* Like {@link #timestamp()} except returns a primitive where zero implies absent.
*
* <p>Using this method will avoid allocation, so is encouraged when copying data.
*/
public long timestampAsLong() {
return timestamp;
}
/**
* Measurement in microseconds of the critical path, if known. Durations of less than one
* microsecond must be rounded up to 1 microsecond.
*
* <p>This value should be set directly, as opposed to implicitly via annotation timestamps.
* Doing so encourages precision decoupled from problems of clocks, such as skew or NTP updates
* causing time to move backwards.
*
* <p>If this field is persisted as unset, zipkin will continue to work, except duration query
* support will be implementation-specific. Similarly, setting this field non-atomically is
* implementation-specific.
*
* <p>This field is i64 vs i32 to support spans longer than 35 minutes.
*
* @see #durationAsLong()
*/
@Nullable public Long duration() {
return duration > 0 ? duration : null;
}
/**
* Like {@link #duration()} except returns a primitive where zero implies absent.
*
* <p>Using this method will avoid allocation, so is encouraged when copying data.
*/
public long durationAsLong() {
return duration;
}
/**
* The host that recorded this span, primarily for query by service name.
*
* <p>Instrumentation should always record this and be as consistent as possible with the service
* name as it is used in search. This is nullable for legacy reasons.
*/
// Nullable for data conversion especially late arriving data which might not have an annotation
@Nullable public Endpoint localEndpoint() {
return localEndpoint;
}
/**
* When an RPC (or messaging) span, indicates the other side of the connection.
*
* <p>By recording the remote endpoint, your trace will contain network context even if the peer
* is not tracing. For example, you can record the IP from the {@code X-Forwarded-For} header or
* the service name and socket of a remote peer.
*/
@Nullable public Endpoint remoteEndpoint() {
return remoteEndpoint;
}
/**
* Events that explain latency with a timestamp. Unlike log statements, annotations are often
* short or contain codes: for example "brave.flush". Annotations are sorted ascending by
* timestamp.
*/
public List<Annotation> annotations() {
return annotations;
}
/**
* Tags a span with context, usually to support query or aggregation.
*
* <p>For example, a tag key could be {@code "http.path"}.
*/
public Map<String, String> tags() {
return tags;
}
/** True if this is a request to store this span even if it overrides sampling policy. */
@Nullable public Boolean debug() {
return (flags & FLAG_DEBUG_SET) == FLAG_DEBUG_SET
? (flags & FLAG_DEBUG) == FLAG_DEBUG
: null;
}
/**
* True if we are contributing to a span started by another tracer (ex on a different host).
* Defaults to null. When set, it is expected for {@link #kind()} to be {@link Kind#SERVER}.
*
* <p>When an RPC trace is client-originated, it will be sampled and the same span ID is used for
* the server side. However, the server shouldn't set span.timestamp or duration since it didn't
* start the span.
*/
@Nullable public Boolean shared() {
return (flags & FLAG_SHARED_SET) == FLAG_SHARED_SET
? (flags & FLAG_SHARED) == FLAG_SHARED
: null;
}
@Nullable public String localServiceName() {
Endpoint localEndpoint = localEndpoint();
return localEndpoint != null ? localEndpoint.serviceName() : null;
}
@Nullable public String remoteServiceName() {
Endpoint remoteEndpoint = remoteEndpoint();
return remoteEndpoint != null ? remoteEndpoint.serviceName() : null;
}
public static Builder newBuilder() {
return new Builder();
}
public Builder toBuilder() {
return new Builder(this);
}
public static final class Builder {
String traceId, parentId, id;
Kind kind;
String name;
long timestamp, duration; // zero means null
Endpoint localEndpoint, remoteEndpoint;
ArrayList<Annotation> annotations;
TreeMap<String, String> tags;
int flags = 0; // bit field for timestamp and duration
public Builder clear() {
traceId = null;
parentId = null;
id = null;
kind = null;
name = null;
timestamp = 0L;
duration = 0L;
localEndpoint = null;
remoteEndpoint = null;
if (annotations != null) annotations.clear();
if (tags != null) tags.clear();
flags = 0;
return this;
}
@Override public Builder clone() {
Builder result = new Builder();
result.traceId = traceId;
result.parentId = parentId;
result.id = id;
result.kind = kind;
result.name = name;
result.timestamp = timestamp;
result.duration = duration;
result.localEndpoint = localEndpoint;
result.remoteEndpoint = remoteEndpoint;
if (annotations != null) {
result.annotations = (ArrayList) annotations.clone();
}
if (tags != null) {
result.tags = (TreeMap) tags.clone();
}
result.flags = flags;
return result;
}
Builder(Span source) {
traceId = source.traceId;
parentId = source.parentId;
id = source.id;
kind = source.kind;
name = source.name;
timestamp = source.timestamp;
duration = source.duration;
localEndpoint = source.localEndpoint;
remoteEndpoint = source.remoteEndpoint;
if (!source.annotations.isEmpty()) {
annotations = new ArrayList<>(source.annotations.size());
annotations.addAll(source.annotations);
}
if (!source.tags.isEmpty()) {
tags = new TreeMap<>();
tags.putAll(source.tags);
}
flags = source.flags;
}
/**
* Used to merge multiple incomplete spans representing the same operation on the same host. Do
* not use this to merge spans that occur on different hosts.
*/
public Builder merge(Span source) {
if (traceId == null) traceId = source.traceId;
if (id == null) id = source.id;
if (parentId == null) parentId = source.parentId;
if (kind == null) kind = source.kind;
if (name == null) name = source.name;
if (timestamp == 0L) timestamp = source.timestamp;
if (duration == 0L) duration = source.duration;
if (localEndpoint == null) {
localEndpoint = source.localEndpoint;
} else if (source.localEndpoint != null) {
localEndpoint = localEndpoint.toBuilder().merge(source.localEndpoint).build();
}
if (remoteEndpoint == null) {
remoteEndpoint = source.remoteEndpoint;
} else if (source.remoteEndpoint != null) {
remoteEndpoint = remoteEndpoint.toBuilder().merge(source.remoteEndpoint).build();
}
if (!source.annotations.isEmpty()) {
if (annotations == null) {
annotations = new ArrayList<>(source.annotations.size());
}
annotations.addAll(source.annotations);
}
if (!source.tags.isEmpty()) {
if (tags == null) tags = new TreeMap<>();
tags.putAll(source.tags);
}
flags = flags | source.flags;
return this;
}
@Nullable public Kind kind() {
return kind;
}
@Nullable public Endpoint localEndpoint() {
return localEndpoint;
}
/**
* @throws IllegalArgumentException if not lower-hex format
* @see Span#id()
*/
public Builder traceId(String traceId) {
this.traceId = normalizeTraceId(traceId);
return this;
}
/**
* Encodes 64 or 128 bits from the input into a hex trace ID.
*
* @param high Upper 64bits of the trace ID. Zero means the trace ID is 64-bit.
* @param low Lower 64bits of the trace ID.
* @throws IllegalArgumentException if both values are zero
*/
public Builder traceId(long high, long low) {
if (high == 0L && low == 0L) throw new IllegalArgumentException("empty trace ID");
char[] data = Platform.shortStringBuffer();
int pos = 0;
if (high != 0L) {
writeHexLong(data, pos, high);
pos += 16;
}
writeHexLong(data, pos, low);
this.traceId = new String(data, 0, high != 0L ? 32 : 16);
return this;
}
/**
* Encodes 64 bits from the input into a hex parent ID. Unsets the {@link Span#parentId()} if
* the input is 0.
*
* @see Span#parentId()
*/
public Builder parentId(long parentId) {
this.parentId = parentId != 0L ? toLowerHex(parentId) : null;
return this;
}
/**
* @throws IllegalArgumentException if not lower-hex format
* @see Span#parentId()
*/
public Builder parentId(@Nullable String parentId) {
if (parentId == null) {
this.parentId = null;
return this;
}
int length = parentId.length();
if (length == 0) throw new IllegalArgumentException("parentId is empty");
if (length > 16) throw new IllegalArgumentException("parentId.length > 16");
if (validateHexAndReturnZeroPrefix(parentId) == length) {
this.parentId = null;
} else {
this.parentId = length < 16 ? padLeft(parentId, 16) : parentId;
}
return this;
}
/**
* Encodes 64 bits from the input into a hex span ID.
*
* @throws IllegalArgumentException if the input is zero
* @see Span#id()
*/
public Builder id(long id) {
if (id == 0L) throw new IllegalArgumentException("empty id");
this.id = toLowerHex(id);
return this;
}
/**
* @throws IllegalArgumentException if not lower-hex format
* @see Span#id()
*/
public Builder id(String id) {
if (id == null) throw new NullPointerException("id == null");
int length = id.length();
if (length == 0) throw new IllegalArgumentException("id is empty");
if (length > 16) throw new IllegalArgumentException("id.length > 16");
if (validateHexAndReturnZeroPrefix(id) == 16) {
throw new IllegalArgumentException("id is all zeros");
}
this.id = length < 16 ? padLeft(id, 16) : id;
return this;
}
/** @see Span#kind */
public Builder kind(@Nullable Kind kind) {
this.kind = kind;
return this;
}
/** @see Span#name */
public Builder name(@Nullable String name) {
this.name = name == null || name.isEmpty() ? null : name.toLowerCase(Locale.ROOT);
return this;
}
/** @see Span#timestampAsLong() */
public Builder timestamp(long timestamp) {
if (timestamp < 0L) timestamp = 0L;
this.timestamp = timestamp;
return this;
}
/** @see Span#timestamp() */
public Builder timestamp(@Nullable Long timestamp) {
if (timestamp == null || timestamp < 0L) timestamp = 0L;
this.timestamp = timestamp;
return this;
}
/** @see Span#durationAsLong() */
public Builder duration(long duration) {
if (duration < 0L) duration = 0L;
this.duration = duration;
return this;
}
/** @see Span#duration() */
public Builder duration(@Nullable Long duration) {
if (duration == null || duration < 0L) duration = 0L;
this.duration = duration;
return this;
}
/** @see Span#localEndpoint */
public Builder localEndpoint(@Nullable Endpoint localEndpoint) {
if (EMPTY_ENDPOINT.equals(localEndpoint)) localEndpoint = null;
this.localEndpoint = localEndpoint;
return this;
}
/** @see Span#remoteEndpoint */
public Builder remoteEndpoint(@Nullable Endpoint remoteEndpoint) {
if (EMPTY_ENDPOINT.equals(remoteEndpoint)) remoteEndpoint = null;
this.remoteEndpoint = remoteEndpoint;
return this;
}
/** @see Span#annotations */
public Builder addAnnotation(long timestamp, String value) {
if (annotations == null) annotations = new ArrayList<>(2);
annotations.add(Annotation.create(timestamp, value));
return this;
}
/** @see Span#annotations */
public Builder clearAnnotations() {
if (annotations == null) return this;
annotations.clear();
return this;
}
/** @see Span#tags */
public Builder putTag(String key, String value) {
if (tags == null) tags = new TreeMap<>();
if (key == null) throw new NullPointerException("key == null");
if (value == null) throw new NullPointerException("value of " + key + " == null");
this.tags.put(key, value);
return this;
}
/** @see Span#tags */
public Builder clearTags() {
if (tags == null) return this;
tags.clear();
return this;
}
/** @see Span#debug */
public Builder debug(boolean debug) {
flags |= FLAG_DEBUG_SET;
if (debug) {
flags |= FLAG_DEBUG;
} else {
flags &= ~FLAG_DEBUG;
}
return this;
}
/** @see Span#debug */
public Builder debug(@Nullable Boolean debug) {
if (debug != null) return debug((boolean) debug);
flags &= ~(FLAG_DEBUG_SET | FLAG_DEBUG);
return this;
}
/** @see Span#shared */
public Builder shared(boolean shared) {
flags |= FLAG_SHARED_SET;
if (shared) {
flags |= FLAG_SHARED;
} else {
flags &= ~FLAG_SHARED;
}
return this;
}
/** @see Span#shared */
public Builder shared(@Nullable Boolean shared) {
if (shared != null) return shared((boolean) shared);
flags &= ~(FLAG_SHARED_SET | FLAG_SHARED);
return this;
}
public Span build() {
String missing = "";
if (traceId == null) missing += " traceId";
if (id == null) missing += " id";
if (!"".equals(missing)) throw new IllegalStateException("Missing :" + missing);
if (id.equals(parentId)) { // edge case, so don't require a logger field
Logger logger = Logger.getLogger(Span.class.getName());
if (logger.isLoggable(FINEST)) {
logger.fine(format("undoing circular dependency: traceId=%s, spanId=%s", traceId, id));
}
parentId = null;
}
// shared is for the server side, unset it if accidentally set on the client side
if ((flags & FLAG_SHARED) == FLAG_SHARED && kind == Kind.CLIENT) {
Logger logger = Logger.getLogger(Span.class.getName());
if (logger.isLoggable(FINEST)) {
logger.fine(format("removing shared flag on client: traceId=%s, spanId=%s", traceId, id));
}
shared(null);
}
return new Span(this);
}
Builder() {
}
}
@Override public String toString() {
return new String(SpanBytesEncoder.JSON_V2.encode(this), UTF_8);
}
/**
* Returns a valid lower-hex trace ID, padded left as needed to 16 or 32 characters.
*
* @throws IllegalArgumentException if oversized or not lower-hex
*/
public static String normalizeTraceId(String traceId) {
if (traceId == null) throw new NullPointerException("traceId == null");
int length = traceId.length();
if (length == 0) throw new IllegalArgumentException("traceId is empty");
if (length > 32) throw new IllegalArgumentException("traceId.length > 32");
int zeros = validateHexAndReturnZeroPrefix(traceId);
if (zeros == length) throw new IllegalArgumentException("traceId is all zeros");
if (length == 32 || length == 16) {
if (length == 32 && zeros >= 16) return traceId.substring(16);
return traceId;
} else if (length < 16) {
return padLeft(traceId, 16);
} else {
return padLeft(traceId, 32);
}
}
static final String THIRTY_TWO_ZEROS;
static {
char[] zeros = new char[32];
Arrays.fill(zeros, '0');
THIRTY_TWO_ZEROS = new String(zeros);
}
static String padLeft(String id, int desiredLength) {
int length = id.length();
int numZeros = desiredLength - length;
char[] data = Platform.shortStringBuffer();
THIRTY_TWO_ZEROS.getChars(0, numZeros, data, 0);
id.getChars(0, length, data, numZeros);
return new String(data, 0, desiredLength);
}
static String toLowerHex(long v) {
char[] data = Platform.shortStringBuffer();
writeHexLong(data, 0, v);
return new String(data, 0, 16);
}
/** Inspired by {@code okio.Buffer.writeLong} */
static void writeHexLong(char[] data, int pos, long v) {
writeHexByte(data, pos + 0, (byte) ((v >>> 56L) & 0xff));
writeHexByte(data, pos + 2, (byte) ((v >>> 48L) & 0xff));
writeHexByte(data, pos + 4, (byte) ((v >>> 40L) & 0xff));
writeHexByte(data, pos + 6, (byte) ((v >>> 32L) & 0xff));
writeHexByte(data, pos + 8, (byte) ((v >>> 24L) & 0xff));
writeHexByte(data, pos + 10, (byte) ((v >>> 16L) & 0xff));
writeHexByte(data, pos + 12, (byte) ((v >>> 8L) & 0xff));
writeHexByte(data, pos + 14, (byte) (v & 0xff));
}
static void writeHexByte(char[] data, int pos, byte b) {
data[pos + 0] = HEX_DIGITS[(b >> 4) & 0xf];
data[pos + 1] = HEX_DIGITS[b & 0xf];
}
static int validateHexAndReturnZeroPrefix(String id) {
int zeros = 0;
boolean inZeroPrefix = id.charAt(0) == '0';
for (int i = 0, length = id.length(); i < length; i++) {
char c = id.charAt(i);
if ((c < '0' || c > '9') && (c < 'a' || c > 'f')) {
throw new IllegalArgumentException(id + " should be lower-hex encoded with no prefix");
}
if (c != '0') {
inZeroPrefix = false;
} else if (inZeroPrefix) {
zeros++;
}
}
return zeros;
}
static <T extends Comparable<? super T>> List<T> sortedList(@Nullable List<T> in) {
if (in == null || in.isEmpty()) return Collections.emptyList();
if (in.size() == 1) return Collections.singletonList(in.get(0));
Object[] array = in.toArray();
Arrays.sort(array);
// dedupe
int j = 0, i = 1;
while (i < array.length) {
if (!array[i].equals(array[j])) {
array[++j] = array[i];
}
i++;
}
List result = Arrays.asList(i == j + 1 ? array : Arrays.copyOf(array, j + 1));
return Collections.unmodifiableList(result);
}
// Custom impl to reduce GC churn and Kryo which cannot handle AutoValue subclass
// See https://github.com/openzipkin/zipkin/issues/1879
final String traceId, parentId, id;
final Kind kind;
final String name;
final long timestamp, duration; // zero means null, saving 2 object references
final Endpoint localEndpoint, remoteEndpoint;
final List<Annotation> annotations;
final Map<String, String> tags;
final int flags; // bit field for timestamp and duration, saving 2 object references
Span(Builder builder) {
traceId = builder.traceId;
// prevent self-referencing spans
parentId = builder.id.equals(builder.parentId) ? null : builder.parentId;
id = builder.id;
kind = builder.kind;
name = builder.name;
timestamp = builder.timestamp;
duration = builder.duration;
localEndpoint = builder.localEndpoint;
remoteEndpoint = builder.remoteEndpoint;
annotations = sortedList(builder.annotations);
tags = builder.tags == null ? Collections.emptyMap() : new LinkedHashMap<>(builder.tags);
flags = builder.flags;
}
@Override public boolean equals(Object o) {
if (o == this) return true;
if (!(o instanceof Span)) return false;
Span that = (Span) o;
return traceId.equals(that.traceId)
&& (parentId == null ? that.parentId == null : parentId.equals(that.parentId))
&& id.equals(that.id)
&& (kind == null ? that.kind == null : kind.equals(that.kind))
&& (name == null ? that.name == null : name.equals(that.name))
&& timestamp == that.timestamp
&& duration == that.duration
&& (localEndpoint == null
? that.localEndpoint == null : localEndpoint.equals(that.localEndpoint))
&& (remoteEndpoint == null
? that.remoteEndpoint == null : remoteEndpoint.equals(that.remoteEndpoint))
&& annotations.equals(that.annotations)
&& tags.equals(that.tags)
&& flags == that.flags;
}
@Override public int hashCode() {
int h = 1;
h *= 1000003;
h ^= traceId.hashCode();
h *= 1000003;
h ^= (parentId == null) ? 0 : parentId.hashCode();
h *= 1000003;
h ^= id.hashCode();
h *= 1000003;
h ^= (kind == null) ? 0 : kind.hashCode();
h *= 1000003;
h ^= (name == null) ? 0 : name.hashCode();
h *= 1000003;
h ^= (int) (h ^ ((timestamp >>> 32) ^ timestamp));
h *= 1000003;
h ^= (int) (h ^ ((duration >>> 32) ^ duration));
h *= 1000003;
h ^= (localEndpoint == null) ? 0 : localEndpoint.hashCode();
h *= 1000003;
h ^= (remoteEndpoint == null) ? 0 : remoteEndpoint.hashCode();
h *= 1000003;
h ^= annotations.hashCode();
h *= 1000003;
h ^= tags.hashCode();
h *= 1000003;
h ^= flags;
return h;
}
// This is an immutable object, and our encoder is faster than java's: use a serialization proxy.
final Object writeReplace() throws ObjectStreamException {
return new SerializedForm(SpanBytesEncoder.PROTO3.encode(this));
}
private static final class SerializedForm implements Serializable {
private static final long serialVersionUID = 0L;
final byte[] bytes;
SerializedForm(byte[] bytes) {
this.bytes = bytes;
}
Object readResolve() throws ObjectStreamException {
try {
return SpanBytesDecoder.PROTO3.decodeOne(bytes);
} catch (IllegalArgumentException e) {
throw new StreamCorruptedException(e.getMessage());
}
}
}
}
| 1 | 16,751 | trim has a performance impact and this is the lowest-level library... it seems the trim, if it occurs at all, should happen in the UI or Query controller instead. | openzipkin-zipkin | java |
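A hedged sketch of the alternative the comment above points at: rather than allocating a trimmed copy via String.trim() inside this lowest-level normalizer, fail fast on surrounding whitespace and leave any actual trimming to the UI or query-controller boundary. This is an illustration under those assumptions, not zipkin's actual change:

public final class TraceIdNormalizer {
  /** Rejects surrounding whitespace without allocating a trimmed copy. */
  public static String normalizeTraceId(String traceId) {
    if (traceId == null) throw new NullPointerException("traceId == null");
    int length = traceId.length();
    if (length == 0) throw new IllegalArgumentException("traceId is empty");
    // charAt inspects the ends in place; no new String is created.
    if (Character.isWhitespace(traceId.charAt(0))
        || Character.isWhitespace(traceId.charAt(length - 1))) {
      throw new IllegalArgumentException("traceId has surrounding whitespace");
    }
    if (length > 32) throw new IllegalArgumentException("traceId.length > 32");
    // ... hex validation and left-padding as in the file above would follow ...
    return traceId;
  }
}

Note that whitespace would also be rejected by the existing hex validation; the explicit check only buys a clearer error message, which is why callers that genuinely receive padded input should trim before calling in.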
@@ -80,6 +80,7 @@ class Request(testprocess.Line):
'/500-inline': [http.client.INTERNAL_SERVER_ERROR],
}
+ # pylint: enable=no-member
for i in range(15):
path_to_statuses['/redirect/{}'.format(i)] = [http.client.FOUND]
for suffix in ['', '1', '2', '3', '4', '5', '6']: | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Fixtures for the server webserver."""
import re
import sys
import json
import os.path
import http.client
import attr
import pytest
from PyQt5.QtCore import pyqtSignal, QUrl
from end2end.fixtures import testprocess
from qutebrowser.utils import utils
class Request(testprocess.Line):
"""A parsed line from the flask log output.
Attributes:
verb/path/status: Parsed from the log output.
"""
def __init__(self, data):
super().__init__(data)
try:
parsed = json.loads(data)
except ValueError:
raise testprocess.InvalidLine(data)
assert isinstance(parsed, dict)
assert set(parsed.keys()) == {'path', 'verb', 'status'}
self.verb = parsed['verb']
path = parsed['path']
self.path = '/' if path == '/' else path.rstrip('/')
self.status = parsed['status']
self._check_status()
def _check_status(self):
"""Check if the http status is what we expected."""
# WORKAROUND for https://github.com/PyCQA/pylint/issues/399 (?)
# pylint: disable=no-member
path_to_statuses = {
'/favicon.ico': [http.client.NOT_FOUND],
'/does-not-exist': [http.client.NOT_FOUND],
'/does-not-exist-2': [http.client.NOT_FOUND],
'/404': [http.client.NOT_FOUND],
'/redirect-later': [http.client.FOUND],
'/redirect-self': [http.client.FOUND],
'/redirect-to': [http.client.FOUND],
'/relative-redirect': [http.client.FOUND],
'/absolute-redirect': [http.client.FOUND],
'/cookies/set': [http.client.FOUND],
'/500-inline': [http.client.INTERNAL_SERVER_ERROR],
}
for i in range(15):
path_to_statuses['/redirect/{}'.format(i)] = [http.client.FOUND]
for suffix in ['', '1', '2', '3', '4', '5', '6']:
key = '/basic-auth/user{}/password{}'.format(suffix, suffix)
path_to_statuses[key] = [http.client.UNAUTHORIZED, http.client.OK]
default_statuses = [http.client.OK, http.client.NOT_MODIFIED]
sanitized = QUrl('http://localhost' + self.path).path() # Remove ?foo
expected_statuses = path_to_statuses.get(sanitized, default_statuses)
if self.status not in expected_statuses:
raise AssertionError(
"{} loaded with status {} but expected {}".format(
sanitized, self.status,
' / '.join(repr(e) for e in expected_statuses)))
def __eq__(self, other):
return NotImplemented
@attr.s(frozen=True, cmp=False, hash=True)
class ExpectedRequest:
"""Class to compare expected requests easily."""
verb = attr.ib()
path = attr.ib()
@classmethod
def from_request(cls, request):
"""Create an ExpectedRequest from a Request."""
return cls(request.verb, request.path)
def __eq__(self, other):
if isinstance(other, (Request, ExpectedRequest)):
return self.verb == other.verb and self.path == other.path
else:
return NotImplemented
class WebserverProcess(testprocess.Process):
"""Abstraction over a running Flask server process.
Reads the log from its stdout and parses it.
Signals:
new_request: Emitted when there's a new request received.
"""
new_request = pyqtSignal(Request)
Request = Request # So it can be used from the fixture easily.
ExpectedRequest = ExpectedRequest
KEYS = ['verb', 'path']
def __init__(self, script, parent=None):
super().__init__(parent)
self._script = script
self.port = utils.random_port()
self.new_data.connect(self.new_request)
def get_requests(self):
"""Get the requests to the server during this test."""
requests = self._get_data()
return [r for r in requests if r.path != '/favicon.ico']
def _parse_line(self, line):
self._log(line)
started_re = re.compile(r' \* Running on https?://127\.0\.0\.1:{}/ '
r'\(Press CTRL\+C to quit\)'.format(self.port))
if started_re.fullmatch(line):
self.ready.emit()
return None
return Request(line)
def _executable_args(self):
if hasattr(sys, 'frozen'):
executable = os.path.join(os.path.dirname(sys.executable),
self._script)
args = []
else:
executable = sys.executable
py_file = os.path.join(os.path.dirname(__file__),
self._script + '.py')
args = [py_file]
return executable, args
def _default_args(self):
return [str(self.port)]
def cleanup(self):
"""Clean up and shut down the process."""
self.proc.terminate()
self.proc.waitForFinished()
@pytest.fixture(scope='session', autouse=True)
def server(qapp):
"""Fixture for an server object which ensures clean setup/teardown."""
server = WebserverProcess('webserver_sub')
server.start()
yield server
server.cleanup()
@pytest.fixture(autouse=True)
def server_after_test(server, request):
"""Fixture to clean server request list after each test."""
request.node._server_log = server.captured_log
yield
server.after_test()
@pytest.fixture
def ssl_server(request, qapp):
"""Fixture for a webserver with a self-signed SSL certificate.
This needs to be explicitly used in a test, and overwrites the server log
used in that test.
"""
server = WebserverProcess('webserver_sub_ssl')
request.node._server_log = server.captured_log
server.start()
yield server
server.after_test()
server.cleanup()
| 1 | 19,423 | Probably also needed for the `http.client.FOUND` below? | qutebrowser-qutebrowser | py |