patch (stringlengths 17 to 31.2k) | y (int64, 1 to 1) | oldf (stringlengths 0 to 2.21M) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (stringlengths 8 to 843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values) |
---|---|---|---|---|---|---|---|
@@ -77,6 +77,8 @@ class Thelia extends Kernel
);
$serviceContainer = Propel::getServiceContainer();
$serviceContainer->setAdapterClass('thelia', 'mysql');
+ $serviceContainer->setDefaultDatasource('thelia');
+
$manager = new ConnectionManagerSingle();
$manager->setConfiguration($definePropel->getConfig());
$serviceContainer->setConnectionManager('thelia', $manager); | 1 | <?php
/*************************************************************************************/
/* This file is part of the Thelia package. */
/* */
/* Copyright (c) OpenStudio */
/* email : [email protected] */
/* web : http://www.thelia.net */
/* */
/* For the full copyright and license information, please view the LICENSE.txt */
/* file that was distributed with this source code. */
/*************************************************************************************/
namespace Thelia\Core;
/**
* Root class of Thelia
*
* It extends Symfony\Component\HttpKernel\Kernel for changing some features
*
*
* @author Manuel Raynaud <[email protected]>
*/
use Propel\Runtime\Connection\ConnectionManagerSingle;
use Propel\Runtime\Connection\ConnectionWrapper;
use Propel\Runtime\Propel;
use Symfony\Component\Config\FileLocator;
use Symfony\Component\Config\Loader\LoaderInterface;
use Symfony\Component\Debug\Debug;
use Symfony\Component\DependencyInjection\ContainerBuilder;
use Symfony\Component\DependencyInjection\Definition;
use Symfony\Component\DependencyInjection\ParameterBag\ParameterBag;
use Symfony\Component\DependencyInjection\Reference;
use Symfony\Component\Finder\Finder;
use Symfony\Component\HttpKernel\Kernel;
use Symfony\Component\Yaml\Yaml;
use Thelia\Config\DatabaseConfiguration;
use Thelia\Config\DefinePropel;
use Thelia\Core\DependencyInjection\Loader\XmlFileLoader;
use Thelia\Core\Event\TheliaEvents;
use Thelia\Core\Template\ParserInterface;
use Thelia\Core\Template\TemplateDefinition;
use Thelia\Core\Template\TemplateHelper;
use Thelia\Log\Tlog;
use Thelia\Model\Module;
use Thelia\Model\ModuleQuery;
class Thelia extends Kernel
{
const THELIA_VERSION = '2.1.0-alpha1';
public function __construct($environment, $debug)
{
parent::__construct($environment, $debug);
if ($debug) {
Debug::enable();
}
$this->initPropel();
}
public static function isInstalled()
{
return file_exists(THELIA_CONF_DIR . 'database.yml');
}
protected function initPropel()
{
if (self::isInstalled() === false) {
return ;
}
$definePropel = new DefinePropel(
new DatabaseConfiguration(),
Yaml::parse(THELIA_CONF_DIR . 'database.yml')
);
$serviceContainer = Propel::getServiceContainer();
$serviceContainer->setAdapterClass('thelia', 'mysql');
$manager = new ConnectionManagerSingle();
$manager->setConfiguration($definePropel->getConfig());
$serviceContainer->setConnectionManager('thelia', $manager);
$con = Propel::getConnection(\Thelia\Model\Map\ProductTableMap::DATABASE_NAME);
$con->setAttribute(ConnectionWrapper::PROPEL_ATTR_CACHE_PREPARES, true);
if ($this->isDebug()) {
$serviceContainer->setLogger('defaultLogger', Tlog::getInstance());
$con->useDebug(true);
}
}
/**
* dispatch an event when application is boot
*/
public function boot()
{
parent::boot();
if (self::isInstalled()) {
$this->getContainer()->get("event_dispatcher")->dispatch(TheliaEvents::BOOT);
}
}
/**
* Add all module's standard templates to the parser environment
*
* @param ParserInterface $parser the parser
* @param Module $module the Module.
*/
protected function addStandardModuleTemplatesToParserEnvironment($parser, $module)
{
$stdTpls = TemplateDefinition::getStandardTemplatesSubdirsIterator();
foreach ($stdTpls as $templateType => $templateSubdirName) {
$this->addModuleTemplateToParserEnvironment($parser, $module, $templateType, $templateSubdirName);
}
}
/**
* Add a module template directory to the parser environment
*
* @param ParserInterface $parser the parser
* @param Module $module the Module.
* @param string $templateType the template type (one of the TemplateDefinition type constants)
* @param string $templateSubdirName the template subdirectory name (one of the TemplateDefinition::XXX_SUBDIR constants)
*/
protected function addModuleTemplateToParserEnvironment($parser, $module, $templateType, $templateSubdirName)
{
// Get template path
$templateDirectory = $module->getAbsoluteTemplateDirectoryPath($templateSubdirName);
try {
$templateDirBrowser = new \DirectoryIterator($templateDirectory);
$code = ucfirst($module->getCode());
/* browse the directory */
foreach ($templateDirBrowser as $templateDirContent) {
/* is it a directory which is not . or .. ? */
if ($templateDirContent->isDir() && ! $templateDirContent->isDot()) {
$parser->addMethodCall(
'addTemplateDirectory',
array(
$templateType,
$templateDirContent->getFilename(),
$templateDirContent->getPathName(),
$code
)
);
}
}
} catch (\UnexpectedValueException $ex) {
// The directory does not exists, ignore it.
}
}
/**
*
* Load some configuration
* Initialize all plugins
*
*/
protected function loadConfiguration(ContainerBuilder $container)
{
$loader = new XmlFileLoader($container, new FileLocator(THELIA_ROOT . "/core/lib/Thelia/Config/Resources"));
$finder = Finder::create()
->name('*.xml')
->depth(0)
->in(THELIA_ROOT . "/core/lib/Thelia/Config/Resources");
/** @var \SplFileInfo $file */
foreach ($finder as $file) {
$loader->load($file->getBaseName());
}
if (defined("THELIA_INSTALL_MODE") === false) {
$modules = ModuleQuery::getActivated();
$translationDirs = array();
/** @var Module $module */
foreach ($modules as $module) {
try {
$definition = new Definition();
$definition->setClass($module->getFullNamespace());
$definition->addMethodCall("setContainer", array(new Reference('service_container')));
$container->setDefinition(
"module." . $module->getCode(),
$definition
);
$compilers = call_user_func(array($module->getFullNamespace(), 'getCompilers'));
foreach ($compilers as $compiler) {
if (is_array($compiler)) {
$container->addCompilerPass($compiler[0], $compiler[1]);
} else {
$container->addCompilerPass($compiler);
}
}
$loader = new XmlFileLoader($container, new FileLocator($module->getAbsoluteConfigPath()));
$loader->load("config.xml", "module." . $module->getCode());
} catch (\Exception $e) {
Tlog::getInstance()->addError(
sprintf("Failed to load module %s: %s", $module->getCode(), $e->getMessage()),
$e
);
}
}
/** @var ParserInterface $parser */
$parser = $container->getDefinition('thelia.parser');
/** @var Module $module */
foreach ($modules as $module) {
try {
// Core module translation
if (is_dir($dir = $module->getAbsoluteI18nPath())) {
$translationDirs[$module->getTranslationDomain()] = $dir;
}
// Admin includes translation
if (is_dir($dir = $module->getAbsoluteAdminIncludesI18nPath())) {
$translationDirs[$module->getAdminIncludesTranslationDomain()] = $dir;
}
// Module back-office template, if any
$templates =
TemplateHelper::getInstance()->getList(
TemplateDefinition::BACK_OFFICE,
$module->getAbsoluteTemplateBasePath()
);
foreach ($templates as $template) {
$translationDirs[$module->getBackOfficeTemplateTranslationDomain($template->getName())] =
$module->getAbsoluteBackOfficeI18nTemplatePath($template->getName());
}
// Module front-office template, if any
$templates =
TemplateHelper::getInstance()->getList(
TemplateDefinition::FRONT_OFFICE,
$module->getAbsoluteTemplateBasePath()
);
foreach ($templates as $template) {
$translationDirs[$module->getFrontOfficeTemplateTranslationDomain($template->getName())] =
$module->getAbsoluteFrontOfficeI18nTemplatePath($template->getName());
}
$this->addStandardModuleTemplatesToParserEnvironment($parser, $module);
} catch (\Exception $e) {
Tlog::getInstance()->addError(
sprintf("Failed to load module %s: %s", $module->getCode(), $e->getMessage()),
$e
);
}
}
// Load core translation
$translationDirs['core'] = THELIA_ROOT . 'core'.DS.'lib'.DS.'Thelia'.DS.'Config'.DS.'I18n';
// Standard templates (front, back, pdf, mail)
$th = TemplateHelper::getInstance();
/** @var TemplateDefinition $templateDefinition */
foreach ($th->getStandardTemplateDefinitions() as $templateDefinition) {
if (is_dir($dir = $templateDefinition->getAbsoluteI18nPath())) {
$translationDirs[$templateDefinition->getTranslationDomain()] = $dir;
}
}
if ($translationDirs) {
$this->loadTranslation($container, $translationDirs);
}
}
}
private function loadTranslation(ContainerBuilder $container, array $dirs)
{
$translator = $container->getDefinition('thelia.translator');
foreach ($dirs as $domain => $dir) {
try {
$finder = Finder::create()
->files()
->depth(0)
->in($dir);
/** @var \DirectoryIterator $file */
foreach ($finder as $file) {
list($locale, $format) = explode('.', $file->getBaseName(), 2);
$translator->addMethodCall('addResource', array($format, (string) $file, $locale, $domain));
}
} catch (\InvalidArgumentException $ex) {
// Ignore missing I18n directories
Tlog::getInstance()->addWarning("loadTranslation: missing $dir directory");
}
}
}
/**
*
* initialize session in Request object
*
* All param must be change in Config table
*
* @param \Symfony\Component\HttpFoundation\Request $request
*/
/**
* Gets a new ContainerBuilder instance used to build the service container.
*
* @return ContainerBuilder
*/
protected function getContainerBuilder()
{
return new TheliaContainerBuilder(new ParameterBag($this->getKernelParameters()));
}
/**
* Builds the service container.
*
* @return ContainerBuilder The compiled service container
*
* @throws \RuntimeException
*/
protected function buildContainer()
{
$container = parent::buildContainer();
$this->loadConfiguration($container);
$container->customCompile();
return $container;
}
/**
* Gets the cache directory.
*
* @return string The cache directory
*
* @api
*/
public function getCacheDir()
{
if (defined('THELIA_ROOT')) {
return THELIA_CACHE_DIR.DS.$this->environment;
} else {
return parent::getCacheDir();
}
}
/**
* Gets the log directory.
*
* @return string The log directory
*
* @api
*/
public function getLogDir()
{
if (defined('THELIA_ROOT')) {
return THELIA_LOG_DIR;
} else {
return parent::getLogDir();
}
}
/**
* Returns the kernel parameters.
*
* @return array An array of kernel parameters
*/
protected function getKernelParameters()
{
$parameters = parent::getKernelParameters();
$parameters["thelia.root_dir"] = THELIA_ROOT;
$parameters["thelia.core_dir"] = THELIA_ROOT . "core/lib/Thelia";
$parameters["thelia.module_dir"] = THELIA_MODULE_DIR;
return $parameters;
}
/**
* return available bundle
*
* Part of Symfony\Component\HttpKernel\KernelInterface
*
* @return array An array of bundle instances.
*
*/
public function registerBundles()
{
$bundles = array(
/* TheliaBundle contain all the dependency injection description */
new Bundle\TheliaBundle(),
);
/**
* OTHER CORE BUNDLE CAN BE DECLARE HERE AND INITIALIZE WITH SPECIFIC CONFIGURATION
*
* HOW TO DECLARE OTHER BUNDLE ? ETC
*/
return $bundles;
}
/**
* Loads the container configuration
*
* part of Symfony\Component\HttpKernel\KernelInterface
*
* @param LoaderInterface $loader A LoaderInterface instance
*
* @api
*/
public function registerContainerConfiguration(LoaderInterface $loader)
{
//Nothing is load here but it's possible to load container configuration here.
//exemple in sf2 : $loader->load(__DIR__.'/config/config_'.$this->getEnvironment().'.yml');
}
}
| 1 | 10,126 | @lunika Please see this. It allows us to do `Propel::getConnection()` easily, as we have only one database. | thelia-thelia | php |
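A minimal sketch of what the comment points at, reusing the Propel 2 calls already present in `initPropel()`; the DSN and credentials are placeholders for what `DefinePropel::getConfig()` would supply. Once a default datasource is registered, `Propel::getConnection()` can be called without naming it.

```php
<?php
// Sketch only: default datasource lets callers omit the connection name.
use Propel\Runtime\Connection\ConnectionManagerSingle;
use Propel\Runtime\Propel;

$serviceContainer = Propel::getServiceContainer();
$serviceContainer->setAdapterClass('thelia', 'mysql');
$serviceContainer->setDefaultDatasource('thelia');

$manager = new ConnectionManagerSingle();
$manager->setConfiguration([
    'dsn'      => 'mysql:host=localhost;dbname=thelia', // placeholder values
    'user'     => 'thelia',
    'password' => 'secret',
]);
$serviceContainer->setConnectionManager('thelia', $manager);

// Before the patch the datasource had to be named explicitly:
$con = Propel::getConnection(\Thelia\Model\Map\ProductTableMap::DATABASE_NAME);

// With a default datasource registered, the name can be omitted:
$con = Propel::getConnection();
```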
@@ -92,7 +92,9 @@ namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNet
var controllerContext = (IHttpControllerContext)state.State;
// some fields aren't set till after execution, so populate anything missing
- AspNetWebApi2Integration.UpdateSpan(controllerContext, scope.Span, (AspNetTags)scope.Span.Tags, Enumerable.Empty<KeyValuePair<string, string>>());
+ var span = scope.Span as IHasTags;
+ var aspNetTags = span.Tags as AspNetTags;
+ AspNetWebApi2Integration.UpdateSpan(controllerContext, scope.Span, aspNetTags, Enumerable.Empty<KeyValuePair<string, string>>());
if (exception != null)
{ | 1 | // <copyright file="ApiController_ExecuteAsync_Integration.cs" company="Datadog">
// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License.
// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc.
// </copyright>
#if NETFRAMEWORK
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Linq;
using System.Threading;
using System.Web;
using Datadog.Trace.ClrProfiler.CallTarget;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.Configuration;
using Datadog.Trace.DuckTyping;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Headers;
using Datadog.Trace.Tagging;
namespace Datadog.Trace.ClrProfiler.AutoInstrumentation.AspNet
{
/// <summary>
/// System.Web.Http.ApiController.ExecuteAsync calltarget instrumentation
/// </summary>
[InstrumentMethod(
AssemblyName = SystemWebHttpAssemblyName,
TypeName = "System.Web.Http.ApiController",
MethodName = "ExecuteAsync",
ReturnTypeName = ClrNames.HttpResponseMessageTask,
ParameterTypeNames = new[] { HttpControllerContextTypeName, ClrNames.CancellationToken },
MinimumVersion = Major5Minor1,
MaximumVersion = Major5MinorX,
IntegrationName = IntegrationName)]
// ReSharper disable once InconsistentNaming
[Browsable(false)]
[EditorBrowsable(EditorBrowsableState.Never)]
public class ApiController_ExecuteAsync_Integration
{
private const string SystemWebHttpAssemblyName = "System.Web.Http";
private const string HttpControllerContextTypeName = "System.Web.Http.Controllers.HttpControllerContext";
private const string Major5Minor1 = "5.1";
private const string Major5MinorX = "5";
private const string IntegrationName = nameof(IntegrationId.AspNetWebApi2);
/// <summary>
/// OnMethodBegin callback
/// </summary>
/// <typeparam name="TTarget">Type of the target</typeparam>
/// <typeparam name="TController">Type of the controller context</typeparam>
/// <param name="instance">Instance value, aka `this` of the instrumented method.</param>
/// <param name="controllerContext">The context of the controller</param>
/// <param name="cancellationToken">The cancellation token</param>
/// <returns>Calltarget state value</returns>
public static CallTargetState OnMethodBegin<TTarget, TController>(TTarget instance, TController controllerContext, CancellationToken cancellationToken)
where TController : IHttpControllerContext
{
// Make sure to box the controllerContext proxy only once
var boxedControllerContext = (IHttpControllerContext)controllerContext;
var scope = AspNetWebApi2Integration.CreateScope(boxedControllerContext, out _);
if (scope != null)
{
return new CallTargetState(scope, boxedControllerContext);
}
return CallTargetState.GetDefault();
}
/// <summary>
/// OnAsyncMethodEnd callback
/// </summary>
/// <typeparam name="TTarget">Type of the target</typeparam>
/// <typeparam name="TResponse">Type of the response, in an async scenario will be T of Task of T</typeparam>
/// <param name="instance">Instance value, aka `this` of the instrumented method.</param>
/// <param name="responseMessage">HttpResponse message instance</param>
/// <param name="exception">Exception instance in case the original code threw an exception.</param>
/// <param name="state">Calltarget state value</param>
/// <returns>A response value, in an async scenario will be T of Task of T</returns>
[PreserveContext]
public static TResponse OnAsyncMethodEnd<TTarget, TResponse>(TTarget instance, TResponse responseMessage, Exception exception, CallTargetState state)
{
var scope = state.Scope;
if (scope is null)
{
return responseMessage;
}
var controllerContext = (IHttpControllerContext)state.State;
// some fields aren't set till after execution, so populate anything missing
AspNetWebApi2Integration.UpdateSpan(controllerContext, scope.Span, (AspNetTags)scope.Span.Tags, Enumerable.Empty<KeyValuePair<string, string>>());
if (exception != null)
{
scope.Span.SetException(exception);
// We don't have access to the final status code at this point
// Ask the HttpContext to call us back to that we can get it
var httpContext = HttpContext.Current;
if (httpContext != null)
{
// We don't know how long it'll take for ASP.NET to invoke the callback,
// so we store the real finish time
var now = scope.Span.Context.TraceContext.UtcNow;
httpContext.AddOnRequestCompleted(h => OnRequestCompleted(h, scope, now));
}
else
{
// Looks like we won't be able to get the final status code
scope.Dispose();
}
}
else
{
HttpContextHelper.AddHeaderTagsFromHttpResponse(HttpContext.Current, scope);
scope.Span.SetHttpStatusCode(responseMessage.DuckCast<HttpResponseMessageStruct>().StatusCode, isServer: true, Tracer.Instance.Settings);
scope.Dispose();
}
return responseMessage;
}
private static void OnRequestCompleted(HttpContext httpContext, Scope scope, DateTimeOffset finishTime)
{
HttpContextHelper.AddHeaderTagsFromHttpResponse(httpContext, scope);
scope.Span.SetHttpStatusCode(httpContext.Response.StatusCode, isServer: true, Tracer.Instance.Settings);
scope.Span.Finish(finishTime);
scope.Dispose();
}
}
}
#endif
| 1 | 24,082 | Not related to this particular cast but I wonder if we shouldn't have a cast helper that would log if null to gain visibility. | DataDog-dd-trace-dotnet | .cs |
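A hedged sketch of the cast helper the comment floats; the class, the method name, and the `Action<string>` logging hook are hypothetical stand-ins rather than existing tracer APIs.

```csharp
using System;

// Hypothetical cast helper sketched from the review comment. The logging hook is a
// stand-in delegate; real code would route through the tracer's own logger.
internal static class CastHelper
{
    public static Action<string> Log { get; set; } = message => Console.Error.WriteLine(message);

    public static T CastOrLog<T>(this object value, string context)
        where T : class
    {
        if (value is T typed)
        {
            return typed;
        }

        // Log the unexpected type so silent null casts become visible.
        Log($"Expected {typeof(T).Name} in {context}, got {(value?.GetType().Name ?? "null")}");
        return null;
    }
}

// Possible call site for the cast in this diff (illustrative only):
// var aspNetTags = (scope.Span as IHasTags)?.Tags.CastOrLog<AspNetTags>(nameof(ApiController_ExecuteAsync_Integration));
```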
@@ -2,7 +2,8 @@ const request = require('request')
// @ts-ignore
const purest = require('purest')({ request })
const logger = require('../logger')
-
+const DRIVE_FILE_FIELDS = 'kind,id,name,mimeType,ownedByMe,permissions(role,emailAddress),size,modifiedTime,iconLink,thumbnailLink'
+const DRIVE_FILES_FIELDS = `kind,nextPageToken,incompleteSearch,files(${DRIVE_FILE_FIELDS})`
/**
* @class
* @implements {Provider} | 1 | const request = require('request')
// @ts-ignore
const purest = require('purest')({ request })
const logger = require('../logger')
/**
* @class
* @implements {Provider}
*/
class Drive {
constructor (options) {
this.authProvider = options.provider = Drive.authProvider
options.alias = 'drive'
this.client = purest(options)
}
static get authProvider () {
return 'google'
}
list (options, done) {
const directory = options.directory || 'root'
const trashed = options.trashed || false
return this.client
.query()
.get('files')
.where({ q: `'${directory}' in parents and trashed=${trashed}` })
.auth(options.token)
.request(done)
}
stats ({ id, token }, done) {
return this.client.query().get(`files/${id}`).auth(token).request(done)
}
download ({ id, token }, onData) {
return this.client
.query()
.get(`files/${id}`)
.where({ alt: 'media' })
.auth(token)
.request()
.on('data', onData)
.on('end', () => onData(null))
.on('error', (err) => {
logger.error(err, 'provider.drive.download.error')
})
}
thumbnail ({id, token}, done) {
return this.stats({id, token}, (err, resp, body) => {
if (err) {
logger.error(err, 'provider.drive.thumbnail.error')
return done(null)
}
done(body.thumbnailLink ? request(body.thumbnailLink) : null)
})
}
size ({id, token}, done) {
return this.stats({ id, token }, (err, resp, body) => {
if (err) {
logger.error(err, 'provider.drive.size.error')
return done(null)
}
done(parseInt(body.fileSize))
})
}
}
module.exports = Drive
| 1 | 11,040 | why do we need to explicitly declare these fields? | transloadit-uppy | js |
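One assumption about why the constants exist (not shown in this diff): the Drive v3 API omits non-default metadata such as `permissions` and `thumbnailLink` unless a `fields` selector is requested explicitly, typically as an extra query parameter. The `fields` entry in the sketch below is purely illustrative, not the PR's actual change.

```js
// Illustration only: how such constants are typically consumed with the
// existing purest client; the `fields` entry is not part of this diff.
list (options, done) {
  const directory = options.directory || 'root'
  const trashed = options.trashed || false
  return this.client
    .query()
    .get('files')
    .where({
      fields: DRIVE_FILES_FIELDS, // ask Drive to include the declared fields
      q: `'${directory}' in parents and trashed=${trashed}`
    })
    .auth(options.token)
    .request(done)
}
```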
@@ -99,12 +99,8 @@ class OaiController extends AbstractBase
$this->getRequest()->getQuery()->toArray(),
$this->getRequest()->getPost()->toArray()
);
- $server = new $serverClass(
- $this->serviceLocator->get('VuFind\Search\Results\PluginManager'),
- $this->serviceLocator->get('VuFind\Record\Loader'),
- $this->serviceLocator->get('VuFind\Db\Table\PluginManager'),
- $config, $baseURL, $params
- );
+ $server = $this->serviceLocator->get('VuFind\OAI\Server');
+ $server->init($config, $baseURL, $params);
$server->setRecordLinkHelper(
$this->getViewRenderer()->plugin('recordLink')
); | 1 | <?php
/**
* OAI Module Controller
*
* PHP version 7
*
* Copyright (C) Villanova University 2011.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Controller
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:controllers Wiki
*/
namespace VuFind\Controller;
/**
* OAIController Class
*
* Controls the OAI server
*
* @category VuFind
* @package Controller
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org/wiki/development:plugins:controllers Wiki
*/
class OaiController extends AbstractBase
{
/**
* Display OAI server form.
*
* @return \Zend\View\Model\ViewModel
*/
public function homeAction()
{
// no action needed
return $this->createViewModel();
}
/**
* Standard OAI server.
*
* @return \Zend\Http\Response
*/
public function authserverAction()
{
return $this->handleOAI('VuFind\OAI\Server\Auth');
}
/**
* Standard OAI server.
*
* @return \Zend\Http\Response
*/
public function serverAction()
{
return $this->handleOAI('VuFind\OAI\Server');
}
/**
* Shared OAI logic.
*
* @param string $serverClass Class to load for handling OAI requests.
*
* @return \Zend\Http\Response
*/
protected function handleOAI($serverClass)
{
// Check if the OAI Server is enabled before continuing
$config = $this->getConfig();
$response = $this->getResponse();
if (!isset($config->OAI)) {
$response->setStatusCode(404);
$response->setContent('OAI Server Not Configured.');
return $response;
}
// Collect relevant parameters for OAI server:
$url = explode('?', $this->getServerUrl());
$baseURL = $url[0];
// Build OAI response or die trying:
try {
$params = array_merge(
$this->getRequest()->getQuery()->toArray(),
$this->getRequest()->getPost()->toArray()
);
$server = new $serverClass(
$this->serviceLocator->get('VuFind\Search\Results\PluginManager'),
$this->serviceLocator->get('VuFind\Record\Loader'),
$this->serviceLocator->get('VuFind\Db\Table\PluginManager'),
$config, $baseURL, $params
);
$server->setRecordLinkHelper(
$this->getViewRenderer()->plugin('recordLink')
);
$xml = $server->getResponse();
} catch (\Exception $e) {
$response->setStatusCode(500);
$response->setContent($e->getMessage());
return $response;
}
// Return response:
$headers = $response->getHeaders();
$headers->addHeaderLine('Content-type', 'text/xml; charset=UTF-8');
$response->setContent($xml);
return $response;
}
}
| 1 | 26,937 | Note that there are multiple OAI servers -- that's why `$serverClass` is a variable here. You'll want to fetch `$serverClass` from the service manager rather than a hard-coded value, and also set up a module.config.php for the authority record version, `VuFind\OAI\Server\Auth`. Should be easy enough since it can share the same factory. | vufind-org-vufind | php |
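A sketch of the suggested shape: keep `$serverClass` variable on the controller side, and register both server classes in module.config.php against a shared factory. The factory class name below is a placeholder, not necessarily what the codebase uses.

```php
<?php
// Controller side: resolve whatever class name was passed in,
// rather than hard-coding 'VuFind\OAI\Server'.
$server = $this->serviceLocator->get($serverClass);
$server->init($config, $baseURL, $params);
```

```php
<?php
// module.config.php side: both OAI servers share one factory
// ('VuFind\OAI\ServerFactory' is a placeholder name).
return [
    'service_manager' => [
        'factories' => [
            'VuFind\OAI\Server' => 'VuFind\OAI\ServerFactory',
            'VuFind\OAI\Server\Auth' => 'VuFind\OAI\ServerFactory',
        ],
    ],
];
```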
@@ -265,6 +265,12 @@ describe Beaker do
context "validate_host" do
subject { dummy_class.new }
+ before(:each) do
+ # Must reset additional_pkgs between each test as it hangs around
+ #Beaker::HostPrebuiltSteps.class_variable_set(:@@additional_pkgs, [])
+ Beaker::HostPrebuiltSteps.module_eval(%q{@@additional_pkgs = []})
+ end
+
it "can validate unix hosts" do
hosts.each do |host| | 1 | require 'spec_helper'
describe Beaker do
let( :options ) { make_opts.merge({ 'logger' => double().as_null_object }) }
let( :ntpserver ) { Beaker::HostPrebuiltSteps::NTPSERVER }
let( :apt_cfg ) { Beaker::HostPrebuiltSteps::APT_CFG }
let( :ips_pkg_repo ) { Beaker::HostPrebuiltSteps::IPS_PKG_REPO }
let( :sync_cmd ) { Beaker::HostPrebuiltSteps::ROOT_KEYS_SYNC_CMD }
let( :windows_pkgs ) { Beaker::HostPrebuiltSteps::WINDOWS_PACKAGES }
let( :unix_only_pkgs ) { Beaker::HostPrebuiltSteps::UNIX_PACKAGES }
let( :sles_only_pkgs ) { Beaker::HostPrebuiltSteps::SLES_PACKAGES }
let( :platform ) { @platform || 'unix' }
let( :ip ) { "ip.address.0.0" }
let( :stdout) { @stdout || ip }
let( :hosts ) { hosts = make_hosts( { :stdout => stdout, :platform => platform } )
hosts[0][:roles] = ['agent']
hosts[1][:roles] = ['master', 'dashboard', 'agent', 'database']
hosts[2][:roles] = ['agent']
hosts }
let( :dummy_class ) { Class.new { include Beaker::HostPrebuiltSteps
include Beaker::DSL::Patterns } }
context 'timesync' do
subject { dummy_class.new }
it "can sync time on unix hosts" do
hosts = make_hosts( { :platform => 'unix' } )
Beaker::Command.should_receive( :new ).with("ntpdate -t 20 #{ntpserver}").exactly( 3 ).times
subject.timesync( hosts, options )
end
it "can retry on failure on unix hosts" do
hosts = make_hosts( { :platform => 'unix', :exit_code => [1, 0] } )
subject.stub( :sleep ).and_return(true)
Beaker::Command.should_receive( :new ).with("ntpdate -t 20 #{ntpserver}").exactly( 6 ).times
subject.timesync( hosts, options )
end
it "eventually gives up and raises an error when unix hosts can't be synched" do
hosts = make_hosts( { :platform => 'unix', :exit_code => 1 } )
subject.stub( :sleep ).and_return(true)
Beaker::Command.should_receive( :new ).with("ntpdate -t 20 #{ntpserver}").exactly( 5 ).times
expect{ subject.timesync( hosts, options ) }.to raise_error
end
it "can sync time on solaris-10 hosts" do
hosts = make_hosts( { :platform => 'solaris-10' } )
Beaker::Command.should_receive( :new ).with("sleep 10 && ntpdate -w #{ntpserver}").exactly( 3 ).times
subject.timesync( hosts, options )
end
it "can sync time on windows hosts" do
hosts = make_hosts( { :platform => 'windows' } )
Beaker::Command.should_receive( :new ).with("w32tm /register").exactly( 3 ).times
Beaker::Command.should_receive( :new ).with("net start w32time").exactly( 3 ).times
Beaker::Command.should_receive( :new ).with("w32tm /config /manualpeerlist:#{ntpserver} /syncfromflags:manual /update").exactly( 3 ).times
Beaker::Command.should_receive( :new ).with("w32tm /resync").exactly( 3 ).times
subject.timesync( hosts, options )
end
it "can sync time on Sles hosts" do
hosts = make_hosts( { :platform => 'sles-13.1-x64' } )
Beaker::Command.should_receive( :new ).with("sntp #{ntpserver}").exactly( 3 ).times
subject.timesync( hosts, options )
end
end
context "epel_info_for!" do
subject { dummy_class.new }
it "can return the correct url for an el-6 host" do
host = make_host( 'testhost', { :platform => 'el-6-platform' } )
expect( subject.epel_info_for!( host )).to be === "http://mirror.itc.virginia.edu/fedora-epel/6/i386/epel-release-6-8.noarch.rpm"
end
it "can return the correct url for an el-5 host" do
host = make_host( 'testhost', { :platform => 'el-5-platform' } )
expect( subject.epel_info_for!( host )).to be === "http://archive.linux.duke.edu/pub/epel/5/i386/epel-release-5-4.noarch.rpm"
end
it "raises an error on non el-5/6 host" do
host = make_host( 'testhost', { :platform => 'el-4-platform' } )
expect{ subject.epel_info_for!( host )}.to raise_error
end
end
context "apt_get_update" do
subject { dummy_class.new }
it "can perform apt-get on ubuntu hosts" do
host = make_host( 'testhost', { :platform => 'ubuntu' } )
Beaker::Command.should_receive( :new ).with("apt-get update").once
subject.apt_get_update( host )
end
it "can perform apt-get on debian hosts" do
host = make_host( 'testhost', { :platform => 'debian' } )
Beaker::Command.should_receive( :new ).with("apt-get update").once
subject.apt_get_update( host )
end
it "does nothing on non debian/ubuntu hosts" do
host = make_host( 'testhost', { :platform => 'windows' } )
Beaker::Command.should_receive( :new ).never
subject.apt_get_update( host )
end
end
context "copy_file_to_remote" do
subject { dummy_class.new }
it "can copy a file to a remote host" do
content = "this is the content"
tempfilepath = "/path/to/tempfile"
filepath = "/path/to/file"
host = make_host( 'testhost', { :platform => 'windows' })
tempfile = double( 'tempfile' )
tempfile.stub( :path ).and_return( tempfilepath )
Tempfile.stub( :open ).and_yield( tempfile )
file = double( 'file' )
File.stub( :open ).and_yield( file )
file.should_receive( :puts ).with( content ).once
host.should_receive( :do_scp_to ).with( tempfilepath, filepath, subject.instance_variable_get( :@options ) ).once
subject.copy_file_to_remote(host, filepath, content)
end
end
context "proxy_config" do
subject { dummy_class.new }
it "correctly configures ubuntu hosts" do
hosts = make_hosts( { :platform => 'ubuntu', :exit_code => 1 } )
Beaker::Command.should_receive( :new ).with( "if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi" ).exactly( 3 )
hosts.each do |host|
subject.should_receive( :copy_file_to_remote ).with( host, '/etc/apt/apt.conf', apt_cfg ).once
subject.should_receive( :apt_get_update ).with( host ).once
end
subject.proxy_config( hosts, options )
end
it "correctly configures debian hosts" do
hosts = make_hosts( { :platform => 'debian' } )
Beaker::Command.should_receive( :new ).with( "if test -f /etc/apt/apt.conf; then mv /etc/apt/apt.conf /etc/apt/apt.conf.bk; fi" ).exactly( 3 ).times
hosts.each do |host|
subject.should_receive( :copy_file_to_remote ).with( host, '/etc/apt/apt.conf', apt_cfg ).once
subject.should_receive( :apt_get_update ).with( host ).once
end
subject.proxy_config( hosts, options )
end
it "correctly configures solaris-11 hosts" do
hosts = make_hosts( { :platform => 'solaris-11' } )
Beaker::Command.should_receive( :new ).with( "/usr/bin/pkg unset-publisher solaris || :" ).exactly( 3 ).times
hosts.each do |host|
Beaker::Command.should_receive( :new ).with( "/usr/bin/pkg set-publisher -g %s solaris" % ips_pkg_repo ).once
end
subject.proxy_config( hosts, options )
end
it "does nothing for non ubuntu/debian/solaris-11 hosts" do
hosts = make_hosts( { :platform => 'windows' } )
Beaker::Command.should_receive( :new ).never
subject.proxy_config( hosts, options )
end
end
context "add_el_extras" do
subject { dummy_class.new }
it "add extras for el-5/6 hosts" do
hosts = make_hosts( { :platform => 'el-5', :exit_code => 1 } )
hosts[0][:platform] = 'el-6'
url = "http://el_extras_url"
subject.stub( :epel_info_for! ).and_return( url )
Beaker::Command.should_receive( :new ).with("rpm -qa | grep epel-release").exactly( 3 ).times
Beaker::Command.should_receive( :new ).with("rpm -i #{url}").exactly( 3 ).times
Beaker::Command.should_receive( :new ).with("yum clean all && yum makecache").exactly( 3 ).times
subject.add_el_extras( hosts, options )
end
it "should do nothing for non el-5/6 hosts" do
hosts = make_hosts( { :platform => 'windows' } )
Beaker::Command.should_receive( :new ).never
subject.add_el_extras( hosts, options )
end
end
context "sync_root_keys" do
subject { dummy_class.new }
it "can sync keys on a solaris host" do
@platform = 'solaris'
Beaker::Command.should_receive( :new ).with( sync_cmd % "bash" ).exactly( 3 ).times
subject.sync_root_keys( hosts, options )
end
it "can sync keys on a non-solaris host" do
Beaker::Command.should_receive( :new ).with( sync_cmd % "env PATH=/usr/gnu/bin:$PATH bash" ).exactly( 3 ).times
subject.sync_root_keys( hosts, options )
end
end
context "validate_host" do
subject { dummy_class.new }
it "can validate unix hosts" do
hosts.each do |host|
unix_only_pkgs.each do |pkg|
host.should_receive( :check_for_package ).with( pkg ).once.and_return( false )
host.should_receive( :install_package ).with( pkg ).once
end
end
subject.validate_host(hosts, options)
end
it "can validate unix hosts that need sysstat installed" do
total_pkgs = Array.new(unix_only_pkgs);
total_pkgs << "sysstat"
hosts.each do |host|
total_pkgs.each do |pkg|
host.should_receive( :check_for_package ).with( pkg ).once.and_return( false )
host.should_receive( :install_package ).with( pkg ).once
end
end
opts = options.merge({:collect_perf_data => true})
subject.validate_host(hosts, opts)
end
it "can validate windows hosts" do
@platform = 'windows'
hosts.each do |host|
windows_pkgs.each do |pkg|
host.should_receive( :check_for_package ).with( pkg ).once.and_return( false )
host.should_receive( :install_package ).with( pkg ).once
end
end
subject.validate_host(hosts, options)
end
it "can validate SLES hosts" do
@platform = 'sles-13.1-x64'
hosts.each do |host|
sles_only_pkgs.each do |pkg|
host.should_receive( :check_for_package).with( pkg ).once.and_return( false )
host.should_receive( :install_package ).with( pkg ).once
end
end
subject.validate_host(hosts, options)
end
end
context 'get_domain_name' do
subject { dummy_class.new }
it "can find the domain for a host" do
host = make_host('name', { :stdout => "domain labs.lan d.labs.net dc1.labs.net labs.com\nnameserver 10.16.22.10\nnameserver 10.16.22.11" } )
Beaker::Command.should_receive( :new ).with( "cat /etc/resolv.conf" ).once
expect( subject.get_domain_name( host ) ).to be === "labs.lan"
end
it "can find the search for a host" do
host = make_host('name', { :stdout => "search labs.lan d.labs.net dc1.labs.net labs.com\nnameserver 10.16.22.10\nnameserver 10.16.22.11" } )
Beaker::Command.should_receive( :new ).with( "cat /etc/resolv.conf" ).once
expect( subject.get_domain_name( host ) ).to be === "labs.lan"
end
end
context "get_ip" do
subject { dummy_class.new }
it "can exec the get_ip command" do
host = make_host('name', { :stdout => "192.168.2.130\n" } )
Beaker::Command.should_receive( :new ).with( "ip a|awk '/global/{print$2}' | cut -d/ -f1 | head -1" ).once
expect( subject.get_ip( host ) ).to be === "192.168.2.130"
end
end
context "set_etc_hosts" do
subject { dummy_class.new }
it "can set the /etc/hosts string on a host" do
host = make_host('name', {})
etc_hosts = "127.0.0.1 localhost\n192.168.2.130 pe-ubuntu-lucid\n192.168.2.128 pe-centos6\n192.168.2.131 pe-debian6"
Beaker::Command.should_receive( :new ).with( "echo '#{etc_hosts}' > /etc/hosts" ).once
host.should_receive( :exec ).once
subject.set_etc_hosts(host, etc_hosts)
end
end
context "package_proxy" do
subject { dummy_class.new }
proxyurl = "http://192.168.2.100:3128"
it "can set proxy config on a debian/ubuntu host" do
host = make_host('name', { :platform => 'ubuntu' } )
Beaker::Command.should_receive( :new ).with( "echo 'Acquire::http::Proxy \"#{proxyurl}/\";' >> /etc/apt/apt.conf.d/10proxy" ).once
host.should_receive( :exec ).once
subject.package_proxy(host, options.merge( {'package_proxy' => proxyurl}) )
end
it "can set proxy config on a redhat/centos host" do
host = make_host('name', { :platform => 'centos' } )
Beaker::Command.should_receive( :new ).with( "echo 'proxy=#{proxyurl}/' >> /etc/yum.conf" ).once
host.should_receive( :exec ).once
subject.package_proxy(host, options.merge( {'package_proxy' => proxyurl}) )
end
end
end
| 1 | 7,213 | This is no longer needed and should be removed. | voxpupuli-beaker | rb |
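The requested cleanup, sketched against the diff: keep only the `module_eval` reset and drop the commented-out `class_variable_set` line.

```ruby
before(:each) do
  # Must reset additional_pkgs between each test as it hangs around
  Beaker::HostPrebuiltSteps.module_eval(%q{@@additional_pkgs = []})
end
```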
@@ -57,8 +57,12 @@ func ParseFlags(ctx *cli.Context) service.Options {
}
// ParseJSONOptions function fills in Openvpn options from JSON request
-func ParseJSONOptions(request json.RawMessage) (service.Options, error) {
+func ParseJSONOptions(request *json.RawMessage) (service.Options, error) {
+ if request == nil {
+ return Options{}, nil
+ }
+
var opts Options
- err := json.Unmarshal(request, &opts)
+ err := json.Unmarshal(*request, &opts)
return opts, err
} | 1 | /*
* Copyright (C) 2018 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package service
import (
"encoding/json"
"github.com/mysteriumnetwork/node/core/service"
"github.com/urfave/cli"
)
// Options describes options which are required to start Openvpn service
type Options struct {
OpenvpnProtocol string `json:"protocol"`
OpenvpnPort int `json:"port"`
}
var (
protocolFlag = cli.StringFlag{
Name: "openvpn.proto",
Usage: "Openvpn protocol to use. Options: { udp, tcp }",
Value: "udp",
}
portFlag = cli.IntFlag{
Name: "openvpn.port",
Usage: "Openvpn port to use. Default 1194",
Value: 1194,
}
)
// RegisterFlags function register Openvpn flags to flag list
func RegisterFlags(flags *[]cli.Flag) {
*flags = append(*flags, protocolFlag, portFlag)
}
// ParseFlags function fills in Openvpn options from CLI context
func ParseFlags(ctx *cli.Context) service.Options {
return Options{
OpenvpnProtocol: ctx.String(protocolFlag.Name),
OpenvpnPort: ctx.Int(portFlag.Name),
}
}
// ParseJSONOptions function fills in Openvpn options from JSON request
func ParseJSONOptions(request json.RawMessage) (service.Options, error) {
var opts Options
err := json.Unmarshal(request, &opts)
return opts, err
}
| 1 | 13,727 | could use named return params here to avoid declaring the opts, and using naked returns instead. | mysteriumnetwork-node | go |
@@ -84,6 +84,11 @@ module.exports = class Instagram extends Plugin {
}
getItemIcon (item) {
+ if (!item.images) {
+ return <svg viewBox="0 0 58 58" opacity="0.6">
+ <path d="M36.537 28.156l-11-7a1.005 1.005 0 0 0-1.02-.033C24.2 21.3 24 21.635 24 22v14a1 1 0 0 0 1.537.844l11-7a1.002 1.002 0 0 0 0-1.688zM26 34.18V23.82L34.137 29 26 34.18z" /><path d="M57 6H1a1 1 0 0 0-1 1v44a1 1 0 0 0 1 1h56a1 1 0 0 0 1-1V7a1 1 0 0 0-1-1zM10 28H2v-9h8v9zm-8 2h8v9H2v-9zm10 10V8h34v42H12V40zm44-12h-8v-9h8v9zm-8 2h8v9h-8v-9zm8-22v9h-8V8h8zM2 8h8v9H2V8zm0 42v-9h8v9H2zm54 0h-8v-9h8v9z" />
+ </svg>
+ }
return <img src={item.images.thumbnail.url} />
}
| 1 | const Plugin = require('../../core/Plugin')
const { Provider } = require('../../server')
const { ProviderView } = require('../../views')
const { h } = require('preact')
module.exports = class Instagram extends Plugin {
constructor (uppy, opts) {
super(uppy, opts)
this.type = 'acquirer'
this.id = this.opts.id || 'Instagram'
this.title = 'Instagram'
this.icon = () => (
<svg aria-hidden="true" class="UppyIcon" width="28" height="28" viewBox="0 0 512 512">
<path d="M256,49.471c67.266,0,75.233.257,101.8,1.469,24.562,1.121,37.9,5.224,46.778,8.674a78.052,78.052,0,0,1,28.966,18.845,78.052,78.052,0,0,1,18.845,28.966c3.45,8.877,7.554,22.216,8.674,46.778,1.212,26.565,1.469,34.532,1.469,101.8s-0.257,75.233-1.469,101.8c-1.121,24.562-5.225,37.9-8.674,46.778a83.427,83.427,0,0,1-47.811,47.811c-8.877,3.45-22.216,7.554-46.778,8.674-26.56,1.212-34.527,1.469-101.8,1.469s-75.237-.257-101.8-1.469c-24.562-1.121-37.9-5.225-46.778-8.674a78.051,78.051,0,0,1-28.966-18.845,78.053,78.053,0,0,1-18.845-28.966c-3.45-8.877-7.554-22.216-8.674-46.778-1.212-26.564-1.469-34.532-1.469-101.8s0.257-75.233,1.469-101.8c1.121-24.562,5.224-37.9,8.674-46.778A78.052,78.052,0,0,1,78.458,78.458a78.053,78.053,0,0,1,28.966-18.845c8.877-3.45,22.216-7.554,46.778-8.674,26.565-1.212,34.532-1.469,101.8-1.469m0-45.391c-68.418,0-77,.29-103.866,1.516-26.815,1.224-45.127,5.482-61.151,11.71a123.488,123.488,0,0,0-44.62,29.057A123.488,123.488,0,0,0,17.3,90.982C11.077,107.007,6.819,125.319,5.6,152.134,4.369,179,4.079,187.582,4.079,256S4.369,333,5.6,359.866c1.224,26.815,5.482,45.127,11.71,61.151a123.489,123.489,0,0,0,29.057,44.62,123.486,123.486,0,0,0,44.62,29.057c16.025,6.228,34.337,10.486,61.151,11.71,26.87,1.226,35.449,1.516,103.866,1.516s77-.29,103.866-1.516c26.815-1.224,45.127-5.482,61.151-11.71a128.817,128.817,0,0,0,73.677-73.677c6.228-16.025,10.486-34.337,11.71-61.151,1.226-26.87,1.516-35.449,1.516-103.866s-0.29-77-1.516-103.866c-1.224-26.815-5.482-45.127-11.71-61.151a123.486,123.486,0,0,0-29.057-44.62A123.487,123.487,0,0,0,421.018,17.3C404.993,11.077,386.681,6.819,359.866,5.6,333,4.369,324.418,4.079,256,4.079h0Z" />
<path d="M256,126.635A129.365,129.365,0,1,0,385.365,256,129.365,129.365,0,0,0,256,126.635Zm0,213.338A83.973,83.973,0,1,1,339.974,256,83.974,83.974,0,0,1,256,339.973Z" />
<circle cx="390.476" cy="121.524" r="30.23" />
</svg>
)
this[this.id] = new Provider(uppy, {
host: this.opts.host,
provider: 'instagram',
authProvider: 'instagram'
})
this.files = []
this.onAuth = this.onAuth.bind(this)
this.render = this.render.bind(this)
// set default options
const defaultOptions = {}
// merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
}
install () {
this.view = new ProviderView(this, {
viewType: 'grid',
showTitles: false,
showFilter: false,
showBreadcrumbs: false
})
// Set default state for Instagram
this.setPluginState({
authenticated: false,
files: [],
folders: [],
directories: [],
activeRow: -1,
filterInput: '',
isSearchVisible: false
})
const target = this.opts.target
if (target) {
this.mount(target, this)
}
}
uninstall () {
this.view.tearDown()
this.unmount()
}
onAuth (authenticated) {
this.setPluginState({ authenticated })
if (authenticated) {
this.view.getFolder('recent')
}
}
getUsername (data) {
return data.data[0].user.username
}
isFolder (item) {
return false
}
getItemData (item) {
return item
}
getItemIcon (item) {
return <img src={item.images.thumbnail.url} />
}
getItemSubList (item) {
const subItems = []
item.data.forEach((subItem) => {
if (subItem.carousel_media) {
subItem.carousel_media.forEach((i, index) => {
const { id, created_time } = subItem
const newSubItem = Object.assign({}, i, { id, created_time })
newSubItem.carousel_id = index
subItems.push(newSubItem)
})
} else {
subItems.push(subItem)
}
})
return subItems
}
getItemName (item) {
if (item && item['created_time']) {
const ext = item.type === 'video' ? 'mp4' : 'jpeg'
let date = new Date(item['created_time'] * 1000)
date = date.toLocaleDateString([], {
year: 'numeric',
month: 'short',
day: 'numeric',
hour: 'numeric',
minute: 'numeric'
})
// adding both date and carousel_id, so the name is unique
return `Instagram ${date} ${item.carousel_id || ''}.${ext}`
}
return ''
}
getMimeType (item) {
return item.type === 'video' ? 'video/mp4' : 'image/jpeg'
}
getItemId (item) {
return `${item.id}${item.carousel_id || ''}`
}
getItemRequestPath (item) {
const suffix = isNaN(item.carousel_id) ? '' : `?carousel_id=${item.carousel_id}`
return `${item.id}${suffix}`
}
getItemModifiedDate (item) {
return item.created_time
}
getItemThumbnailUrl (item) {
return item.images.thumbnail.url
}
getNextPagePath () {
const { files } = this.getPluginState()
return `recent?max_id=${this.getItemId(files[files.length - 1])}`
}
render (state) {
return this.view.render(state)
}
}
| 1 | 10,773 | this is an unrelated fix. I noticed when an instagram carousel post is mixed with images and videos, the videos don't come with thumbnails, so I am adding a fallback thumbnail for this case. | transloadit-uppy | js |
@@ -58,12 +58,14 @@ type syncChainSelector interface {
// IsHeavier returns true if tipset a is heavier than tipset b and false if
// tipset b is heavier than tipset a.
IsHeavier(ctx context.Context, a, b types.TipSet, aStateID, bStateID cid.Cid) (bool, error)
+ // NewWeight returns the weight of a tipset
+ NewWeight(ctx context.Context, ts types.TipSet, stRoot cid.Cid) (uint64, error)
}
type syncStateEvaluator interface {
// RunStateTransition returns the state root CID resulting from applying the input ts to the
// prior `stateRoot`. It returns an error if the transition is invalid.
- RunStateTransition(ctx context.Context, ts types.TipSet, tsMessages [][]*types.SignedMessage, tsReceipts [][]*types.MessageReceipt, ancestors []types.TipSet, stateID cid.Cid) (cid.Cid, error)
+ RunStateTransition(ctx context.Context, ts types.TipSet, tsMessages [][]*types.SignedMessage, tsReceipts [][]*types.MessageReceipt, ancestors []types.TipSet, parentWeight uint64, stateID cid.Cid) (cid.Cid, error)
}
// Syncer updates its chain.Store according to the methods of its | 1 | package chain
import (
"context"
"sync"
"github.com/ipfs/go-cid"
logging "github.com/ipfs/go-log"
"github.com/pkg/errors"
"go.opencensus.io/trace"
"github.com/filecoin-project/go-filecoin/clock"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/metrics"
"github.com/filecoin-project/go-filecoin/metrics/tracing"
"github.com/filecoin-project/go-filecoin/net"
"github.com/filecoin-project/go-filecoin/types"
)
var reorgCnt *metrics.Int64Counter
func init() {
reorgCnt = metrics.NewInt64Counter("chain/reorg_count", "The number of reorgs that have occurred.")
}
// UntrustedChainHeightLimit is the maximum number of blocks ahead of the current consensus
// chain height to accept if syncing without trust.
var UntrustedChainHeightLimit = 600
var (
// ErrChainHasBadTipSet is returned when the syncer traverses a chain with a cached bad tipset.
ErrChainHasBadTipSet = errors.New("input chain contains a cached bad tipset")
// ErrNewChainTooLong is returned when processing a fork that split off from the main chain too many blocks ago.
ErrNewChainTooLong = errors.New("input chain forked from best chain too far in the past")
// ErrUnexpectedStoreState indicates that the syncer's chain store is violating expected invariants.
ErrUnexpectedStoreState = errors.New("the chain store is in an unexpected state")
)
var syncOneTimer *metrics.Float64Timer
func init() {
syncOneTimer = metrics.NewTimerMs("syncer/sync_one", "Duration of single tipset validation in milliseconds")
}
var logSyncer = logging.Logger("chain.syncer")
type syncerChainReaderWriter interface {
GetHead() types.TipSetKey
GetTipSet(tsKey types.TipSetKey) (types.TipSet, error)
GetTipSetStateRoot(tsKey types.TipSetKey) (cid.Cid, error)
HasTipSetAndState(ctx context.Context, tsKey types.TipSetKey) bool
PutTipSetAndState(ctx context.Context, tsas *TipSetAndState) error
SetHead(ctx context.Context, s types.TipSet) error
HasTipSetAndStatesWithParentsAndHeight(pTsKey types.TipSetKey, h uint64) bool
GetTipSetAndStatesByParentsAndHeight(pTsKey types.TipSetKey, h uint64) ([]*TipSetAndState, error)
}
type syncChainSelector interface {
// IsHeavier returns true if tipset a is heavier than tipset b and false if
// tipset b is heavier than tipset a.
IsHeavier(ctx context.Context, a, b types.TipSet, aStateID, bStateID cid.Cid) (bool, error)
}
type syncStateEvaluator interface {
// RunStateTransition returns the state root CID resulting from applying the input ts to the
// prior `stateRoot`. It returns an error if the transition is invalid.
RunStateTransition(ctx context.Context, ts types.TipSet, tsMessages [][]*types.SignedMessage, tsReceipts [][]*types.MessageReceipt, ancestors []types.TipSet, stateID cid.Cid) (cid.Cid, error)
}
// Syncer updates its chain.Store according to the methods of its
// consensus.Protocol. It uses a bad tipset cache and a limit on new
// blocks to traverse during chain collection. The Syncer can query the
// network for blocks. The Syncer maintains the following invariant on
// its store: all tipsets that pass the syncer's validity checks are added to the
// chain store along with their state root CID.
//
// Ideally the code that syncs the chain according to consensus rules should
// be independent of any particular implementation of consensus. Currently the
// Syncer is coupled to details of Expected Consensus. This dependence
// exists in the widen function, the fact that widen is called on only one
// tipset in the incoming chain, and assumptions regarding the existence of
// grandparent state in the store.
type Syncer struct {
// This mutex ensures at most one call to HandleNewTipSet executes at
// any time. This is important because at least two sections of the
// code otherwise have races:
// 1. syncOne assumes that chainStore.Head() does not change when
// comparing tipset weights and updating the store
// 2. HandleNewTipSet assumes that calls to widen and then syncOne
// are not run concurrently with other calls to widen to ensure
// that the syncer always finds the heaviest existing tipset.
mu sync.Mutex
// fetcher is the networked block fetching service for fetching blocks
// and messages.
fetcher net.Fetcher
// badTipSetCache is used to filter out collections of invalid blocks.
badTipSets *badTipSetCache
// Evaluates tipset messages and stores the resulting states.
stateEvaluator syncStateEvaluator
// Selects the heaviest of two chains
chainSelector syncChainSelector
// Provides and stores validated tipsets and their state roots.
chainStore syncerChainReaderWriter
// Provides message collections given cids
messageProvider MessageProvider
clock clock.Clock
// Reporter is used by the syncer to update the current status of the chain.
reporter Reporter
}
// NewSyncer constructs a Syncer ready for use.
func NewSyncer(e syncStateEvaluator, cs syncChainSelector, s syncerChainReaderWriter, m MessageProvider, f net.Fetcher, sr Reporter, c clock.Clock) *Syncer {
return &Syncer{
fetcher: f,
badTipSets: &badTipSetCache{
bad: make(map[string]struct{}),
},
stateEvaluator: e,
chainSelector: cs,
chainStore: s,
messageProvider: m,
clock: c,
reporter: sr,
}
}
// syncOne syncs a single tipset with the chain store. syncOne calculates the
// parent state of the tipset and calls into consensus to run a state transition
// in order to validate the tipset. In the case the input tipset is valid,
// syncOne calls into consensus to check its weight, and then updates the head
// of the store if this tipset is the heaviest.
//
// Precondition: the caller of syncOne must hold the syncer's lock (syncer.mu) to
// ensure head is not modified by another goroutine during run.
func (syncer *Syncer) syncOne(ctx context.Context, parent, next types.TipSet) error {
priorHeadKey := syncer.chainStore.GetHead()
// if tipset is already priorHeadKey, we've been here before. do nothing.
if priorHeadKey.Equals(next.Key()) {
return nil
}
stopwatch := syncOneTimer.Start(ctx)
defer stopwatch.Stop(ctx)
// Lookup parent state root. It is guaranteed by the syncer that it is in the chainStore.
stateRoot, err := syncer.chainStore.GetTipSetStateRoot(parent.Key())
if err != nil {
return err
}
// Gather ancestor chain needed to process state transition.
h, err := next.Height()
if err != nil {
return err
}
ancestorHeight := types.NewBlockHeight(h).Sub(types.NewBlockHeight(consensus.AncestorRoundsNeeded))
ancestors, err := GetRecentAncestors(ctx, parent, syncer.chainStore, ancestorHeight)
if err != nil {
return err
}
var nextMessages [][]*types.SignedMessage
var nextReceipts [][]*types.MessageReceipt
for i := 0; i < next.Len(); i++ {
blk := next.At(i)
msgs, err := syncer.messageProvider.LoadMessages(ctx, blk.Messages)
if err != nil {
return errors.Wrapf(err, "syncing tip %s failed loading message list %s for block %s", next.Key(), blk.Messages, blk.Cid())
}
rcpts, err := syncer.messageProvider.LoadReceipts(ctx, blk.MessageReceipts)
if err != nil {
return errors.Wrapf(err, "syncing tip %s failed loading receipts list %s for block %s", next.Key(), blk.MessageReceipts, blk.Cid())
}
nextMessages = append(nextMessages, msgs)
nextReceipts = append(nextReceipts, rcpts)
}
// Run a state transition to validate the tipset and compute
// a new state to add to the store.
root, err := syncer.stateEvaluator.RunStateTransition(ctx, next, nextMessages, nextReceipts, ancestors, stateRoot)
if err != nil {
return err
}
err = syncer.chainStore.PutTipSetAndState(ctx, &TipSetAndState{
TipSet: next,
TipSetStateRoot: root,
})
if err != nil {
return err
}
logSyncer.Debugf("Successfully updated store with %s", next.String())
// TipSet is validated and added to store, now check if it is the heaviest.
nextParentStateID, err := syncer.chainStore.GetTipSetStateRoot(parent.Key())
if err != nil {
return err
}
headTipSet, err := syncer.chainStore.GetTipSet(priorHeadKey)
if err != nil {
return err
}
headParentKey, err := headTipSet.Parents()
if err != nil {
return err
}
var headParentStateID cid.Cid
if !headParentKey.Empty() { // head is not genesis
headParentStateID, err = syncer.chainStore.GetTipSetStateRoot(headParentKey)
if err != nil {
return err
}
}
heavier, err := syncer.chainSelector.IsHeavier(ctx, next, headTipSet, nextParentStateID, headParentStateID)
if err != nil {
return err
}
// If it is the heaviest update the chainStore.
if heavier {
if err = syncer.chainStore.SetHead(ctx, next); err != nil {
return err
}
// Gather the entire new chain for reorg comparison and logging.
syncer.logReorg(ctx, headTipSet, next)
}
return nil
}
func (syncer *Syncer) logReorg(ctx context.Context, curHead, newHead types.TipSet) {
curHeadIter := IterAncestors(ctx, syncer.chainStore, curHead)
newHeadIter := IterAncestors(ctx, syncer.chainStore, newHead)
commonAncestor, err := FindCommonAncestor(curHeadIter, newHeadIter)
if err != nil {
// Should never get here because reorgs should always have a
// common ancestor..
logSyncer.Warningf("unexpected error when running FindCommonAncestor for reorg log: %s", err.Error())
return
}
reorg := IsReorg(curHead, newHead, commonAncestor)
if reorg {
reorgCnt.Inc(ctx, 1)
dropped, added, err := ReorgDiff(curHead, newHead, commonAncestor)
if err == nil {
logSyncer.Infof("reorg dropping %d height and adding %d height from %s to %s", dropped, added, curHead.String(), newHead.String())
} else {
logSyncer.Infof("reorg from %s to %s", curHead.String(), newHead.String())
logSyncer.Errorf("unexpected error from ReorgDiff during log: %s", err.Error())
}
}
}
// widen computes a tipset implied by the input tipset and the store that
// could potentially be the heaviest tipset. In the context of EC, widen
// returns the union of the input tipset and the biggest tipset with the same
// parents from the store.
// TODO: this leaks EC abstractions into the syncer, we should think about this.
func (syncer *Syncer) widen(ctx context.Context, ts types.TipSet) (types.TipSet, error) {
// Lookup tipsets with the same parents from the store.
parentSet, err := ts.Parents()
if err != nil {
return types.UndefTipSet, err
}
height, err := ts.Height()
if err != nil {
return types.UndefTipSet, err
}
if !syncer.chainStore.HasTipSetAndStatesWithParentsAndHeight(parentSet, height) {
return types.UndefTipSet, nil
}
candidates, err := syncer.chainStore.GetTipSetAndStatesByParentsAndHeight(parentSet, height)
if err != nil {
return types.UndefTipSet, err
}
if len(candidates) == 0 {
return types.UndefTipSet, nil
}
// Only take the tipset with the most blocks (this is EC specific logic)
max := candidates[0].TipSet
for _, candidate := range candidates[0:] {
if candidate.TipSet.Len() > max.Len() {
max = candidate.TipSet
}
}
// Form a new tipset from the union of ts and the largest in the store, de-duped.
var blockSlice []*types.Block
blockCids := make(map[cid.Cid]struct{})
for i := 0; i < ts.Len(); i++ {
blk := ts.At(i)
blockCids[blk.Cid()] = struct{}{}
blockSlice = append(blockSlice, blk)
}
for i := 0; i < max.Len(); i++ {
blk := max.At(i)
if _, found := blockCids[blk.Cid()]; !found {
blockSlice = append(blockSlice, blk)
blockCids[blk.Cid()] = struct{}{}
}
}
wts, err := types.NewTipSet(blockSlice...)
if err != nil {
return types.UndefTipSet, err
}
// check that the tipset is distinct from the input and tipsets from the store.
if wts.String() == ts.String() || wts.String() == max.String() {
return types.UndefTipSet, nil
}
return wts, nil
}
// HandleNewTipSet extends the Syncer's chain store with the given tipset if they
// represent a valid extension. It limits the length of new chains it will
// attempt to validate and caches invalid blocks it has encountered to
// help prevent DOS.
func (syncer *Syncer) HandleNewTipSet(ctx context.Context, ci *types.ChainInfo, trusted bool) (err error) {
logSyncer.Debugf("Begin fetch and sync of chain with head %v", ci.Head)
ctx, span := trace.StartSpan(ctx, "Syncer.HandleNewTipSet")
span.AddAttributes(trace.StringAttribute("tipset", ci.Head.String()))
defer tracing.AddErrorEndSpan(ctx, span, &err)
// This lock could last a long time as we fetch all the blocks needed to block the chain.
// This is justified because the app is pretty useless until it is synced.
// It's better for multiple calls to wait here than to try to fetch the chain independently.
syncer.mu.Lock()
defer syncer.mu.Unlock()
// If the store already has this tipset then the syncer is finished.
if syncer.chainStore.HasTipSetAndState(ctx, ci.Head) {
return nil
}
curHead, err := syncer.chainStore.GetTipSet(syncer.chainStore.GetHead())
if err != nil {
return err
}
curHeight, err := curHead.Height()
if err != nil {
return err
}
syncer.reporter.UpdateStatus(syncingStarted(syncer.clock.Now().Unix()), syncHead(ci.Head), syncHeight(ci.Height), syncTrusted(trusted), syncComplete(false))
defer syncer.reporter.UpdateStatus(syncComplete(true))
// If we do not trust the peer head check finality
if !trusted && ExceedsUntrustedChainLength(curHeight, ci.Height) {
return ErrNewChainTooLong
}
syncer.reporter.UpdateStatus(syncFetchComplete(false))
chain, err := syncer.fetcher.FetchTipSets(ctx, ci.Head, ci.Peer, func(t types.TipSet) (bool, error) {
parents, err := t.Parents()
if err != nil {
return true, err
}
height, err := t.Height()
if err != nil {
return false, err
}
// update status with latest fetched head and height
syncer.reporter.UpdateStatus(fetchHead(t.Key()), fetchHeight(height))
return syncer.chainStore.HasTipSetAndState(ctx, parents), nil
})
syncer.reporter.UpdateStatus(syncFetchComplete(true))
if err != nil {
return err
}
// Fetcher returns chain in Traversal order, reverse it to height order
Reverse(chain)
parentCids, err := chain[0].Parents()
if err != nil {
return err
}
parent, err := syncer.chainStore.GetTipSet(parentCids)
if err != nil {
return err
}
// Try adding the tipsets of the chain to the store, checking for new
// heaviest tipsets.
for i, ts := range chain {
// TODO: this "i==0" leaks EC specifics into syncer abstraction
// for the sake of efficiency, consider plugging up this leak.
var wts types.TipSet
if i == 0 {
wts, err = syncer.widen(ctx, ts)
if err != nil {
return err
}
if wts.Defined() {
logSyncer.Debug("attempt to sync after widen")
err = syncer.syncOne(ctx, parent, wts)
if err != nil {
return err
}
}
}
// If the chain has length greater than 1, then we need to sync each tipset
// in the chain in order to process the chain fully, including the non-widened
// first tipset.
		// If the chain has length == 1, we can avoid processing the non-widened tipset
// as a performance optimization, because this tipset cannot be heavier
// than the widened first tipset.
if !wts.Defined() || len(chain) > 1 {
err = syncer.syncOne(ctx, parent, ts)
if err != nil {
// While `syncOne` can indeed fail for reasons other than consensus,
// adding to the badTipSets at this point is the simplest, since we
// have access to the chain. If syncOne fails for non-consensus reasons,
// there is no assumption that the running node's data is valid at all,
// so we don't really lose anything with this simplification.
syncer.badTipSets.AddChain(chain[i:])
return err
}
}
if i%500 == 0 {
logSyncer.Infof("processing block %d of %v for chain with head at %v", i, len(chain), ci.Head.String())
}
parent = ts
}
return nil
}
// Status returns the current chain status.
func (syncer *Syncer) Status() Status {
return syncer.reporter.Status()
}
// ExceedsUntrustedChainLength returns true if the delta between curHeight and newHeight
// exceeds the maximum number of blocks to accept if syncing without trust, false otherwise.
func ExceedsUntrustedChainLength(curHeight, newHeight uint64) bool {
maxChainLength := curHeight + uint64(UntrustedChainHeightLimit)
return newHeight > maxChainLength
}
| 1 | 21,766 | "... after protocol version 1"? | filecoin-project-venus | go |
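The `widen` routine in the record above unions the incoming tipset with the largest same-parents, same-height tipset already in the store, de-duplicating blocks by CID and discarding the result unless it differs from both inputs. A minimal Python sketch of that union step (`Block` and `widen_union` are stand-in names for illustration, not part of the project):

```python
from collections import namedtuple

Block = namedtuple("Block", "cid")  # stand-in for a full block header

def widen_union(ts_blocks, store_blocks):
    """Union two block lists, de-duplicated by CID, keeping the incoming
    tipset's blocks first (mirrors the two loops in widen above)."""
    seen, union = set(), []
    for blk in list(ts_blocks) + list(store_blocks):
        if blk.cid not in seen:
            seen.add(blk.cid)
            union.append(blk)
    return union

ts = [Block("a"), Block("b")]
stored = [Block("b"), Block("c")]
assert [b.cid for b in widen_union(ts, stored)] == ["a", "b", "c"]
```

The Go code then builds a tipset from the union and only syncs it when it is distinct from both the input tipset and the stored one, since only a genuinely wider tipset can be heavier.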
@@ -32,7 +32,7 @@ from scapy.compat import (
_mib_re_integer = re.compile(r"^[0-9]+$")
_mib_re_both = re.compile(r"^([a-zA-Z_][a-zA-Z0-9_-]*)\(([0-9]+)\)$")
_mib_re_oiddecl = re.compile(
- r"$\s*([a-zA-Z0-9_-]+)\s+OBJECT([^:\{\}]|\{[^:]+\})+::=\s*\{([^\}]+)\}", re.M)
+ r"$\s*([a-zA-Z0-9_-]+)\s+OBJECT[^:\{\}]+::=\s*\{([^\}]+)\}", re.M)
_mib_re_strings = re.compile(r'"[^"]*"')
_mib_re_comments = re.compile(r'--.*(\r|\n)')
| 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# Modified by Maxence Tury <[email protected]>
# This program is published under a GPLv2 license
"""
Management Information Base (MIB) parsing
"""
from __future__ import absolute_import
import re
from glob import glob
from scapy.dadict import DADict, fixname
from scapy.config import conf
from scapy.utils import do_graph
import scapy.modules.six as six
from scapy.compat import plain_str
from scapy.compat import (
Any,
Dict,
List,
Optional,
Tuple,
)
#################
# MIB parsing #
#################
_mib_re_integer = re.compile(r"^[0-9]+$")
_mib_re_both = re.compile(r"^([a-zA-Z_][a-zA-Z0-9_-]*)\(([0-9]+)\)$")
_mib_re_oiddecl = re.compile(
r"$\s*([a-zA-Z0-9_-]+)\s+OBJECT([^:\{\}]|\{[^:]+\})+::=\s*\{([^\}]+)\}", re.M)
_mib_re_strings = re.compile(r'"[^"]*"')
_mib_re_comments = re.compile(r'--.*(\r|\n)')
class MIBDict(DADict[str, str]):
def _findroot(self, x):
# type: (str) -> Tuple[str, str, str]
"""Internal MIBDict function used to find a partial OID"""
if x.startswith("."):
x = x[1:]
if not x.endswith("."):
x += "."
max = 0
root = "."
root_key = ""
for k in six.iterkeys(self):
if x.startswith(k + "."):
if max < len(k):
max = len(k)
root = self[k]
root_key = k
return root, root_key, x[max:-1]
def _oidname(self, x):
# type: (str) -> str
"""Deduce the OID name from its OID ID"""
root, _, remainder = self._findroot(x)
return root + remainder
def _oid(self, x):
# type: (str) -> str
"""Parse the OID id/OID generator, and return real OID"""
xl = x.strip(".").split(".")
p = len(xl) - 1
while p >= 0 and _mib_re_integer.match(xl[p]):
p -= 1
if p != 0 or xl[p] not in six.itervalues(self.d):
return x
xl[p] = next(k for k, v in six.iteritems(self.d) if v == xl[p])
return ".".join(xl[p:])
def _make_graph(self, other_keys=None, **kargs):
# type: (Optional[Any], **Any) -> None
if other_keys is None:
other_keys = []
nodes = [(self[key], key) for key in self.iterkeys()]
oids = set(self.iterkeys())
for k in other_keys:
if k not in oids:
nodes.append((self._oidname(k), k))
s = 'digraph "mib" {\n\trankdir=LR;\n\n'
for k, o in nodes:
s += '\t"%s" [ label="%s" ];\n' % (o, k)
s += "\n"
for k, o in nodes:
parent, parent_key, remainder = self._findroot(o[:-1])
remainder = remainder[1:] + o[-1]
if parent != ".":
parent = parent_key
s += '\t"%s" -> "%s" [label="%s"];\n' % (parent, o, remainder)
s += "}\n"
do_graph(s, **kargs)
def _mib_register(ident, # type: str
value, # type: List[str]
the_mib, # type: Dict[str, List[str]]
unresolved, # type: Dict[str, List[str]]
alias, # type: Dict[str, str]
):
# type: (...) -> bool
"""
Internal function used to register an OID and its name in a MIBDict
"""
if ident in the_mib:
# We have already resolved this one. Store the alias
alias[".".join(value)] = ident
return True
if ident in unresolved:
# We know we can't resolve this one
return False
resval = []
not_resolved = 0
# Resolve the OID
# (e.g. 2.basicConstraints.3 -> 2.2.5.29.19.3)
for v in value:
if _mib_re_integer.match(v):
resval.append(v)
else:
v = fixname(plain_str(v))
if v not in the_mib:
not_resolved = 1
if v in the_mib:
resval += the_mib[v]
elif v in unresolved:
resval += unresolved[v]
else:
resval.append(v)
if not_resolved:
# Unresolved
unresolved[ident] = resval
return False
else:
# Fully resolved
the_mib[ident] = resval
keys = list(unresolved)
i = 0
# Go through the unresolved to update the ones that
# depended on the one we just did
while i < len(keys):
k = keys[i]
if _mib_register(k, unresolved[k], the_mib, {}, alias):
# Now resolved: we can remove it from unresolved
del(unresolved[k])
del(keys[i])
i = 0
else:
i += 1
return True
def load_mib(filenames):
# type: (str) -> None
"""
Load the conf.mib dict from a list of filenames
"""
the_mib = {'iso': ['1']}
unresolved = {} # type: Dict[str, List[str]]
alias = {} # type: Dict[str, str]
# Export the current MIB to a working dictionary
for k in six.iterkeys(conf.mib):
_mib_register(conf.mib[k], k.split("."), the_mib, unresolved, alias)
# Read the files
if isinstance(filenames, (str, bytes)):
files_list = [filenames]
else:
files_list = filenames
for fnames in files_list:
for fname in glob(fnames):
with open(fname) as f:
text = f.read()
cleantext = " ".join(
_mib_re_strings.split(" ".join(_mib_re_comments.split(text)))
)
for m in _mib_re_oiddecl.finditer(cleantext):
gr = m.groups()
ident, oid_s = gr[0], gr[-1]
ident = fixname(ident)
oid_l = oid_s.split()
for i, elt in enumerate(oid_l):
m2 = _mib_re_both.match(elt)
if m2:
oid_l[i] = m2.groups()[1]
_mib_register(ident, oid_l, the_mib, unresolved, alias)
# Create the new MIB
newmib = MIBDict(_name="MIB")
# Add resolved values
for oid, key in six.iteritems(the_mib):
newmib[".".join(key)] = oid
# Add unresolved values
for oid, key in six.iteritems(unresolved):
newmib[".".join(key)] = oid
# Add aliases
for key, oid in six.iteritems(alias):
newmib[key] = oid
conf.mib = newmib
####################
# OID references #
####################
# pkcs1 #
pkcs1_oids = {
"1.2.840.113549.1.1.1": "rsaEncryption",
"1.2.840.113549.1.1.2": "md2WithRSAEncryption",
"1.2.840.113549.1.1.3": "md4WithRSAEncryption",
"1.2.840.113549.1.1.4": "md5WithRSAEncryption",
"1.2.840.113549.1.1.5": "sha1-with-rsa-signature",
"1.2.840.113549.1.1.6": "rsaOAEPEncryptionSET",
"1.2.840.113549.1.1.7": "id-RSAES-OAEP",
"1.2.840.113549.1.1.8": "id-mgf1",
"1.2.840.113549.1.1.9": "id-pSpecified",
"1.2.840.113549.1.1.10": "rsassa-pss",
"1.2.840.113549.1.1.11": "sha256WithRSAEncryption",
"1.2.840.113549.1.1.12": "sha384WithRSAEncryption",
"1.2.840.113549.1.1.13": "sha512WithRSAEncryption",
"1.2.840.113549.1.1.14": "sha224WithRSAEncryption"
}
# secsig oiw #
secsig_oids = {
"1.3.14.3.2.26": "sha1"
}
# pkcs9 #
pkcs9_oids = {
"1.2.840.113549.1.9.0": "modules",
"1.2.840.113549.1.9.1": "emailAddress",
"1.2.840.113549.1.9.2": "unstructuredName",
"1.2.840.113549.1.9.3": "contentType",
"1.2.840.113549.1.9.4": "messageDigest",
"1.2.840.113549.1.9.5": "signing-time",
"1.2.840.113549.1.9.6": "countersignature",
"1.2.840.113549.1.9.7": "challengePassword",
"1.2.840.113549.1.9.8": "unstructuredAddress",
"1.2.840.113549.1.9.9": "extendedCertificateAttributes",
"1.2.840.113549.1.9.13": "signingDescription",
"1.2.840.113549.1.9.14": "extensionRequest",
"1.2.840.113549.1.9.15": "smimeCapabilities",
"1.2.840.113549.1.9.16": "smime",
"1.2.840.113549.1.9.17": "pgpKeyID",
"1.2.840.113549.1.9.20": "friendlyName",
"1.2.840.113549.1.9.21": "localKeyID",
"1.2.840.113549.1.9.22": "certTypes",
"1.2.840.113549.1.9.23": "crlTypes",
"1.2.840.113549.1.9.24": "pkcs-9-oc",
"1.2.840.113549.1.9.25": "pkcs-9-at",
"1.2.840.113549.1.9.26": "pkcs-9-sx",
"1.2.840.113549.1.9.27": "pkcs-9-mr",
"1.2.840.113549.1.9.52": "id-aa-CMSAlgorithmProtection"
}
# x509 #
attributeType_oids = {
"2.5.4.0": "objectClass",
"2.5.4.1": "aliasedEntryName",
"2.5.4.2": "knowledgeInformation",
"2.5.4.3": "commonName",
"2.5.4.4": "surname",
"2.5.4.5": "serialNumber",
"2.5.4.6": "countryName",
"2.5.4.7": "localityName",
"2.5.4.8": "stateOrProvinceName",
"2.5.4.9": "streetAddress",
"2.5.4.10": "organizationName",
"2.5.4.11": "organizationUnitName",
"2.5.4.12": "title",
"2.5.4.13": "description",
"2.5.4.14": "searchGuide",
"2.5.4.15": "businessCategory",
"2.5.4.16": "postalAddress",
"2.5.4.17": "postalCode",
"2.5.4.18": "postOfficeBox",
"2.5.4.19": "physicalDeliveryOfficeName",
"2.5.4.20": "telephoneNumber",
"2.5.4.21": "telexNumber",
"2.5.4.22": "teletexTerminalIdentifier",
"2.5.4.23": "facsimileTelephoneNumber",
"2.5.4.24": "x121Address",
"2.5.4.25": "internationalISDNNumber",
"2.5.4.26": "registeredAddress",
"2.5.4.27": "destinationIndicator",
"2.5.4.28": "preferredDeliveryMethod",
"2.5.4.29": "presentationAddress",
"2.5.4.30": "supportedApplicationContext",
"2.5.4.31": "member",
"2.5.4.32": "owner",
"2.5.4.33": "roleOccupant",
"2.5.4.34": "seeAlso",
"2.5.4.35": "userPassword",
"2.5.4.36": "userCertificate",
"2.5.4.37": "cACertificate",
"2.5.4.38": "authorityRevocationList",
"2.5.4.39": "certificateRevocationList",
"2.5.4.40": "crossCertificatePair",
"2.5.4.41": "name",
"2.5.4.42": "givenName",
"2.5.4.43": "initials",
"2.5.4.44": "generationQualifier",
"2.5.4.45": "uniqueIdentifier",
"2.5.4.46": "dnQualifier",
"2.5.4.47": "enhancedSearchGuide",
"2.5.4.48": "protocolInformation",
"2.5.4.49": "distinguishedName",
"2.5.4.50": "uniqueMember",
"2.5.4.51": "houseIdentifier",
"2.5.4.52": "supportedAlgorithms",
"2.5.4.53": "deltaRevocationList",
"2.5.4.54": "dmdName",
"2.5.4.55": "clearance",
"2.5.4.56": "defaultDirQop",
"2.5.4.57": "attributeIntegrityInfo",
"2.5.4.58": "attributeCertificate",
"2.5.4.59": "attributeCertificateRevocationList",
"2.5.4.60": "confKeyInfo",
"2.5.4.61": "aACertificate",
"2.5.4.62": "attributeDescriptorCertificate",
"2.5.4.63": "attributeAuthorityRevocationList",
"2.5.4.64": "family-information",
"2.5.4.65": "pseudonym",
"2.5.4.66": "communicationsService",
"2.5.4.67": "communicationsNetwork",
"2.5.4.68": "certificationPracticeStmt",
"2.5.4.69": "certificatePolicy",
"2.5.4.70": "pkiPath",
"2.5.4.71": "privPolicy",
"2.5.4.72": "role",
"2.5.4.73": "delegationPath",
"2.5.4.74": "protPrivPolicy",
"2.5.4.75": "xMLPrivilegeInfo",
"2.5.4.76": "xmlPrivPolicy",
"2.5.4.77": "uuidpair",
"2.5.4.78": "tagOid",
"2.5.4.79": "uiiFormat",
"2.5.4.80": "uiiInUrh",
"2.5.4.81": "contentUrl",
"2.5.4.82": "permission",
"2.5.4.83": "uri",
"2.5.4.84": "pwdAttribute",
"2.5.4.85": "userPwd",
"2.5.4.86": "urn",
"2.5.4.87": "url",
"2.5.4.88": "utmCoordinates",
"2.5.4.89": "urnC",
"2.5.4.90": "uii",
"2.5.4.91": "epc",
"2.5.4.92": "tagAfi",
"2.5.4.93": "epcFormat",
"2.5.4.94": "epcInUrn",
"2.5.4.95": "ldapUrl",
"2.5.4.96": "ldapUrl",
"2.5.4.97": "organizationIdentifier"
}
certificateExtension_oids = {
"2.5.29.1": "authorityKeyIdentifier",
"2.5.29.2": "keyAttributes",
"2.5.29.3": "certificatePolicies",
"2.5.29.4": "keyUsageRestriction",
"2.5.29.5": "policyMapping",
"2.5.29.6": "subtreesConstraint",
"2.5.29.7": "subjectAltName",
"2.5.29.8": "issuerAltName",
"2.5.29.9": "subjectDirectoryAttributes",
"2.5.29.10": "basicConstraints",
"2.5.29.14": "subjectKeyIdentifier",
"2.5.29.15": "keyUsage",
"2.5.29.16": "privateKeyUsagePeriod",
"2.5.29.17": "subjectAltName",
"2.5.29.18": "issuerAltName",
"2.5.29.19": "basicConstraints",
"2.5.29.20": "cRLNumber",
"2.5.29.21": "reasonCode",
"2.5.29.22": "expirationDate",
"2.5.29.23": "instructionCode",
"2.5.29.24": "invalidityDate",
"2.5.29.25": "cRLDistributionPoints",
"2.5.29.26": "issuingDistributionPoint",
"2.5.29.27": "deltaCRLIndicator",
"2.5.29.28": "issuingDistributionPoint",
"2.5.29.29": "certificateIssuer",
"2.5.29.30": "nameConstraints",
"2.5.29.31": "cRLDistributionPoints",
"2.5.29.32": "certificatePolicies",
"2.5.29.33": "policyMappings",
"2.5.29.34": "policyConstraints",
"2.5.29.35": "authorityKeyIdentifier",
"2.5.29.36": "policyConstraints",
"2.5.29.37": "extKeyUsage",
"2.5.29.38": "authorityAttributeIdentifier",
"2.5.29.39": "roleSpecCertIdentifier",
"2.5.29.40": "cRLStreamIdentifier",
"2.5.29.41": "basicAttConstraints",
"2.5.29.42": "delegatedNameConstraints",
"2.5.29.43": "timeSpecification",
"2.5.29.44": "cRLScope",
"2.5.29.45": "statusReferrals",
"2.5.29.46": "freshestCRL",
"2.5.29.47": "orderedList",
"2.5.29.48": "attributeDescriptor",
"2.5.29.49": "userNotice",
"2.5.29.50": "sOAIdentifier",
"2.5.29.51": "baseUpdateTime",
"2.5.29.52": "acceptableCertPolicies",
"2.5.29.53": "deltaInfo",
"2.5.29.54": "inhibitAnyPolicy",
"2.5.29.55": "targetInformation",
"2.5.29.56": "noRevAvail",
"2.5.29.57": "acceptablePrivilegePolicies",
"2.5.29.58": "id-ce-toBeRevoked",
"2.5.29.59": "id-ce-RevokedGroups",
"2.5.29.60": "id-ce-expiredCertsOnCRL",
"2.5.29.61": "indirectIssuer",
"2.5.29.62": "id-ce-noAssertion",
"2.5.29.63": "id-ce-aAissuingDistributionPoint",
"2.5.29.64": "id-ce-issuedOnBehaIFOF",
"2.5.29.65": "id-ce-singleUse",
"2.5.29.66": "id-ce-groupAC",
"2.5.29.67": "id-ce-allowedAttAss",
"2.5.29.68": "id-ce-attributeMappings",
"2.5.29.69": "id-ce-holderNameConstraints"
}
certExt_oids = {
"2.16.840.1.113730.1.1": "cert-type",
"2.16.840.1.113730.1.2": "base-url",
"2.16.840.1.113730.1.3": "revocation-url",
"2.16.840.1.113730.1.4": "ca-revocation-url",
"2.16.840.1.113730.1.5": "ca-crl-url",
"2.16.840.1.113730.1.6": "ca-cert-url",
"2.16.840.1.113730.1.7": "renewal-url",
"2.16.840.1.113730.1.8": "ca-policy-url",
"2.16.840.1.113730.1.9": "homepage-url",
"2.16.840.1.113730.1.10": "entity-logo",
"2.16.840.1.113730.1.11": "user-picture",
"2.16.840.1.113730.1.12": "ssl-server-name",
"2.16.840.1.113730.1.13": "comment",
"2.16.840.1.113730.1.14": "lost-password-url",
"2.16.840.1.113730.1.15": "cert-renewal-time",
"2.16.840.1.113730.1.16": "aia",
"2.16.840.1.113730.1.17": "cert-scope-of-use",
}
certPkixPe_oids = {
"1.3.6.1.5.5.7.1.1": "authorityInfoAccess",
"1.3.6.1.5.5.7.1.2": "biometricInfo",
"1.3.6.1.5.5.7.1.3": "qcStatements",
"1.3.6.1.5.5.7.1.4": "auditIdentity",
"1.3.6.1.5.5.7.1.6": "aaControls",
"1.3.6.1.5.5.7.1.10": "proxying",
"1.3.6.1.5.5.7.1.11": "subjectInfoAccess"
}
certPkixQt_oids = {
"1.3.6.1.5.5.7.2.1": "cps",
"1.3.6.1.5.5.7.2.2": "unotice"
}
certPkixKp_oids = {
"1.3.6.1.5.5.7.3.1": "serverAuth",
"1.3.6.1.5.5.7.3.2": "clientAuth",
"1.3.6.1.5.5.7.3.3": "codeSigning",
"1.3.6.1.5.5.7.3.4": "emailProtection",
"1.3.6.1.5.5.7.3.5": "ipsecEndSystem",
"1.3.6.1.5.5.7.3.6": "ipsecTunnel",
"1.3.6.1.5.5.7.3.7": "ipsecUser",
"1.3.6.1.5.5.7.3.8": "timeStamping",
"1.3.6.1.5.5.7.3.9": "ocspSigning",
"1.3.6.1.5.5.7.3.10": "dvcs",
"1.3.6.1.5.5.7.3.21": "secureShellClient",
"1.3.6.1.5.5.7.3.22": "secureShellServer"
}
certPkixAd_oids = {
"1.3.6.1.5.5.7.48.1": "ocsp",
"1.3.6.1.5.5.7.48.2": "caIssuers",
"1.3.6.1.5.5.7.48.3": "timestamping",
"1.3.6.1.5.5.7.48.4": "id-ad-dvcs",
"1.3.6.1.5.5.7.48.5": "id-ad-caRepository",
"1.3.6.1.5.5.7.48.6": "id-pkix-ocsp-archive-cutoff",
"1.3.6.1.5.5.7.48.7": "id-pkix-ocsp-service-locator",
"1.3.6.1.5.5.7.48.12": "id-ad-cmc",
"1.3.6.1.5.5.7.48.1.1": "basic-response"
}
# ansi-x962 #
x962KeyType_oids = {
"1.2.840.10045.1.1": "prime-field",
"1.2.840.10045.1.2": "characteristic-two-field",
"1.2.840.10045.2.1": "ecPublicKey",
}
x962Signature_oids = {
"1.2.840.10045.4.1": "ecdsa-with-SHA1",
"1.2.840.10045.4.2": "ecdsa-with-Recommended",
"1.2.840.10045.4.3.1": "ecdsa-with-SHA224",
"1.2.840.10045.4.3.2": "ecdsa-with-SHA256",
"1.2.840.10045.4.3.3": "ecdsa-with-SHA384",
"1.2.840.10045.4.3.4": "ecdsa-with-SHA512"
}
# elliptic curves #
ansiX962Curve_oids = {
"1.2.840.10045.3.1.1": "prime192v1",
"1.2.840.10045.3.1.2": "prime192v2",
"1.2.840.10045.3.1.3": "prime192v3",
"1.2.840.10045.3.1.4": "prime239v1",
"1.2.840.10045.3.1.5": "prime239v2",
"1.2.840.10045.3.1.6": "prime239v3",
"1.2.840.10045.3.1.7": "prime256v1"
}
certicomCurve_oids = {
"1.3.132.0.1": "ansit163k1",
"1.3.132.0.2": "ansit163r1",
"1.3.132.0.3": "ansit239k1",
"1.3.132.0.4": "sect113r1",
"1.3.132.0.5": "sect113r2",
"1.3.132.0.6": "secp112r1",
"1.3.132.0.7": "secp112r2",
"1.3.132.0.8": "ansip160r1",
"1.3.132.0.9": "ansip160k1",
"1.3.132.0.10": "ansip256k1",
"1.3.132.0.15": "ansit163r2",
"1.3.132.0.16": "ansit283k1",
"1.3.132.0.17": "ansit283r1",
"1.3.132.0.22": "sect131r1",
"1.3.132.0.24": "ansit193r1",
"1.3.132.0.25": "ansit193r2",
"1.3.132.0.26": "ansit233k1",
"1.3.132.0.27": "ansit233r1",
"1.3.132.0.28": "secp128r1",
"1.3.132.0.29": "secp128r2",
"1.3.132.0.30": "ansip160r2",
"1.3.132.0.31": "ansip192k1",
"1.3.132.0.32": "ansip224k1",
"1.3.132.0.33": "ansip224r1",
"1.3.132.0.34": "ansip384r1",
"1.3.132.0.35": "ansip521r1",
"1.3.132.0.36": "ansit409k1",
"1.3.132.0.37": "ansit409r1",
"1.3.132.0.38": "ansit571k1",
"1.3.132.0.39": "ansit571r1"
}
# policies #
certPolicy_oids = {
"2.5.29.32.0": "anyPolicy"
}
# from Chromium source code (ev_root_ca_metadata.cc)
evPolicy_oids = {
'1.2.392.200091.100.721.1': 'EV Security Communication RootCA1',
'1.2.616.1.113527.2.5.1.1': 'EV Certum Trusted Network CA',
'1.3.159.1.17.1': 'EV Actualis Authentication Root CA',
'1.3.6.1.4.1.13177.10.1.3.10': 'EV Autoridad de Certificacion Firmaprofesional CIF A62634068',
'1.3.6.1.4.1.14370.1.6': 'EV GeoTrust Primary Certification Authority',
'1.3.6.1.4.1.14777.6.1.1': 'EV Izenpe.com roots Business',
'1.3.6.1.4.1.14777.6.1.2': 'EV Izenpe.com roots Government',
'1.3.6.1.4.1.17326.10.14.2.1.2': 'EV AC Camerfirma S.A. Chambers of Commerce Root - 2008',
'1.3.6.1.4.1.17326.10.14.2.2.2': 'EV AC Camerfirma S.A. Chambers of Commerce Root - 2008',
'1.3.6.1.4.1.17326.10.8.12.1.2': 'EV AC Camerfirma S.A. Global Chambersign Root - 2008',
'1.3.6.1.4.1.17326.10.8.12.2.2': 'EV AC Camerfirma S.A. Global Chambersign Root - 2008',
'1.3.6.1.4.1.22234.2.5.2.3.1': 'EV CertPlus Class 2 Primary CA (KEYNECTIS)',
'1.3.6.1.4.1.23223.1.1.1': 'EV StartCom Certification Authority',
'1.3.6.1.4.1.29836.1.10': 'EV China Internet Network Information Center EV Certificates Root',
'1.3.6.1.4.1.311.60.2.1.1': 'jurisdictionOfIncorporationLocalityName',
'1.3.6.1.4.1.311.60.2.1.2': 'jurisdictionOfIncorporationStateOrProvinceName',
'1.3.6.1.4.1.311.60.2.1.3': 'jurisdictionOfIncorporationCountryName',
'1.3.6.1.4.1.34697.2.1': 'EV AffirmTrust Commercial',
'1.3.6.1.4.1.34697.2.2': 'EV AffirmTrust Networking',
'1.3.6.1.4.1.34697.2.3': 'EV AffirmTrust Premium',
'1.3.6.1.4.1.34697.2.4': 'EV AffirmTrust Premium ECC',
'1.3.6.1.4.1.36305.2': 'EV Certificate Authority of WoSign',
'1.3.6.1.4.1.40869.1.1.22.3': 'EV TWCA Roots',
'1.3.6.1.4.1.4146.1.1': 'EV GlobalSign Root CAs',
'1.3.6.1.4.1.4788.2.202.1': 'EV D-TRUST Root Class 3 CA 2 EV 2009',
'1.3.6.1.4.1.6334.1.100.1': 'EV Cybertrust Global Root',
'1.3.6.1.4.1.6449.1.2.1.5.1': 'EV USERTrust Certification Authorities',
'1.3.6.1.4.1.781.1.2.1.8.1': 'EV Network Solutions Certificate Authority',
'1.3.6.1.4.1.782.1.2.1.8.1': 'EV AddTrust External CA Root',
'1.3.6.1.4.1.7879.13.24.1': 'EV T-Telessec GlobalRoot Class 3',
'1.3.6.1.4.1.8024.0.2.100.1.2': 'EV QuoVadis Roots',
'2.16.528.1.1003.1.2.7': 'EV Staat der Nederlanden EV Root CA',
'2.16.578.1.26.1.3.3': 'EV Buypass Class 3',
'2.16.756.1.83.21.0': 'EV Swisscom Root EV CA 2',
'2.16.756.1.89.1.2.1.1': 'EV SwissSign Gold CA - G2',
'2.16.792.3.0.4.1.1.4': 'EV E-Tugra Certification Authority',
'2.16.840.1.113733.1.7.23.6': 'EV VeriSign Certification Authorities',
'2.16.840.1.113733.1.7.48.1': 'EV thawte CAs',
'2.16.840.1.114028.10.1.2': 'EV Entrust Certification Authority',
'2.16.840.1.114171.500.9': 'EV Wells Fargo WellsSecure Public Root Certification Authority',
'2.16.840.1.114404.1.1.2.4.1': 'EV XRamp Global Certification Authority',
'2.16.840.1.114412.2.1': 'EV DigiCert High Assurance EV Root CA',
'2.16.840.1.114413.1.7.23.3': 'EV ValiCert Class 2 Policy Validation Authority',
'2.16.840.1.114414.1.7.23.3': 'EV Starfield Certificate Authority',
'2.16.840.1.114414.1.7.24.3': 'EV Starfield Service Certificate Authority'
}
#
gssapi_oids = {
'1.3.6.1.5.5.2': 'SPNEGO - Simple Protected Negotiation',
'1.3.6.1.4.1.311.2.2.10': 'NTLMSSP - Microsoft NTLM Security Support Provider',
'1.3.6.1.4.1.311.2.2.30': 'NEGOEX - SPNEGO Extended Negotiation Security Mechanism',
}
x509_oids_sets = [
pkcs1_oids,
secsig_oids,
pkcs9_oids,
attributeType_oids,
certificateExtension_oids,
certExt_oids,
certPkixPe_oids,
certPkixQt_oids,
certPkixKp_oids,
certPkixAd_oids,
certPolicy_oids,
evPolicy_oids,
x962KeyType_oids,
x962Signature_oids,
ansiX962Curve_oids,
certicomCurve_oids,
gssapi_oids,
]
x509_oids = {}
for oids_set in x509_oids_sets:
x509_oids.update(oids_set)
conf.mib = MIBDict(_name="MIB", **x509_oids)
#########################
# Hash mapping helper #
#########################
# This dict enables static access to string references to the hash functions
# of some algorithms from pkcs1_oids and x962Signature_oids.
hash_by_oid = {
"1.2.840.113549.1.1.2": "md2",
"1.2.840.113549.1.1.3": "md4",
"1.2.840.113549.1.1.4": "md5",
"1.2.840.113549.1.1.5": "sha1",
"1.2.840.113549.1.1.11": "sha256",
"1.2.840.113549.1.1.12": "sha384",
"1.2.840.113549.1.1.13": "sha512",
"1.2.840.113549.1.1.14": "sha224",
"1.2.840.10045.4.1": "sha1",
"1.2.840.10045.4.3.1": "sha224",
"1.2.840.10045.4.3.2": "sha256",
"1.2.840.10045.4.3.3": "sha384",
"1.2.840.10045.4.3.4": "sha512"
}
| 1 | 20,208 | Is there a unit test that covers this regexp? I am afraid that changing it might break things =/ | secdev-scapy | py |
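The reviewer asks whether a unit test covers the tightened `_mib_re_oiddecl` pattern. A minimal pytest-style sketch of such a check is below; the sample declaration and test name are hypothetical, not taken from Scapy's test suite.

```python
import re

# New pattern from the patch above, reproduced verbatim.
_mib_re_oiddecl = re.compile(
    r"$\s*([a-zA-Z0-9_-]+)\s+OBJECT[^:\{\}]+::=\s*\{([^\}]+)\}", re.M)

# Hypothetical MIB fragment, as it would look after comment/string stripping.
SAMPLE = """
internet OBJECT IDENTIFIER ::= { iso org(3) dod(6) 1 }
"""

def test_oiddecl_plain_declaration():
    (name, oid), = _mib_re_oiddecl.findall(SAMPLE)
    assert name == "internet"
    assert oid.split() == ["iso", "org(3)", "dod(6)", "1"]

# Note: the old alternation also tolerated a brace group (e.g. an INTEGER
# enumeration clause) between OBJECT and '::='; a second case exercising that
# shape would be the regression the reviewer is worried about.
```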
@@ -57,7 +57,7 @@ public class RubyNameFormatter implements NameFormatter {
@Override
public String varReference(Name name) {
- return "@" + name.toLowerUnderscore();
+ return name.toLowerUnderscore();
}
@Override | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.util.ruby;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NameFormatter;
import com.google.api.codegen.util.NamePath;
import com.google.common.collect.ImmutableSet;
/** The NameFormatter for Ruby. */
public class RubyNameFormatter implements NameFormatter {
private String wrapIfKeywordOrBuiltIn(String name) {
if (RESERVED_IDENTIFIER_SET.contains(name)) {
return name + "_";
} else {
return name;
}
}
@Override
public String publicClassName(Name name) {
return wrapIfKeywordOrBuiltIn(name.toUpperCamel());
}
@Override
public String privateClassName(Name name) {
return publicClassName(name);
}
@Override
public String localVarName(Name name) {
return wrapIfKeywordOrBuiltIn(name.toLowerUnderscore());
}
@Override
public String privateFieldName(Name name) {
return name.toLowerUnderscore();
}
@Override
public String publicFieldName(Name name) {
return wrapIfKeywordOrBuiltIn(name.toLowerUnderscore());
}
@Override
public String varReference(Name name) {
return "@" + name.toLowerUnderscore();
}
@Override
public String publicMethodName(Name name) {
return wrapIfKeywordOrBuiltIn(name.toLowerUnderscore());
}
@Override
public String privateMethodName(Name name) {
return wrapIfKeywordOrBuiltIn(name.toLowerUnderscore());
}
@Override
public String staticFunctionName(Name name) {
return wrapIfKeywordOrBuiltIn(name.toLowerUnderscore());
}
@Override
public String inittedConstantName(Name name) {
return wrapIfKeywordOrBuiltIn(name.toUpperUnderscore());
}
@Override
public String keyName(Name name) {
return name.toLowerUnderscore();
}
@Override
public String qualifiedName(NamePath namePath) {
return namePath.withUpperPieces().toDoubleColoned();
}
@Override
public String packageFilePathPiece(Name name) {
return name.toLowerUnderscore();
}
@Override
public String classFileNameBase(Name name) {
return name.toLowerUnderscore();
}
/**
* A set of Ruby keywords and built-ins. See:
* http://docs.ruby-lang.org/en/2.3.0/keywords_rdoc.html
*/
public static final ImmutableSet<String> RESERVED_IDENTIFIER_SET =
ImmutableSet.<String>builder()
.add(
"__ENCODING__",
"__LINE__",
"__FILE__",
"BEGIN",
"END",
"alias",
"and",
"begin",
"break",
"case",
"class",
"def",
"defined?",
"do",
"else",
"elsif",
"end",
"ensure",
"false",
"for",
"if",
"in",
"module",
"next",
"nil",
"not",
"or",
"redo",
"rescue",
"retry",
"return",
"self",
"super",
"then",
"true",
"undef",
"unless",
"until",
"when",
"while",
"yield",
// "options" is here because it's a common keyword argument to
// specify a CallOptions instance.
"options")
.build();
}
| 1 | 23,956 | This change is okay IMO because an `@` variable in Ruby is not a reference -- `@` variables are instance variables. Also, it's never used by the Ruby transformers. | googleapis-gapic-generator | java |
@@ -27,9 +27,9 @@ class PaymentImageUploadCest
$me->amOnPage('/admin/payment/edit/1');
$entityEditPage->uploadTestImage(self::IMAGE_UPLOAD_FIELD_ID, self::TEST_IMAGE_NAME);
$me->clickByName(self::SAVE_BUTTON_NAME);
- $me->seeTranslationAdmin('Payment <strong><a href="{{ url }}">%name%</a></strong> modified', 'messages', [
+ $me->seeTranslationAdmin('Payment <strong><a href="{{ url }}">{{ name }}</a></strong> modified', 'messages', [
'{{ url }}' => '',
- '%name%' => t('Credit card', [], 'dataFixtures', $me->getAdminLocale()),
+ '{{ name }}' => t('Credit card', [], 'dataFixtures', $me->getAdminLocale()),
]);
}
} | 1 | <?php
declare(strict_types=1);
namespace Tests\ShopBundle\Acceptance\acceptance;
use Tests\ShopBundle\Acceptance\acceptance\PageObject\Admin\EntityEditPage;
use Tests\ShopBundle\Acceptance\acceptance\PageObject\Admin\LoginPage;
use Tests\ShopBundle\Test\Codeception\AcceptanceTester;
class PaymentImageUploadCest
{
protected const IMAGE_UPLOAD_FIELD_ID = 'payment_form_image_image_file';
protected const SAVE_BUTTON_NAME = 'payment_form[save]';
protected const TEST_IMAGE_NAME = 'paymentTestImage.png';
/**
* @param \Tests\ShopBundle\Test\Codeception\AcceptanceTester $me
* @param \Tests\ShopBundle\Acceptance\acceptance\PageObject\Admin\EntityEditPage $entityEditPage
* @param \Tests\ShopBundle\Acceptance\acceptance\PageObject\Admin\LoginPage $loginPage
*/
public function testSuccessfulImageUpload(AcceptanceTester $me, EntityEditPage $entityEditPage, LoginPage $loginPage)
{
$me->wantTo('Upload an image in admin payment edit page');
$loginPage->loginAsAdmin();
$me->amOnPage('/admin/payment/edit/1');
$entityEditPage->uploadTestImage(self::IMAGE_UPLOAD_FIELD_ID, self::TEST_IMAGE_NAME);
$me->clickByName(self::SAVE_BUTTON_NAME);
$me->seeTranslationAdmin('Payment <strong><a href="{{ url }}">%name%</a></strong> modified', 'messages', [
'{{ url }}' => '',
'%name%' => t('Credit card', [], 'dataFixtures', $me->getAdminLocale()),
]);
}
}
| 1 | 18,335 | Why was this change needed? Is it because in `po` files it's written with curly braces? | shopsys-shopsys | php
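The next record patches DynamoRIO's AArch64 `patch_branch` so that its assertions match the reach implied by each branch form's signed immediate width (26 bits for B, 19 for B.cond/CBZ/CBNZ, 14 for TBZ/TBNZ). A quick Python check of that arithmetic (the immediate widths are from the A64 encodings; the helper name is illustrative):

```python
def reach_bytes(imm_bits: int) -> int:
    """Half-reach in bytes of an A64 PC-relative branch whose signed
    immediate is imm_bits wide (the immediate counts 4-byte words)."""
    return (1 << (imm_bits - 1)) * 4

assert reach_bytes(26) == 0x8000000  # B                 : +/-128 MiB
assert reach_bytes(19) == 0x100000   # B.cond, CBZ, CBNZ : +/-1 MiB
assert reach_bytes(14) == 0x8000     # TBZ, TBNZ         : +/-32 KiB
```

These values line up with the widened `ASSERT(off + 0x100000 < 0x200000)` and `ASSERT(off + 0x8000 < 0x10000)` bounds in the diff below.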
@@ -370,14 +370,14 @@ patch_branch(dr_isa_mode_t isa_mode, cache_pc branch_pc, cache_pc target_pc,
ASSERT(ALIGNED(branch_pc, 4) && ALIGNED(target_pc, 4));
if ((enc & 0xfc000000) == 0x14000000) { /* B */
ASSERT(off + 0x8000000 < 0x10000000);
- *pc_writable = (0x14000000 | (0x03ffffff & off >> 2));
+ *pc_writable = (0x14000000 | (0x03ffffff & (off >> 2)));
} else if ((enc & 0xff000010) == 0x54000000 ||
(enc & 0x7e000000) == 0x34000000) { /* B.cond, CBNZ, CBZ */
- ASSERT(off + 0x40000 < 0x80000);
- *pc_writable = (enc & 0xff00001f) | (0x00ffffe0 & off >> 2 << 5);
+ ASSERT(off + 0x100000 < 0x200000);
+ *pc_writable = (enc & 0xff00001f) | (0x00ffffe0 & (off >> 2 << 5));
} else if ((enc & 0x7e000000) == 0x36000000) { /* TBNZ, TBZ */
- ASSERT(off + 0x2000 < 0x4000);
- *pc_writable = (enc & 0xfff8001f) | (0x0007ffe0 & off >> 2 << 5);
+ ASSERT(off + 0x8000 < 0x10000);
+ *pc_writable = (enc & 0xfff8001f) | (0x0007ffe0 & (off >> 2 << 5));
} else
ASSERT(false);
if (hot_patch) | 1 | /* **********************************************************
* Copyright (c) 2014-2021 Google, Inc. All rights reserved.
* Copyright (c) 2016 ARM Limited. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "../globals.h"
#include "arch.h"
#include "instr.h"
#include "instr_create_shared.h"
#include "instrlist.h"
#include "instrument.h"
/* shorten code generation lines */
#define APP instrlist_meta_append
#define PRE instrlist_meta_preinsert
#define OPREG opnd_create_reg
#define BR_X1_INST (0xd61f0000 | 1 << 5) /* br x1 */
/***************************************************************************/
/* EXIT STUB */
/***************************************************************************/
/* We use multiple approaches to linking based on how far away the target
* fragment is:
*
* Unlinked:
* exit_cti stub
* ...
* stub:
* stp x0, x1, [x28]
* movz x0, #&linkstub[0, 16), lsl #0x00
* movk x0, #&linkstub[16, 32), lsl #0x10
* movk x0, #&linkstub[32, 48), lsl #0x20
* movk x0, #&linkstub[48, 64), lsl #0x30
* ldr x1, [#8/#12]
* br x1
* <fcache-return>
*
* Linked, exit_cti_reaches_target (near fragment):
* exit_cti target_fragment
* ...
* stub:
* stp x0, x1, [x28]
* movz x0, #&linkstub[0, 16), lsl #0x00
* movk x0, #&linkstub[16, 32), lsl #0x10
* movk x0, #&linkstub[32, 48), lsl #0x20
* movk x0, #&linkstub[48, 64), lsl #0x30
* ldr x1, [#8/#12]
* br x1
* <fcache-return>
*
* Linked, unconditional branch reaches target (intermediate fragment):
* exit_cti stub
* ...
* stub:
* b target_fragment
* movz x0, #&linkstub[0, 16), lsl #0x00
* movk x0, #&linkstub[16, 32), lsl #0x10
* movk x0, #&linkstub[32, 48), lsl #0x20
* movk x0, #&linkstub[48, 64), lsl #0x30
* ldr x1, [#8/#12]
* br x1
* <fcache-return>
*
* Linked, !unconditional branch reaches target (far fragment):
* exit_cti stub
* ...
* stub:
* stp x0, x1, [x28]
* movz x0, #&linkstub[0, 16), lsl #0x00
* movk x0, #&linkstub[16, 32), lsl #0x10
* movk x0, #&linkstub[32, 48), lsl #0x20
* movk x0, #&linkstub[48, 64), lsl #0x30
* ldr x1, [#8/#12]
* br x1
* <target_fragment_prefix>
*
* To ensure atomicity of <target> patching, the data slot must be 8-byte
* aligned. We do this by reserving 12 bytes for the data slot and using the
* appropriate offset in ldr for the 8-byte aligned 8 byte region within it.
*
* For complete design details, see the following wiki
* https://dynamorio.org/page_aarch64_far.html
*/
byte *
insert_relative_target(byte *pc, cache_pc target, bool hot_patch)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
byte *
insert_relative_jump(byte *pc, cache_pc target, bool hot_patch)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
uint
nop_pad_ilist(dcontext_t *dcontext, fragment_t *f, instrlist_t *ilist, bool emitting)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return 0;
}
size_t
get_fcache_return_tls_offs(dcontext_t *dcontext, uint flags)
{
/* AArch64 always uses shared gencode so we ignore FRAG_DB_SHARED(flags) */
if (TEST(FRAG_COARSE_GRAIN, flags)) {
/* FIXME i#1575: coarse-grain NYI on AArch64 */
ASSERT_NOT_IMPLEMENTED(false);
return 0;
}
return TLS_FCACHE_RETURN_SLOT;
}
/* Generate move (immediate) of a 64-bit value using at most 4 instructions.
* pc must be a writable (vmcode) pc.
*/
uint *
insert_mov_imm(uint *pc, reg_id_t dst, ptr_int_t val)
{
uint rt = dst - DR_REG_X0;
ASSERT(rt < 31);
*pc++ = 0xd2800000 | rt | (val & 0xffff) << 5; /* movz x(rt), #x */
if ((val >> 16 & 0xffff) != 0)
*pc++ = 0xf2a00000 | rt | (val >> 16 & 0xffff) << 5; /* movk x(rt), #x, lsl #16 */
if ((val >> 32 & 0xffff) != 0)
*pc++ = 0xf2c00000 | rt | (val >> 32 & 0xffff) << 5; /* movk x(rt), #x, lsl #32 */
if ((val >> 48 & 0xffff) != 0)
*pc++ = 0xf2e00000 | rt | (val >> 48 & 0xffff) << 5; /* movk x(rt), #x, lsl #48 */
return pc;
}
/* Returns addr for the target_pc data slot of the given stub. The slot starts at the
* 8-byte aligned region in the 12-byte slot reserved in the stub.
*/
static ptr_uint_t *
get_target_pc_slot(fragment_t *f, cache_pc stub_pc)
{
return (ptr_uint_t *)ALIGN_FORWARD(
vmcode_get_writable_addr(stub_pc + DIRECT_EXIT_STUB_SIZE(f->flags) -
DIRECT_EXIT_STUB_DATA_SZ),
8);
}
/* Emit code for the exit stub at stub_pc. Return the size of the
* emitted code in bytes. This routine assumes that the caller will
* take care of any cache synchronization necessary.
* The stub is unlinked initially, except coarse grain indirect exits,
* which are always linked.
*/
int
insert_exit_stub_other_flags(dcontext_t *dcontext, fragment_t *f, linkstub_t *l,
cache_pc stub_pc, ushort l_flags)
{
uint *write_stub_pc = (uint *)vmcode_get_writable_addr(stub_pc);
uint *pc = write_stub_pc;
uint num_nops_needed = 0;
/* FIXME i#1575: coarse-grain NYI on ARM */
ASSERT_NOT_IMPLEMENTED(!TEST(FRAG_COARSE_GRAIN, f->flags));
if (LINKSTUB_DIRECT(l_flags)) {
/* stp x0, x1, [x(stolen), #(offs)] */
*pc++ = (0xa9000000 | 0 | 1 << 10 | (dr_reg_stolen - DR_REG_X0) << 5 |
TLS_REG0_SLOT >> 3 << 15);
/* mov x0, ... */
pc = insert_mov_imm(pc, DR_REG_X0, (ptr_int_t)l);
num_nops_needed = 4 - (pc - write_stub_pc - 1);
ptr_uint_t *target_pc_slot = get_target_pc_slot(f, stub_pc);
ASSERT(pc < (uint *)target_pc_slot);
uint target_pc_slot_offs = (uint *)target_pc_slot - pc;
/* ldr x1, [pc, target_pc_slot_offs * AARCH64_INSTR_SIZE] */
*pc++ = (0x58000000 | (DR_REG_X1 - DR_REG_X0) | target_pc_slot_offs << 5);
/* br x1 */
*pc++ = BR_X1_INST;
/* Fill up with NOPs, depending on how many instructions we needed to move
* the immediate into a register. Ideally we would skip adding NOPs, but
* lots of places expect the stub size to be fixed.
*/
for (uint j = 0; j < num_nops_needed; j++)
*pc++ = RAW_NOP_INST;
/* The final slot is a data slot, which will hold the address of either
* the fcache-return routine or the linked fragment. We reserve 12 bytes
* and use the 8-byte aligned region of 8 bytes within it.
*/
ASSERT(pc == (uint *)target_pc_slot || pc + 1 == (uint *)target_pc_slot);
ASSERT(sizeof(app_pc) == 8);
pc += DIRECT_EXIT_STUB_DATA_SZ / sizeof(uint);
/* We start off with the fcache-return routine address in the slot. */
/* AArch64 uses shared gencode. So, fcache_return routine address should be
* same, no matter which thread creates/unpatches the stub.
*/
ASSERT(fcache_return_routine(dcontext) == fcache_return_routine(GLOBAL_DCONTEXT));
*target_pc_slot = (ptr_uint_t)fcache_return_routine(dcontext);
ASSERT((ptr_int_t)((byte *)pc - (byte *)write_stub_pc) ==
DIRECT_EXIT_STUB_SIZE(l_flags));
} else {
/* Stub starts out unlinked. */
cache_pc exit_target =
get_unlinked_entry(dcontext, EXIT_TARGET_TAG(dcontext, f, l));
/* stp x0, x1, [x(stolen), #(offs)] */
*pc++ = (0xa9000000 | 0 | 1 << 10 | (dr_reg_stolen - DR_REG_X0) << 5 |
TLS_REG0_SLOT >> 3 << 15);
/* mov x0, ... */
pc = insert_mov_imm(pc, DR_REG_X0, (ptr_int_t)l);
num_nops_needed = 4 - (pc - write_stub_pc - 1);
/* ldr x1, [x(stolen), #(offs)] */
*pc++ = (0xf9400000 | 1 | (dr_reg_stolen - DR_REG_X0) << 5 |
get_ibl_entry_tls_offs(dcontext, exit_target) >> 3 << 10);
/* br x1 */
*pc++ = BR_X1_INST;
/* Fill up with NOPs, depending on how many instructions we needed to move
* the immediate into a register. Ideally we would skip adding NOPs, but
* lots of places expect the stub size to be fixed.
*/
for (uint j = 0; j < num_nops_needed; j++)
*pc++ = RAW_NOP_INST;
}
return (int)((byte *)pc - (byte *)write_stub_pc);
}
bool
exit_cti_reaches_target(dcontext_t *dcontext, fragment_t *f, linkstub_t *l,
cache_pc target_pc)
{
cache_pc branch_pc = EXIT_CTI_PC(f, l);
/* Compute offset as unsigned, modulo arithmetic. */
ptr_uint_t off = (ptr_uint_t)target_pc - (ptr_uint_t)branch_pc;
uint enc = *(uint *)branch_pc;
ASSERT(ALIGNED(branch_pc, 4) && ALIGNED(target_pc, 4));
if ((enc & 0xfc000000) == 0x14000000) /* B (OP_b)*/
return (off + 0x8000000 < 0x10000000);
else if ((enc & 0xff000010) == 0x54000000 ||
(enc & 0x7e000000) == 0x34000000) /* B.cond, CBNZ, CBZ */
return (off + 0x40000 < 0x80000);
else if ((enc & 0x7e000000) == 0x36000000) /* TBNZ, TBZ */
return (off + 0x2000 < 0x4000);
ASSERT(false);
return false;
}
void
patch_stub(fragment_t *f, cache_pc stub_pc, cache_pc target_pc, cache_pc target_prefix_pc,
bool hot_patch)
{
/* Compute offset as unsigned, modulo arithmetic. */
ptr_uint_t off = (ptr_uint_t)target_pc - (ptr_uint_t)stub_pc;
if (off + 0x8000000 < 0x10000000) {
/* target_pc is a near fragment. We can get there with a B
* (OP_b, 26-bit signed immediate offset).
* i#1911: Patching arbitrary instructions to an unconditional branch
* is theoretically not sound. Architectural specifications do not
* guarantee safe behaviour or any bound on when the change will be
* visible to other processor elements.
*/
*(uint *)vmcode_get_writable_addr(stub_pc) =
(0x14000000 | (0x03ffffff & off >> 2));
if (hot_patch)
machine_cache_sync(stub_pc, stub_pc + 4, true);
return;
}
/* target_pc is a far fragment. We must use an indirect branch. Note that the indirect
* branch needs to be to the fragment prefix, as we need to restore the clobbered
* regs.
*/
/* We set hot_patch to false as we are not modifying code. */
ATOMIC_8BYTE_ALIGNED_WRITE(get_target_pc_slot(f, stub_pc),
(ptr_uint_t)target_prefix_pc,
/*hot_patch=*/false);
return;
}
static bool
stub_is_patched_for_intermediate_fragment_link(dcontext_t *dcontext, cache_pc stub_pc)
{
uint enc;
ATOMIC_4BYTE_ALIGNED_READ(stub_pc, &enc);
return (enc & 0xfc000000) == 0x14000000; /* B (OP_b)*/
}
static bool
stub_is_patched_for_far_fragment_link(dcontext_t *dcontext, fragment_t *f,
cache_pc stub_pc)
{
ptr_uint_t target_pc;
ATOMIC_8BYTE_ALIGNED_READ(get_target_pc_slot(f, stub_pc), &target_pc);
return target_pc != (ptr_uint_t)fcache_return_routine(dcontext);
}
bool
stub_is_patched(dcontext_t *dcontext, fragment_t *f, cache_pc stub_pc)
{
return stub_is_patched_for_intermediate_fragment_link(dcontext, stub_pc) ||
stub_is_patched_for_far_fragment_link(dcontext, f, stub_pc);
}
void
unpatch_stub(dcontext_t *dcontext, fragment_t *f, cache_pc stub_pc, bool hot_patch)
{
/* At any time, at most one patching strategy will be in effect: the one for
* intermediate fragments or the one for far fragments.
*/
if (stub_is_patched_for_intermediate_fragment_link(dcontext, stub_pc)) {
/* Restore the stp x0, x1, [x(stolen), #(offs)]
* i#1911: Patching unconditional branch to some arbitrary instruction
* is theoretically not sound. Architectural specifications do not
* guarantee safe behaviour or any bound on when the change will be
* visible to other processor elements.
*/
*(uint *)vmcode_get_writable_addr(stub_pc) =
(0xa9000000 | 0 | 1 << 10 | (dr_reg_stolen - DR_REG_X0) << 5 |
TLS_REG0_SLOT >> 3 << 15);
if (hot_patch)
machine_cache_sync(stub_pc, stub_pc + AARCH64_INSTR_SIZE, true);
} else if (stub_is_patched_for_far_fragment_link(dcontext, f, stub_pc)) {
/* Restore the data slot to fcache return address. */
/* AArch64 uses shared gencode. So, fcache_return routine address should be
* same, no matter which thread creates/unpatches the stub.
*/
ASSERT(fcache_return_routine(dcontext) == fcache_return_routine(GLOBAL_DCONTEXT));
/* We set hot_patch to false as we are not modifying code. */
ATOMIC_8BYTE_ALIGNED_WRITE(get_target_pc_slot(f, stub_pc),
(ptr_uint_t)fcache_return_routine(dcontext),
/*hot_patch=*/false);
}
}
void
patch_branch(dr_isa_mode_t isa_mode, cache_pc branch_pc, cache_pc target_pc,
bool hot_patch)
{
/* Compute offset as unsigned, modulo arithmetic. */
ptr_uint_t off = (ptr_uint_t)target_pc - (ptr_uint_t)branch_pc;
uint *pc_writable = (uint *)vmcode_get_writable_addr(branch_pc);
uint enc = *pc_writable;
ASSERT(ALIGNED(branch_pc, 4) && ALIGNED(target_pc, 4));
if ((enc & 0xfc000000) == 0x14000000) { /* B */
ASSERT(off + 0x8000000 < 0x10000000);
*pc_writable = (0x14000000 | (0x03ffffff & off >> 2));
} else if ((enc & 0xff000010) == 0x54000000 ||
(enc & 0x7e000000) == 0x34000000) { /* B.cond, CBNZ, CBZ */
ASSERT(off + 0x40000 < 0x80000);
*pc_writable = (enc & 0xff00001f) | (0x00ffffe0 & off >> 2 << 5);
} else if ((enc & 0x7e000000) == 0x36000000) { /* TBNZ, TBZ */
ASSERT(off + 0x2000 < 0x4000);
*pc_writable = (enc & 0xfff8001f) | (0x0007ffe0 & off >> 2 << 5);
} else
ASSERT(false);
if (hot_patch)
machine_cache_sync(branch_pc, branch_pc + 4, true);
return;
}
uint
patchable_exit_cti_align_offs(dcontext_t *dcontext, instr_t *inst, cache_pc pc)
{
return 0; /* always aligned */
}
cache_pc
exit_cti_disp_pc(cache_pc branch_pc)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
/* Skips NOP instructions backwards until the first non-NOP instruction is found. */
static uint *
get_stub_branch(uint *pc)
{
/* Skip NOP instructions backwards. */
while (*pc == RAW_NOP_INST)
pc--;
    /* The first non-NOP instruction must be the branch. */
ASSERT(*pc == BR_X1_INST);
return pc;
}
void
link_indirect_exit_arch(dcontext_t *dcontext, fragment_t *f, linkstub_t *l,
bool hot_patch, app_pc target_tag)
{
byte *stub_pc = (byte *)EXIT_STUB_PC(dcontext, f, l);
uint *pc;
cache_pc exit_target;
ibl_type_t ibl_type = { 0 };
DEBUG_DECLARE(bool is_ibl =)
get_ibl_routine_type_ex(dcontext, target_tag, &ibl_type);
ASSERT(is_ibl);
if (IS_IBL_LINKED(ibl_type.link_state))
exit_target = target_tag;
else
exit_target = get_linked_entry(dcontext, target_tag);
/* Set pc to the last instruction in the stub. */
pc = (uint *)(stub_pc + exit_stub_size(dcontext, target_tag, f->flags) -
AARCH64_INSTR_SIZE);
pc = get_stub_branch(pc) - 1;
/* ldr x1, [x(stolen), #(offs)] */
*(uint *)vmcode_get_writable_addr((byte *)pc) =
(0xf9400000 | 1 | (dr_reg_stolen - DR_REG_X0) << 5 |
get_ibl_entry_tls_offs(dcontext, exit_target) >> 3 << 10);
if (hot_patch)
machine_cache_sync(pc, pc + 1, true);
}
cache_pc
indirect_linkstub_stub_pc(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
cache_pc cti = EXIT_CTI_PC(f, l);
if (!EXIT_HAS_STUB(l->flags, f->flags))
return NULL;
ASSERT(decode_raw_is_jmp(dcontext, cti));
return decode_raw_jmp_target(dcontext, cti);
}
cache_pc
cbr_fallthrough_exit_cti(cache_pc prev_cti_pc)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
void
unlink_indirect_exit(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
byte *stub_pc = (byte *)EXIT_STUB_PC(dcontext, f, l);
uint *pc;
cache_pc exit_target;
ibl_code_t *ibl_code = NULL;
ASSERT(linkstub_owned_by_fragment(dcontext, f, l));
ASSERT(LINKSTUB_INDIRECT(l->flags));
/* Target is always the same, so if it's already unlinked, this is a nop. */
if (!TEST(LINK_LINKED, l->flags))
return;
ibl_code = get_ibl_routine_code(dcontext, extract_branchtype(l->flags), f->flags);
exit_target = ibl_code->unlinked_ibl_entry;
/* Set pc to the last instruction in the stub. */
pc = (uint *)(stub_pc +
exit_stub_size(dcontext, ibl_code->indirect_branch_lookup_routine,
f->flags) -
AARCH64_INSTR_SIZE);
pc = get_stub_branch(pc) - 1;
/* ldr x1, [x(stolen), #(offs)] */
*(uint *)vmcode_get_writable_addr((byte *)pc) =
(0xf9400000 | 1 | (dr_reg_stolen - DR_REG_X0) << 5 |
get_ibl_entry_tls_offs(dcontext, exit_target) >> 3 << 10);
machine_cache_sync(pc, pc + 1, true);
}
/*******************************************************************************
* COARSE-GRAIN FRAGMENT SUPPORT
*/
cache_pc
entrance_stub_jmp(cache_pc stub)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return NULL;
}
bool
coarse_is_entrance_stub(cache_pc stub)
{
/* FIXME i#1575: coarse-grain NYI on AArch64 */
return false;
}
/*###########################################################################
*
* fragment_t Prefixes
*/
int
fragment_ibt_prefix_size(uint flags)
{
/* Nothing extra for ibt as we don't have flags to restore */
return FRAGMENT_BASE_PREFIX_SIZE(flags);
}
void
insert_fragment_prefix(dcontext_t *dcontext, fragment_t *f)
{
/* Always use prefix on AArch64 as there is no load to PC. */
byte *write_start = vmcode_get_writable_addr(f->start_pc);
byte *pc = write_start;
ASSERT(f->prefix_size == 0);
/* ldp x0, x1, [x(stolen), #(off)] */
*(uint *)pc = (0xa9400000 | (DR_REG_X0 - DR_REG_X0) | (DR_REG_X1 - DR_REG_X0) << 10 |
(dr_reg_stolen - DR_REG_X0) << 5 | TLS_REG0_SLOT >> 3 << 10);
pc += 4;
f->prefix_size = (byte)(((cache_pc)pc) - write_start);
ASSERT(f->prefix_size == fragment_prefix_size(f->flags));
}
/***************************************************************************/
/* THREAD-PRIVATE/SHARED ROUTINE GENERATION */
/***************************************************************************/
void
append_call_exit_dr_hook(dcontext_t *dcontext, instrlist_t *ilist, bool absolute,
bool shared)
{
/* i#1569: DR_HOOK is not supported on AArch64 */
ASSERT_NOT_IMPLEMENTED(EXIT_DR_HOOK == NULL);
}
void
append_restore_xflags(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
APP(ilist, RESTORE_FROM_DC(dcontext, DR_REG_W0, XFLAGS_OFFSET));
APP(ilist, RESTORE_FROM_DC(dcontext, DR_REG_W1, XFLAGS_OFFSET + 4));
APP(ilist, RESTORE_FROM_DC(dcontext, DR_REG_W2, XFLAGS_OFFSET + 8));
APP(ilist,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_NZCV),
opnd_create_reg(DR_REG_X0)));
APP(ilist,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_FPCR),
opnd_create_reg(DR_REG_X1)));
APP(ilist,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_FPSR),
opnd_create_reg(DR_REG_X2)));
}
/* dcontext is in REG_DCXT; other registers can be used as scratch.
*/
void
append_restore_simd_reg(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
int i;
/* add x1, x(dcxt), #(off) */
APP(ilist,
XINST_CREATE_add_2src(dcontext, opnd_create_reg(DR_REG_X1),
opnd_create_reg(REG_DCXT),
OPND_CREATE_INTPTR(offsetof(priv_mcontext_t, simd))));
for (i = 0; i < 32; i += 2) {
/* ldp q(i), q(i + 1), [x1, #(i * 16)] */
APP(ilist,
INSTR_CREATE_ldp(
dcontext, opnd_create_reg(DR_REG_Q0 + i),
opnd_create_reg(DR_REG_Q0 + i + 1),
opnd_create_base_disp(DR_REG_X1, DR_REG_NULL, 0, i * 16, OPSZ_32)));
}
}
/* Append instructions to restore gpr on fcache enter, to be executed
* right before jump to fcache target.
* - dcontext is in REG_DCXT
* - DR's tls base is in dr_reg_stolen
* - all other registers can be used as scratch, and we are using X0.
*/
void
append_restore_gpr(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
int i;
/* FIXME i#1573: NYI on ARM with SELFPROT_DCONTEXT */
ASSERT_NOT_IMPLEMENTED(!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask));
ASSERT(dr_reg_stolen != SCRATCH_REG0);
/* Store stolen reg value into TLS slot. */
APP(ilist, RESTORE_FROM_DC(dcontext, SCRATCH_REG0, REG_OFFSET(dr_reg_stolen)));
APP(ilist, SAVE_TO_TLS(dcontext, SCRATCH_REG0, TLS_REG_STOLEN_SLOT));
/* Save DR's tls base into mcontext so we can blindly include it in the
* loop of OP_ldp instructions below.
* This means that the mcontext stolen reg slot holds DR's base instead of
* the app's value while we're in the cache, which can be confusing: but we have
* to get the official value from TLS on signal and other transitions anyway,
* and DR's base makes it easier to spot bugs than a prior app value.
*/
APP(ilist, SAVE_TO_DC(dcontext, dr_reg_stolen, REG_OFFSET(dr_reg_stolen)));
i = (REG_DCXT == DR_REG_X0);
/* ldp x30, x(i), [x(dcxt), #x30_offset] */
APP(ilist,
INSTR_CREATE_ldp(dcontext, opnd_create_reg(DR_REG_X30),
opnd_create_reg(DR_REG_X0 + i),
opnd_create_base_disp(REG_DCXT, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X30), OPSZ_16)));
/* mov sp, x(i) */
APP(ilist,
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_SP),
opnd_create_reg(DR_REG_X0 + i)));
for (i = 0; i < 30; i += 2) {
if ((REG_DCXT - DR_REG_X0) >> 1 != i >> 1) {
/* ldp x(i), x(i+1), [x(dcxt), #xi_offset] */
APP(ilist,
INSTR_CREATE_ldp(dcontext, opnd_create_reg(DR_REG_X0 + i),
opnd_create_reg(DR_REG_X0 + i + 1),
opnd_create_base_disp(REG_DCXT, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X0 + i),
OPSZ_16)));
}
}
i = (REG_DCXT - DR_REG_X0) & ~1;
/* ldp x(i), x(i+1), [x(dcxt), #xi_offset] */
APP(ilist,
INSTR_CREATE_ldp(dcontext, opnd_create_reg(DR_REG_X0 + i),
opnd_create_reg(DR_REG_X0 + i + 1),
opnd_create_base_disp(REG_DCXT, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X0 + i), OPSZ_16)));
}
/* Append instructions to save gpr on fcache return, called after
* append_fcache_return_prologue.
* Assuming the execution comes from an exit stub via br DR_REG_X1,
* dcontext base is held in REG_DCXT, and exit stub in X0.
* App's x0 and x1 is stored in TLS_REG0_SLOT and TLS_REG1_SLOT
* - store all registers into dcontext's mcontext
* - restore REG_DCXT app value from TLS slot to mcontext
* - restore dr_reg_stolen app value from TLS slot to mcontext
*/
void
append_save_gpr(dcontext_t *dcontext, instrlist_t *ilist, bool ibl_end, bool absolute,
generated_code_t *code, linkstub_t *linkstub, bool coarse_info)
{
int i;
/* X0 and X1 will always have been saved in TLS slots before executing
* the code generated here. See, for example:
* emit_do_syscall_common, emit_indirect_branch_lookup, handle_sigreturn,
* insert_exit_stub_other_flags, execute_handler_from_{cache,dispatch},
* transfer_from_sig_handler_to_fcache_return
*/
for (i = 2; i < 30; i += 2) {
/* stp x(i), x(i+1), [x(dcxt), #xi_offset] */
APP(ilist,
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(REG_DCXT, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X0 + i), OPSZ_16),
opnd_create_reg(DR_REG_X0 + i),
opnd_create_reg(DR_REG_X0 + i + 1)));
}
/* mov x1, sp */
APP(ilist,
XINST_CREATE_move(dcontext, opnd_create_reg(DR_REG_X1),
opnd_create_reg(DR_REG_SP)));
/* stp x30, x1, [x(dcxt), #x30_offset] */
APP(ilist,
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(REG_DCXT, DR_REG_NULL, 0,
REG_OFFSET(DR_REG_X30), OPSZ_16),
opnd_create_reg(DR_REG_X30), opnd_create_reg(DR_REG_X1)));
/* ldp x1, x2, [x(stolen)]
* stp x1, x2, [x(dcxt)]
*/
APP(ilist,
INSTR_CREATE_ldp(
dcontext, opnd_create_reg(DR_REG_X1), opnd_create_reg(DR_REG_X2),
opnd_create_base_disp(dr_reg_stolen, DR_REG_NULL, 0, 0, OPSZ_16)));
APP(ilist,
INSTR_CREATE_stp(dcontext,
opnd_create_base_disp(REG_DCXT, DR_REG_NULL, 0, 0, OPSZ_16),
opnd_create_reg(DR_REG_X1), opnd_create_reg(DR_REG_X2)));
if (linkstub != NULL) {
/* FIXME i#1575: NYI for coarse-grain stub */
ASSERT_NOT_IMPLEMENTED(false);
}
/* REG_DCXT's app value is stored in DCONTEXT_BASE_SPILL_SLOT by
* append_prepare_fcache_return, so copy it to mcontext.
*/
APP(ilist, RESTORE_FROM_TLS(dcontext, SCRATCH_REG1, DCONTEXT_BASE_SPILL_SLOT));
APP(ilist, SAVE_TO_DC(dcontext, SCRATCH_REG1, REG_DCXT_OFFS));
/* dr_reg_stolen's app value is always stored in the TLS spill slot,
* and we restore its value back to mcontext on fcache return.
*/
APP(ilist, RESTORE_FROM_TLS(dcontext, SCRATCH_REG1, TLS_REG_STOLEN_SLOT));
APP(ilist, SAVE_TO_DC(dcontext, SCRATCH_REG1, REG_OFFSET(dr_reg_stolen)));
}
/* dcontext base is held in REG_DCXT, and exit stub in X0.
* GPR's are already saved.
*/
void
append_save_simd_reg(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
int i;
/* add x1, x(DCXT), #(off) */
APP(ilist,
XINST_CREATE_add_2src(dcontext, opnd_create_reg(DR_REG_X1),
opnd_create_reg(REG_DCXT),
OPND_CREATE_INTPTR(offsetof(priv_mcontext_t, simd))));
for (i = 0; i < 32; i += 2) {
/* stp q(i), q(i + 1), [x1, #(i * 16)] */
APP(ilist,
INSTR_CREATE_stp(
dcontext,
opnd_create_base_disp(DR_REG_X1, DR_REG_NULL, 0, i * 16, OPSZ_32),
opnd_create_reg(DR_REG_Q0 + i), opnd_create_reg(DR_REG_Q0 + i + 1)));
}
}
/* Scratch reg0 is holding exit stub. */
void
append_save_clear_xflags(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
APP(ilist,
INSTR_CREATE_mrs(dcontext, opnd_create_reg(DR_REG_X1),
opnd_create_reg(DR_REG_NZCV)));
APP(ilist,
INSTR_CREATE_mrs(dcontext, opnd_create_reg(DR_REG_X2),
opnd_create_reg(DR_REG_FPCR)));
APP(ilist,
INSTR_CREATE_mrs(dcontext, opnd_create_reg(DR_REG_X3),
opnd_create_reg(DR_REG_FPSR)));
APP(ilist, SAVE_TO_DC(dcontext, DR_REG_W1, XFLAGS_OFFSET));
APP(ilist, SAVE_TO_DC(dcontext, DR_REG_W2, XFLAGS_OFFSET + 4));
APP(ilist, SAVE_TO_DC(dcontext, DR_REG_W3, XFLAGS_OFFSET + 8));
}
bool
append_call_enter_dr_hook(dcontext_t *dcontext, instrlist_t *ilist, bool ibl_end,
bool absolute)
{
/* i#1569: DR_HOOK is not supported on AArch64 */
ASSERT_NOT_IMPLEMENTED(EXIT_DR_HOOK == NULL);
return false;
}
void
insert_save_eflags(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, uint flags,
bool tls, bool absolute _IF_X86_64(bool x86_to_x64_ibl_opt))
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
}
void
insert_restore_eflags(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
uint flags, bool tls,
bool absolute _IF_X86_64(bool x86_to_x64_ibl_opt))
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
}
byte *
emit_inline_ibl_stub(dcontext_t *dcontext, byte *pc, ibl_code_t *ibl_code,
bool target_trace_table)
{
ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */
return pc;
}
bool
instr_is_ibl_hit_jump(instr_t *instr)
{
return instr_get_opcode(instr) == OP_br &&
opnd_get_reg(instr_get_target(instr)) == DR_REG_X0;
}
byte *
emit_indirect_branch_lookup(dcontext_t *dc, generated_code_t *code, byte *pc,
byte *fcache_return_pc, bool target_trace_table,
bool inline_ibl_head, ibl_code_t *ibl_code /* IN/OUT */)
{
bool absolute = false;
instrlist_t ilist;
instrlist_init(&ilist);
patch_list_t *patch = &ibl_code->ibl_patch;
init_patch_list(patch, PATCH_TYPE_INDIRECT_TLS);
instr_t *load_tag = INSTR_CREATE_label(dc);
instr_t *compare_tag = INSTR_CREATE_label(dc);
instr_t *try_next = INSTR_CREATE_label(dc);
instr_t *miss = INSTR_CREATE_label(dc);
instr_t *not_hit = INSTR_CREATE_label(dc);
instr_t *target_delete_entry = INSTR_CREATE_label(dc);
instr_t *unlinked = INSTR_CREATE_label(dc);
/* FIXME i#1569: Use INSTR_CREATE macros when encoder is implemented. */
/* On entry we expect:
* x0: link_stub entry
* x1: scratch reg, arrived from br x1
* x2: indirect branch target
* TLS_REG0_SLOT: app's x0
* TLS_REG1_SLOT: app's x1
* TLS_REG2_SLOT: app's x2
* TLS_REG3_SLOT: scratch space
* There are following entries with the same context:
* indirect_branch_lookup
* unlink_stub_entry
* target_delete_entry:
* x0: scratch
* x1: table entry pointer from ibl lookup hit path
* x2: app's x2
* TLS_REG0_SLOT: app's x0
* TLS_REG1_SLOT: app's x1
* TLS_REG2_SLOT: app's x2
* On miss exit we output:
* x0: the dcontext->last_exit
* x1: br x1
* x2: app's x2
* TLS_REG0_SLOT: app's x0 (recovered by fcache_return)
* TLS_REG1_SLOT: app's x1 (recovered by fcache_return)
* On hit exit we output:
* x0: fragment_start_pc (points to the fragment prefix)
* x1: scratch reg
* x2: app's x2
* TLS_REG0_SLOT: app's x0 (recovered by fragment_prefix)
* TLS_REG1_SLOT: app's x1 (recovered by fragment_prefix)
*/
/* Spill x0. */
APP(&ilist, instr_create_save_to_tls(dc, DR_REG_R0, TLS_REG3_SLOT));
/* Load-acquire hash mask. We need a load-acquire to ensure we see updates
* properly; the corresponding store-release is in update_lookuptable_tls().
*/
/* add x1, x28 + hash_mask_offs; ldar x1, [x1] (ldar doesn't take an offs.) */
APP(&ilist,
INSTR_CREATE_add(dc, opnd_create_reg(DR_REG_X1), opnd_create_reg(dr_reg_stolen),
OPND_CREATE_INT32(TLS_MASK_SLOT(ibl_code->branch_type))));
APP(&ilist,
INSTR_CREATE_ldar(dc, opnd_create_reg(DR_REG_X1),
OPND_CREATE_MEMPTR(DR_REG_X1, 0)));
/* ldr x0, [x28, hash_table] */
APP(&ilist,
INSTR_CREATE_ldr(dc, opnd_create_reg(DR_REG_X0),
opnd_create_base_disp(dr_reg_stolen, DR_REG_NULL, 0,
TLS_TABLE_SLOT(ibl_code->branch_type),
OPSZ_8)));
/* and x1, x1, x2 */
APP(&ilist,
INSTR_CREATE_and(dc, opnd_create_reg(DR_REG_X1), opnd_create_reg(DR_REG_X1),
opnd_create_reg(DR_REG_X2)));
/* Get table entry. */
/* add x1, x0, x1, LSL #4 */
APP(&ilist,
INSTR_CREATE_add_shift(
dc, opnd_create_reg(DR_REG_X1), opnd_create_reg(DR_REG_X0),
opnd_create_reg(DR_REG_X1), OPND_CREATE_INT8(DR_SHIFT_LSL),
OPND_CREATE_INT8(4 - HASHTABLE_IBL_OFFSET(ibl_code->branch_type))));
/* x1 now holds the fragment_entry_t* in the hashtable. */
APP(&ilist, load_tag);
/* Load tag from fragment_entry_t* in the hashtable to x0. */
/* ldr x0, [x1, #tag_fragment_offset] */
APP(&ilist,
INSTR_CREATE_ldr(
dc, opnd_create_reg(DR_REG_X0),
OPND_CREATE_MEMPTR(DR_REG_X1, offsetof(fragment_entry_t, tag_fragment))));
/* Did we hit? */
APP(&ilist, compare_tag);
/* cbz x0, not_hit */
APP(&ilist,
INSTR_CREATE_cbz(dc, opnd_create_instr(not_hit), opnd_create_reg(DR_REG_X0)));
/* sub x0, x0, x2 */
APP(&ilist,
XINST_CREATE_sub(dc, opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X2)));
/* cbnz x0, try_next */
APP(&ilist,
INSTR_CREATE_cbnz(dc, opnd_create_instr(try_next), opnd_create_reg(DR_REG_X0)));
/* Hit path. */
/* App's original values of x0 and x1 are already in respective TLS slots, and
* will be restored by the fragment prefix.
*/
/* Recover app's original x2. */
APP(&ilist, instr_create_restore_from_tls(dc, DR_REG_R2, TLS_REG2_SLOT));
/* ldr x0, [x1, #start_pc_fragment_offset] */
APP(&ilist,
INSTR_CREATE_ldr(dc, opnd_create_reg(DR_REG_X0),
OPND_CREATE_MEMPTR(
DR_REG_X1, offsetof(fragment_entry_t, start_pc_fragment))));
/* br x0
* (keep in sync with instr_is_ibl_hit_jump())
*/
APP(&ilist, INSTR_CREATE_br(dc, opnd_create_reg(DR_REG_X0)));
APP(&ilist, try_next);
/* Try next entry, in case of collision. No wraparound check is needed
* because of the sentinel at the end.
* ldr x0, [x1, #tag_fragment_offset]! */
APP(&ilist,
instr_create_2dst_3src(
dc, OP_ldr, opnd_create_reg(DR_REG_X0), opnd_create_reg(DR_REG_X1),
OPND_CREATE_MEMPTR(DR_REG_X1, sizeof(fragment_entry_t)),
opnd_create_reg(DR_REG_X1), OPND_CREATE_INTPTR(sizeof(fragment_entry_t))));
/* b compare_tag */
APP(&ilist, INSTR_CREATE_b(dc, opnd_create_instr(compare_tag)));
APP(&ilist, not_hit);
if (INTERNAL_OPTION(ibl_sentinel_check)) {
/* Load start_pc from fragment_entry_t* in the hashtable to x0. */
/* ldr x0, [x1, #start_pc_fragment] */
APP(&ilist,
XINST_CREATE_load(
dc, opnd_create_reg(DR_REG_X0),
OPND_CREATE_MEMPTR(DR_REG_X1,
offsetof(fragment_entry_t, start_pc_fragment))));
/* To compare with an arbitrary constant we'd need a 4th scratch reg.
* Instead we rely on the sentinel start PC being 1.
*/
ASSERT(HASHLOOKUP_SENTINEL_START_PC == (cache_pc)PTR_UINT_1);
/* sub x0, x0, #1 */
APP(&ilist,
XINST_CREATE_sub(dc, opnd_create_reg(DR_REG_X0), OPND_CREATE_INT8(1)));
/* cbnz x0, miss */
APP(&ilist,
INSTR_CREATE_cbnz(dc, opnd_create_instr(miss), opnd_create_reg(DR_REG_R0)));
/* Point at the first table slot and then go load and compare its tag */
/* ldr x1, [x28, #table_base] */
APP(&ilist,
XINST_CREATE_load(dc, opnd_create_reg(DR_REG_X1),
OPND_CREATE_MEMPTR(dr_reg_stolen,
TLS_TABLE_SLOT(ibl_code->branch_type))));
/* branch to load_tag */
APP(&ilist, INSTR_CREATE_b(dc, opnd_create_instr(load_tag)));
}
/* Target delete entry */
APP(&ilist, target_delete_entry);
add_patch_marker(patch, target_delete_entry, PATCH_ASSEMBLE_ABSOLUTE,
0 /* beginning of instruction */,
(ptr_uint_t *)&ibl_code->target_delete_entry);
/* Load next_tag from table entry. */
APP(&ilist,
INSTR_CREATE_ldr(
dc, opnd_create_reg(DR_REG_R2),
OPND_CREATE_MEMPTR(DR_REG_R1, offsetof(fragment_entry_t, tag_fragment))));
    /* Store &linkstub_ibl_deleted in r0 instead of the last-exit linkstub, since
     * this path branches past the code below that would load it.
     */
instrlist_insert_mov_immed_ptrsz(dc, (ptr_uint_t)get_ibl_deleted_linkstub(),
opnd_create_reg(DR_REG_R0), &ilist, NULL, NULL,
NULL);
APP(&ilist, INSTR_CREATE_b(dc, opnd_create_instr(unlinked)));
APP(&ilist, miss);
/* Recover the dcontext->last_exit to x0 */
APP(&ilist, instr_create_restore_from_tls(dc, DR_REG_R0, TLS_REG3_SLOT));
/* Unlink path: entry from stub */
APP(&ilist, unlinked);
add_patch_marker(patch, unlinked, PATCH_ASSEMBLE_ABSOLUTE,
0 /* beginning of instruction */,
(ptr_uint_t *)&ibl_code->unlinked_ibl_entry);
/* Put ib tgt into dcontext->next_tag */
insert_shared_get_dcontext(dc, &ilist, NULL, true);
APP(&ilist, SAVE_TO_DC(dc, DR_REG_R2, NEXT_TAG_OFFSET));
APP(&ilist, instr_create_restore_from_tls(dc, DR_REG_R5, DCONTEXT_BASE_SPILL_SLOT));
APP(&ilist, instr_create_restore_from_tls(dc, DR_REG_R2, TLS_REG2_SLOT));
/* ldr x1, [x(stolen), #(offs)] */
APP(&ilist,
INSTR_CREATE_ldr(dc, opnd_create_reg(DR_REG_X1),
OPND_TLS_FIELD(TLS_FCACHE_RETURN_SLOT)));
/* br x1 */
APP(&ilist, INSTR_CREATE_br(dc, opnd_create_reg(DR_REG_X1)));
ibl_code->ibl_routine_length = encode_with_patch_list(dc, patch, &ilist, pc);
instrlist_clear(dc, &ilist);
return pc + ibl_code->ibl_routine_length;
}
void
relink_special_ibl_xfer(dcontext_t *dcontext, int index,
ibl_entry_point_type_t entry_type, ibl_branch_type_t ibl_type)
{
generated_code_t *code;
byte *ibl_tgt;
uint *pc;
if (dcontext == GLOBAL_DCONTEXT) {
ASSERT(!special_ibl_xfer_is_thread_private()); /* else shouldn't be called */
code = SHARED_GENCODE_MATCH_THREAD(get_thread_private_dcontext());
} else {
ASSERT(special_ibl_xfer_is_thread_private()); /* else shouldn't be called */
code = THREAD_GENCODE(dcontext);
}
if (code == NULL) /* thread private that we don't need */
return;
ibl_tgt = special_ibl_xfer_tgt(dcontext, code, entry_type, ibl_type);
ASSERT(code->special_ibl_xfer[index] != NULL);
pc = (uint *)(code->special_ibl_xfer[index] + code->special_ibl_unlink_offs[index]);
uint *write_pc = (uint *)vmcode_get_writable_addr((byte *)pc);
protect_generated_code(code, WRITABLE);
/* ldr x1, [x(stolen), #(offs)] */
write_pc[0] = (0xf9400000 | 1 | (dr_reg_stolen - DR_REG_X0) << 5 |
get_ibl_entry_tls_offs(dcontext, ibl_tgt) >> 3 << 10);
/* br x1 */
write_pc[1] = 0xd61f0000 | 1 << 5;
machine_cache_sync(pc, pc + 2, true);
protect_generated_code(code, READONLY);
}
/* addr must be a writable (vmcode) address. */
bool
fill_with_nops(dr_isa_mode_t isa_mode, byte *addr, size_t size)
{
byte *pc;
if (!ALIGNED(addr, 4) || !ALIGNED(addr + size, 4)) {
ASSERT_NOT_REACHED();
return false;
}
for (pc = addr; pc < addr + size; pc += 4)
*(uint *)pc = RAW_NOP_INST; /* nop */
return true;
}
| 1 | 24,495 | Wait -- where did this change come from? This seems unrelated to trace building and seems like it could affect basic execution, unlike the rest of this PR which is all under the off-by-default trace option. Please separate this into its own PR and probably ask @AssadHashmi to review. | DynamoRIO-dynamorio | c |
@@ -0,0 +1,3 @@
+module.exports = {
+ presets: [require.resolve('@docusaurus/core/lib/babel/preset')],
+}; | 1 | 1 | 18,016 | seems to be redundant (mention `docusaurus` ) | handsontable-handsontable | js |
|
@@ -206,6 +206,12 @@ public interface Table {
*/
Rollback rollback();
+  /**
+   * Create a new {@link CherryPick cherry-pick API} to apply changes from a snapshot and commit.
+   * @return a new {@link CherryPick}
+   */
+  CherryPick cherrypick();
+
/**
* Create a new {@link Transaction transaction API} to commit multiple table operations at once.
* | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
/**
* Represents a table.
*/
public interface Table {
/**
* Refresh the current table metadata.
*/
void refresh();
/**
* Create a new {@link TableScan scan} for this table.
* <p>
* Once a table scan is created, it can be refined to project columns and filter data.
*
* @return a table scan for this table
*/
TableScan newScan();
/**
* Return the {@link Schema schema} for this table.
*
* @return this table's schema
*/
Schema schema();
/**
* Return the {@link PartitionSpec partition spec} for this table.
*
* @return this table's partition spec
*/
PartitionSpec spec();
/**
* Return a map of {@link PartitionSpec partition specs} for this table.
*
* @return this table's partition specs map
*/
Map<Integer, PartitionSpec> specs();
/**
* Return a map of string properties for this table.
*
* @return this table's properties map
*/
Map<String, String> properties();
/**
* Return the table's base location.
*
* @return this table's location
*/
String location();
/**
* Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
*
* @return the current table Snapshot.
*/
Snapshot currentSnapshot();
/**
* Get the {@link Snapshot snapshot} of this table with the given id, or null if there is no
* matching snapshot.
*
* @return the {@link Snapshot} with the given id.
*/
Snapshot snapshot(long snapshotId);
/**
* Get the {@link Snapshot snapshots} of this table.
*
* @return an Iterable of snapshots of this table.
*/
Iterable<Snapshot> snapshots();
/**
* Get the snapshot history of this table.
*
* @return a list of {@link HistoryEntry history entries}
*/
List<HistoryEntry> history();
/**
* Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
*
* @return a new {@link UpdateSchema}
*/
UpdateSchema updateSchema();
/**
* Create a new {@link UpdateProperties} to update table properties and commit the changes.
*
* @return a new {@link UpdateProperties}
*/
UpdateProperties updateProperties();
/**
* Create a new {@link UpdateLocation} to update table location and commit the changes.
*
* @return a new {@link UpdateLocation}
*/
UpdateLocation updateLocation();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
*
* @return a new {@link AppendFiles}
*/
AppendFiles newAppend();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
* <p>
* Using this method signals to the underlying implementation that the append should not perform
* extra work in order to commit quickly. Fast appends are not recommended for normal writes
* because the fast commit may cause split planning to slow down over time.
* <p>
* Implementations may not support fast appends, in which case this will return the same appender
* as {@link #newAppend()}.
*
* @return a new {@link AppendFiles}
*/
default AppendFiles newFastAppend() {
return newAppend();
}
/**
* Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
*
* @return a new {@link RewriteFiles}
*/
RewriteFiles newRewrite();
/**
* Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
* table and commit.
*
* @return a new {@link RewriteManifests}
*/
RewriteManifests rewriteManifests();
/**
* Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
*
* @return a new {@link OverwriteFiles}
*/
OverwriteFiles newOverwrite();
/**
* Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
* overwrite partitions in the table with new data.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
*
* @return a new {@link ReplacePartitions}
*/
ReplacePartitions newReplacePartitions();
/**
* Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
*
* @return a new {@link DeleteFiles}
*/
DeleteFiles newDelete();
/**
* Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
*
* @return a new {@link ExpireSnapshots}
*/
ExpireSnapshots expireSnapshots();
/**
* Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
*
* @return a new {@link Rollback}
*/
Rollback rollback();
/**
* Create a new {@link Transaction transaction API} to commit multiple table operations at once.
*
* @return a new {@link Transaction}
*/
Transaction newTransaction();
/**
* @return a {@link FileIO} to read and write table data and metadata files
*/
FileIO io();
/**
* @return an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt
* data files.
*/
EncryptionManager encryption();
/**
* @return a {@link LocationProvider} to provide locations for new data files
*/
LocationProvider locationProvider();
}
| 1 | 17,106 | Can we combine this with the Rollback API? We could still support the `rollback` method here, but combine `Rollback` and `CherryPick` into something like `ManageSnapshots`. Then we could reuse logic for enforcing checks of the current snapshot. What do you think? | apache-iceberg | java |
@@ -52,7 +52,7 @@ static struct internal_amazon_batch_amazon_ids{
char* aws_secret_access_key;
char* aws_region;
char* aws_email;
- char* master_env_prefix;
+ char* manager_env_prefix;
}initialized_data;
static unsigned int gen_guid(){ | 1 | /*
* Copyright (C) 2018- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include "batch_job_internal.h"
#include "process.h"
#include "batch_job.h"
#include "stringtools.h"
#include "debug.h"
#include "jx_parse.h"
#include "jx.h"
#include "jx_pretty_print.h"
#include "itable.h"
#include "hash_table.h"
#include "fast_popen.h"
#include "sh_popen.h"
#include "list.h"
#include <time.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <errno.h>
static int initialized = 0;
static char* queue_name = NULL;
static char* compute_env_name = NULL;
static char* vpc = NULL;
static char* sec_group = NULL;
static char* subnet = NULL;
static struct itable* done_jobs;
static struct itable* amazon_job_ids;
static struct itable* done_files;
static int instID;
static char* bucket_name = NULL;
static struct hash_table* submitted_files = NULL;
static int HAS_SUBMITTED_VALUE = 1;
union amazon_batch_ccl_guid{
char c[8];
unsigned int ul;
};
static struct internal_amazon_batch_amazon_ids{
char* aws_access_key_id;
char* aws_secret_access_key;
char* aws_region;
char* aws_email;
char* master_env_prefix;
}initialized_data;
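/* Generate a pseudo-random id by reading 8 bytes from /dev/urandom into a
 * union and returning its unsigned int view.  Used for unique job names,
 * job ids and temporary file names. */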
static unsigned int gen_guid(){
FILE* ran = fopen("/dev/urandom","r");
if(!ran)
fatal("Cannot open /dev/urandom");
union amazon_batch_ccl_guid guid;
size_t k = fread(guid.c,sizeof(char),8,ran);
if(k<8)
fatal("couldn't read 8 bytes from /dev/urandom/");
fclose(ran);
return guid.ul;
}
static struct jx* run_command(char* cmd){
FILE* out = sh_popen(cmd);
if(out == NULL){
fatal("fast_popen returned a null FILE* pointer");
}
struct jx* jx = jx_parse_stream(out);
if(jx == NULL){
fatal("JX parse stream out returned a null jx object");
}
sh_pclose(out);
return jx;
}
static struct list* extract_file_names_from_list(char* in){
struct list* output = list_create();
char* tmp = strdup(in);
char* ta = strtok(tmp,",");
while(ta != NULL){
int push_success = list_push_tail(output,strdup(ta));
if(!push_success){
fatal("Error appending file name to list due to being out of memory");
}
ta = strtok(0,",");
}
return output;
}
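/* Tar each file in the comma-separated list and upload the archive to the
 * configured S3 bucket.  Files already uploaded in this session (tracked in
 * submitted_files) are skipped.  Returns 1 on success, 0 if any upload fails. */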
static int upload_input_files_to_s3(char* files,char* jobname){
int success = 1;
char* env_var = initialized_data.master_env_prefix;
struct list* file_list = extract_file_names_from_list(files);
debug(D_BATCH,"extra input files list: %s, len: %i",files, list_size(file_list));
list_first_item(file_list);
char* cur_file = NULL;
while((cur_file = list_next_item(file_list)) != NULL){
if(hash_table_lookup(submitted_files,cur_file) == &HAS_SUBMITTED_VALUE){
continue;
}
debug(D_BATCH,"Submitting file: %s",cur_file);
char* put_file_command = string_format("tar -cvf %s.txz %s && %s aws s3 cp %s.txz s3://%s/%s.txz ",cur_file,cur_file,env_var,cur_file,bucket_name,cur_file);
int ret = sh_system(put_file_command);
if(ret != 0){
debug(D_BATCH,"File Submission: %s FAILURE return code: %i",cur_file,ret);
success = 0;
}else{
debug(D_BATCH,"File Submission: %s SUCCESS return code: %i",cur_file,ret);
}
free(put_file_command);
put_file_command = string_format("rm %s.txz",cur_file);
sh_system(put_file_command);
free(put_file_command);
//assume everything went well?
hash_table_insert(submitted_files,cur_file,&HAS_SUBMITTED_VALUE);
}
list_free(file_list);
list_delete(file_list);
return success;
}
static struct internal_amazon_batch_amazon_ids initialize(struct batch_queue* q){
if(initialized){
return initialized_data;
}
char* config_file = hash_table_lookup(q->options,"amazon-batch-config");
if(!config_file) {
fatal("No amazon config file passed!");
}
struct jx* config = jx_parse_file(config_file);
initialized = 1;
instID = time(NULL);
queue_name = string_format("%i_ccl_amazon_batch_queue",instID);//should be unique
done_jobs = itable_create(0);//default size
amazon_job_ids = itable_create(0);
done_files = itable_create(0);
submitted_files = hash_table_create(0,0);
char* amazon_ami = hash_table_lookup(q->options,"amazon-batch-img");
if(amazon_ami == NULL) {
fatal("No image id passed. Please pass file containing ami image id using --amazon-batch-img flag");
}
char* aws_access_key_id = (char*)jx_lookup_string(config, "aws_id");
char* aws_secret_access_key = (char*)jx_lookup_string(config, "aws_key");
char* aws_region = (char*)jx_lookup_string(config,"aws_reg");
bucket_name = (char*)jx_lookup_string(config,"bucket");
vpc = (char*)jx_lookup_string(config,"vpc");
sec_group = (char*)jx_lookup_string(config,"sec_group");
queue_name = (char*)jx_lookup_string(config,"queue_name");
compute_env_name = (char*)jx_lookup_string(config,"env_name");
subnet = (char*)jx_lookup_string(config,"subnet");
if(!aws_access_key_id)
fatal("credentials file %s does not contain aws_id",config_file);
if(!aws_secret_access_key)
fatal("credentials file %s does not contain aws_key",config_file);
if(!aws_region)
fatal("credentials file %s does not contain aws_reg",config_file);
if(!bucket_name)
fatal("credentials file %s does not contain bucket",config_file);
if(!queue_name)
fatal("credentials file %s does not contain queue_name",config_file);
if(!compute_env_name)
fatal("credentials file %s does not contain env_name",config_file);
if(!vpc)
fatal("credentials file %s does not contain vpc",config_file);
if(!subnet)
fatal("credentials file %s does not contain subnet",config_file);
char* env_var = string_format("AWS_ACCESS_KEY_ID=%s AWS_SECRET_ACCESS_KEY=%s AWS_DEFAULT_REGION=%s ",aws_access_key_id,aws_secret_access_key,aws_region);
initialized_data.aws_access_key_id = aws_access_key_id;
initialized_data.aws_secret_access_key = aws_secret_access_key;
initialized_data.aws_region=aws_region;
initialized_data.master_env_prefix = env_var;
return initialized_data;
}
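/* Build a newline-separated string of shell commands that copy each file in
 * the comma-separated list between src and dst.  When dst is an S3 path the
 * file is tarred and the archive uploaded; otherwise the archive is pulled
 * from S3 and untarred. */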
static char* generate_s3_cp_cmds(char* files, char* src, char* dst){
char* env_var = initialized_data.master_env_prefix;
struct list* file_list = extract_file_names_from_list(files);
list_first_item(file_list);
char* new_cmd=malloc(sizeof(char)*1);
new_cmd[0]='\0';
if(list_size(file_list)> 0){
char* copy_cmd_prefix = string_format("%s aws s3 cp ", env_var);
char* cur_file = NULL;
while((cur_file=list_next_item(file_list)) != NULL){
char* tmp;
if(strstr(dst,"s3")){
tmp = string_format("tar -cvf %s.txz %s && %s %s/%s.txz %s/%s.txz",cur_file,cur_file,copy_cmd_prefix, src, cur_file, dst, cur_file);
}else{
tmp = string_format("%s %s/%s.txz %s/%s.txz && tar -xvf %s.txz",copy_cmd_prefix, src, cur_file, dst, cur_file, cur_file);
}
char* tmp2 = string_format("%s\n%s\n",new_cmd,tmp);
free(new_cmd);
free(tmp);
new_cmd = tmp2;
}
}
list_free(file_list);
list_delete(file_list);
return new_cmd;
}
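/* Build shell commands that mark each file in the comma-separated list as
 * executable inside the container. */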
static char* chmod_all(char* files){
struct list* file_list = extract_file_names_from_list(files);
list_first_item(file_list);
char* new_cmd=malloc(sizeof(char)*1);
new_cmd[0]='\0';
char* cur_file = NULL;
if(list_size(file_list) > 0){
while((cur_file=list_next_item(file_list)) != NULL){
char* tmp = string_format("chmod +x %s",cur_file);
char* tmp2 = string_format("%s\n%s",new_cmd,tmp);
free(new_cmd);
free(tmp);
new_cmd=tmp2;
}
}
list_free(file_list);
list_delete(file_list);
return new_cmd;
}
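/* Generate the wrapper script that runs inside the AWS Batch container: pull
 * the input archives from S3, mark the inputs executable, run the actual
 * command, then push the output archives back to S3.  For a single input IN
 * and output OUT the generated script looks roughly like:
 *   #!/bin/sh
 *   <env> aws s3 cp s3://<bucket>/IN.txz ./IN.txz && tar -xvf IN.txz
 *   chmod +x IN
 *   <cmd>
 *   tar -cvf OUT.txz OUT && <env> aws s3 cp ./OUT.txz s3://<bucket>/OUT.txz
 * The script is written to a temporary file and uploaded to
 * s3://<bucket>/COMAND_FILE_<jobid>.sh for the container to fetch. */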
static void upload_cmd_file(char* bucket_name, char* input_files, char* output_files, char* cmd, unsigned int jobid){
char* env_var = initialized_data.master_env_prefix;
//Create command to pull files from s3 and into local space to work on
char* bucket = string_format("s3://%s",bucket_name);
char* cpy_in = generate_s3_cp_cmds(input_files,bucket,"./");
char* chmod = chmod_all(input_files);
//run the actual command
char* cmd_tmp = string_format("%s\n%s\n%s\n",cpy_in,chmod,cmd);
free(cpy_in);
//copy out any external files
char* cpy_out = generate_s3_cp_cmds(output_files,"./",bucket);
cmd_tmp = string_format("%s\n%s\n",cmd_tmp,cpy_out);
	//add header
char* final_cmd = string_format("#!/bin/sh\n%s",cmd_tmp);
free(cmd_tmp);
//write out to file
unsigned int tempuid = gen_guid();
char* tmpfile_string = string_format("TEMPFILE-%u.sh",tempuid);
FILE* tmpfile = fopen(tmpfile_string,"w+");
fwrite(final_cmd,sizeof(char),strlen(final_cmd),tmpfile);
fclose(tmpfile);
free(final_cmd);
//make executable and put into s3
cmd_tmp = string_format("chmod +x %s",tmpfile_string);
sh_system(cmd_tmp);
free(cmd_tmp);
cmd_tmp = string_format("%s aws s3 cp %s s3://%s/COMAND_FILE_%u.sh",env_var,tmpfile_string,bucket_name,jobid);
sh_system(cmd_tmp);
free(cmd_tmp);
remove(tmpfile_string);
free(tmpfile_string);
}
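/* Register a per-job AWS Batch job definition described by properties_string
 * and submit the job to the configured queue.  Returns the Amazon-assigned
 * job id as a newly allocated string. */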
static char* aws_submit_job(char* job_name, char* properties_string){
char* queue = queue_name;
char* env_var = initialized_data.master_env_prefix;
//submit the job-def
char* tmp = string_format("%s aws batch register-job-definition --job-definition-name %s_def --type container --container-properties \"%s\"",env_var,job_name, properties_string);
debug(D_BATCH,"Creating the Job Definition: %s",tmp);
struct jx* jx = run_command(tmp);
free(tmp);
char* arn = (char*)jx_lookup_string(jx,"jobDefinitionArn");
if(arn == NULL){
fatal("Fatal error when trying to create the job definition!");
}
jx_delete(jx);
//now that we have create a job-definition, we can submit the job.
tmp = string_format("%s aws batch submit-job --job-name %s --job-queue %s --job-definition %s_def",env_var,job_name,queue,job_name);
debug(D_BATCH,"Submitting the job: %s",tmp);
jx = run_command(tmp);
free(tmp);
char* jaid = strdup((char*)jx_lookup_string(jx,"jobId"));
if(!jaid)
fatal("NO JOB ID FROM AMAZON GIVEN");
jx_delete(jx);
return jaid;
}
enum{
DESCRIBE_AWS_JOB_SUCCESS = 1, //job exists, succeeded
DESCRIBE_AWS_JOB_FAILED = 0, //job exists, failed
DESCRIBE_AWS_JOB_NON_FINAL = -1, //exists, but in non-final state
DESCRIBE_AWS_JOB_NON_EXIST = -2 //job doesn't exist, should treat as a failure
};
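/* Ask AWS Batch for the container exit code reported for the given job. */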
static int finished_aws_job_exit_code(char* aws_jobid, char* env_var){
char* cmd = string_format("aws batch describe-jobs --jobs %s",aws_jobid);
struct jx* jx = run_command(cmd);
free(cmd);
struct jx* jobs_array = jx_lookup(jx,"jobs");
if(!jobs_array){
debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
return DESCRIBE_AWS_JOB_NON_EXIST;
}
struct jx* first_item = jx_array_index(jobs_array,0);
if(!first_item){
debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
return DESCRIBE_AWS_JOB_NON_EXIST;
}
int ret = (int)jx_lookup_integer(first_item,"exitCode");
jx_delete(jx);
return ret;
}
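/* Query AWS Batch for the job's current status and map it onto the
 * DESCRIBE_AWS_JOB_* codes above, logging the creation/start/stop timestamps
 * once the job has reached a final state. */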
static int describe_aws_job(char* aws_jobid, char* env_var){
char* cmd = string_format("aws batch describe-jobs --jobs %s",aws_jobid);
struct jx* jx = run_command(cmd);
free(cmd);
int succeed = DESCRIBE_AWS_JOB_NON_FINAL; //default status
struct jx* jobs_array = jx_lookup(jx,"jobs");
if(!jobs_array){
debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
return DESCRIBE_AWS_JOB_NON_EXIST;
}
struct jx* first_item = jx_array_index(jobs_array,0);
if(!first_item){
debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
return DESCRIBE_AWS_JOB_NON_EXIST;
}
if(strstr((char*)jx_lookup_string(first_item,"status"),"SUCCEEDED")){
succeed = DESCRIBE_AWS_JOB_SUCCESS;
}
if(strstr((char*)jx_lookup_string(first_item,"status"),"FAILED")){
succeed = DESCRIBE_AWS_JOB_FAILED;
}
//start and stop
if(succeed == DESCRIBE_AWS_JOB_SUCCESS || succeed == DESCRIBE_AWS_JOB_FAILED){
int64_t created_string = (int64_t) jx_lookup_integer(first_item,"createdAt");
int64_t start_string = (int64_t)jx_lookup_integer(first_item,"startedAt");
int64_t end_string = (int64_t)jx_lookup_integer(first_item,"stoppedAt");
if(created_string != 0 ){
debug(D_BATCH,"Job %s was created at: %"PRIi64"",aws_jobid,created_string);
}
if(start_string != 0 ){
debug(D_BATCH,"Job %s started at: %"PRIi64"",aws_jobid,start_string);
}
if(end_string != 0 ){
debug(D_BATCH,"Job %s ended at: %"PRIi64"",aws_jobid,end_string);
}
}
jx_delete(jx);
return succeed;
}
static char* aws_job_def(char* aws_jobid){
char* cmd = string_format("aws batch describe-jobs --jobs %s",aws_jobid);
struct jx* jx = run_command(cmd);
free(cmd);
struct jx* jobs_array = jx_lookup(jx,"jobs");
if(!jobs_array){
debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
return NULL;
}
struct jx* first_item = jx_array_index(jobs_array,0);
if(!first_item){
debug(D_BATCH,"Problem with given aws_jobid: %s",aws_jobid);
return NULL;
}
char* ret = string_format("%s",(char*)jx_lookup_string(first_item,"jobDefinition"));
jx_delete(jx);
return ret;
}
static int del_job_def(char* jobdef){
char* cmd = string_format("aws batch deregister-job-definition --job-definition %s",jobdef);
int ret = sh_system(cmd);
free(cmd);
return ret;
}
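/* Submit one task to AWS Batch: upload its input files and a generated
 * wrapper script to S3, register a job definition whose container runs that
 * script, submit the job, and remember the Amazon job id so wait() can poll
 * it later. */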
static batch_job_id_t batch_job_amazon_batch_submit(struct batch_queue* q, const char* cmd, const char* extra_input_files, const char* extra_output_files, struct jx* envlist, const struct rmsummary* resources){
struct internal_amazon_batch_amazon_ids amazon_ids = initialize(q);
char* env_var = amazon_ids.master_env_prefix;
//so, we have the access keys, now we need to either set up the queues and exec environments, or add them.
unsigned int jobid = gen_guid();
char* job_name = string_format("%s_%u",queue_name,jobid);
//makeflow specifics
struct batch_job_info *info = malloc(sizeof(*info));
memset(info, 0, sizeof(*info));
//specs
int cpus=1;
long int mem=1000;
char* img = hash_table_lookup(q->options,"amazon-batch-img");
int disk = 1000;
if(resources){
cpus = resources->cores;
mem = resources->memory;
disk = resources->disk;
cpus = cpus > 1? cpus:1;
mem = mem > 1000? mem:1000;
disk = disk > 1000 ? disk : 1000;
}
//upload files to S3
upload_input_files_to_s3((char*)extra_input_files,job_name);
upload_cmd_file(bucket_name,(char*)extra_input_files,(char*)extra_output_files,(char*)cmd,jobid);
	//create the fmt_cmd string to give to the command
char* fmt_cmd = string_format("%s aws s3 cp s3://%s/COMAND_FILE_%u.sh ./ && sh ./COMAND_FILE_%u.sh",env_var,bucket_name,jobid,jobid);
//combine all properties together
char* properties_string = string_format("{ \\\"image\\\": \\\"%s\\\", \\\"vcpus\\\": %i, \\\"memory\\\": %li, \\\"privileged\\\":true ,\\\"command\\\": [\\\"sh\\\",\\\"-c\\\",\\\"%s\\\"], \\\"environment\\\":[{\\\"name\\\":\\\"AWS_ACCESS_KEY_ID\\\",\\\"value\\\":\\\"%s\\\"},{\\\"name\\\":\\\"AWS_SECRET_ACCESS_KEY\\\",\\\"value\\\":\\\"%s\\\"},{\\\"name\\\":\\\"REGION\\\",\\\"value\\\":\\\"%s\\\"}] }", img,cpus,mem,fmt_cmd,amazon_ids.aws_access_key_id,amazon_ids.aws_secret_access_key,amazon_ids.aws_region);
char* jaid = aws_submit_job(job_name,properties_string);
itable_insert(amazon_job_ids,jobid,jaid);
debug(D_BATCH,"Job %u has amazon id: %s",jobid,jaid);
itable_insert(done_files,jobid,string_format("%s",extra_output_files));
debug(D_BATCH,"Job %u successfully Submitted",jobid);
//let makeflow know
info->submitted = time(0);
info->started = time(0);
itable_insert(q->job_table, jobid, info);
//cleanup
free(job_name);
free(fmt_cmd);
return jobid;
}
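/* Poll all outstanding AWS Batch jobs.  For the first job found in a final
 * state, pull its output files back from S3, fill in *info_out and return its
 * batch job id; return -1 if no job has finished yet. */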
static batch_job_id_t batch_job_amazon_batch_wait(struct batch_queue *q, struct batch_job_info *info_out, time_t stoptime){
struct internal_amazon_batch_amazon_ids amazon_ids = initialize(q);
//succeeded check
int done = 0;
char* env_var = amazon_ids.master_env_prefix;
itable_firstkey(amazon_job_ids);
char* jaid;
UINT64_T jobid;
while(itable_nextkey(amazon_job_ids,&jobid,(void**)&jaid)){
done = describe_aws_job(jaid,env_var);
char* jobname = string_format("%s_%u",queue_name,(unsigned int)jobid);
unsigned int id = (unsigned int)jobid;
if(done == DESCRIBE_AWS_JOB_SUCCESS){
if(itable_lookup(done_jobs,id+1) == NULL){
//id is done, returning here
debug(D_BATCH,"Inserting id: %u into done_jobs",id);
itable_insert(done_jobs,id+1,jobname);
itable_remove(amazon_job_ids,jobid);
//pull files from s3
char* output_files = itable_lookup(done_files,id);
struct list* file_list = extract_file_names_from_list(output_files);
if(list_size(file_list)> 0){
list_first_item(file_list);
char* cur_file = NULL;
while((cur_file=list_next_item(file_list)) != NULL){
debug(D_BATCH,"Copying over %s",cur_file);
char* get_from_s3_cmd = string_format("%s aws s3 cp s3://%s/%s.txz ./%s.txz && tar -xvf %s.txz && rm %s.txz",env_var,bucket_name,cur_file,cur_file, cur_file, cur_file);
int outputcode = sh_system(get_from_s3_cmd);
debug(D_BATCH,"output code from calling S3 to pull file %s: %i",cur_file,outputcode);
FILE* tmpOut = fopen(cur_file,"r");
if(tmpOut){
debug(D_BATCH,"File does indeed exist: %s",cur_file);
fclose(tmpOut);
}else{
debug(D_BATCH,"File doesn't exist: %s",cur_file);
}
free(get_from_s3_cmd);
}
}
list_free(file_list);
list_delete(file_list);
//Let Makeflow know we're all done!
debug(D_BATCH,"Removing the job from the job_table");
struct batch_job_info* info = itable_remove(q->job_table, id);//got from batch_job_amazon.c
info->finished = time(0);//get now
info->exited_normally=1;
info->exit_code=finished_aws_job_exit_code(jaid,env_var);
debug(D_BATCH,"copying over the data to info_out");
memcpy(info_out, info, sizeof(struct batch_job_info));
free(info);
char* jobdef = aws_job_def(jaid);
del_job_def(jobdef);
free(jobdef);
return id;
}
}else if(done == DESCRIBE_AWS_JOB_FAILED || done == DESCRIBE_AWS_JOB_NON_EXIST){
if(itable_lookup(done_jobs,id+1)==NULL){
//id is done, returning here
itable_insert(done_jobs,id+1,jobname);
itable_remove(amazon_job_ids,jobid);
debug(D_BATCH,"Failed job: %i",id);
struct batch_job_info* info = itable_remove(q->job_table, id);//got from batch_job_amazon.c
info->finished = time(0); //get now
info->exited_normally=0;
int exc = finished_aws_job_exit_code(jaid,env_var);
info->exit_code= exc == 0 ? -1 : exc;
memcpy(info_out, info, sizeof(*info));
free(info);
char* jobdef = aws_job_def(jaid);
del_job_def(jobdef);
free(jobdef);
return id;
}
}else{
continue;
}
}
return -1;
}
static int batch_job_amazon_batch_remove(struct batch_queue *q, batch_job_id_t jobid){
struct internal_amazon_batch_amazon_ids amazon_ids = initialize(q);
char* env_var = amazon_ids.master_env_prefix;
if(itable_lookup(done_jobs,jobid)==NULL){
char* name = string_format("%s_%i",queue_name,(int)jobid);
itable_insert(done_jobs,jobid+1,name);
}
char* amazon_id;
if((amazon_id=itable_lookup(amazon_job_ids,jobid))==NULL){
return -1;
}
char* cmd = string_format("%s aws batch terminate-job --job-id %s --reason \"Makeflow Killed\"",env_var,amazon_id);
debug(D_BATCH,"Terminating the job: %s\n",cmd);
sh_system(cmd);
free(cmd);
return 0;
}
batch_queue_stub_create(amazon_batch);
batch_queue_stub_free(amazon_batch);
batch_queue_stub_port(amazon_batch);
batch_queue_stub_option_update(amazon_batch);
batch_fs_stub_chdir(amazon_batch);
batch_fs_stub_getcwd(amazon_batch);
batch_fs_stub_mkdir(amazon_batch);
batch_fs_stub_putfile(amazon_batch);
batch_fs_stub_rename(amazon_batch);
batch_fs_stub_stat(amazon_batch);
batch_fs_stub_unlink(amazon_batch);
const struct batch_queue_module batch_queue_amazon_batch = {
BATCH_QUEUE_TYPE_AMAZON_BATCH,
"amazon-batch",
batch_queue_amazon_batch_create,
batch_queue_amazon_batch_free,
batch_queue_amazon_batch_port,
batch_queue_amazon_batch_option_update,
{
batch_job_amazon_batch_submit,
batch_job_amazon_batch_wait,
batch_job_amazon_batch_remove,
},
{
batch_fs_amazon_batch_chdir,
batch_fs_amazon_batch_getcwd,
batch_fs_amazon_batch_mkdir,
batch_fs_amazon_batch_putfile,
batch_fs_amazon_batch_rename,
batch_fs_amazon_batch_stat,
batch_fs_amazon_batch_unlink,
},
};
| 1 | 15,071 | Maybe just `env_prefix`, I don't think this is referring to the WQ manager. | cooperative-computing-lab-cctools | c |
@@ -11,7 +11,6 @@ import android.widget.FrameLayout;
import com.bytehamster.lib.preferencesearch.SearchPreferenceResult;
import com.bytehamster.lib.preferencesearch.SearchPreferenceResultListener;
-
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.fragment.preferences.AutoDownloadPreferencesFragment; | 1 | package de.danoeh.antennapod.activity;
import android.os.Bundle;
import androidx.appcompat.app.ActionBar;
import androidx.appcompat.app.AppCompatActivity;
import androidx.preference.PreferenceFragmentCompat;
import android.view.Menu;
import android.view.MenuItem;
import android.view.ViewGroup;
import android.widget.FrameLayout;
import com.bytehamster.lib.preferencesearch.SearchPreferenceResult;
import com.bytehamster.lib.preferencesearch.SearchPreferenceResultListener;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.fragment.preferences.AutoDownloadPreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.GpodderPreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.ImportExportPreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.MainPreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.NetworkPreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.NotificationPreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.PlaybackPreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.StoragePreferencesFragment;
import de.danoeh.antennapod.fragment.preferences.UserInterfacePreferencesFragment;
/**
* PreferenceActivity for API 11+. In order to change the behavior of the preference UI, see
* PreferenceController.
*/
public class PreferenceActivity extends AppCompatActivity implements SearchPreferenceResultListener {
private static final String FRAGMENT_TAG = "tag_preferences";
@Override
protected void onCreate(Bundle savedInstanceState) {
setTheme(UserPreferences.getTheme());
super.onCreate(savedInstanceState);
ActionBar ab = getSupportActionBar();
if (ab != null) {
ab.setDisplayHomeAsUpEnabled(true);
}
FrameLayout root = new FrameLayout(this);
root.setId(R.id.content);
root.setLayoutParams(new FrameLayout.LayoutParams(ViewGroup.LayoutParams.MATCH_PARENT,
ViewGroup.LayoutParams.MATCH_PARENT));
setContentView(root);
if (getSupportFragmentManager().findFragmentByTag(FRAGMENT_TAG) == null) {
getSupportFragmentManager().beginTransaction()
.replace(R.id.content, new MainPreferencesFragment(), FRAGMENT_TAG)
.commit();
}
}
private PreferenceFragmentCompat getPreferenceScreen(int screen) {
PreferenceFragmentCompat prefFragment = null;
if (screen == R.xml.preferences_user_interface) {
prefFragment = new UserInterfacePreferencesFragment();
} else if (screen == R.xml.preferences_network) {
prefFragment = new NetworkPreferencesFragment();
} else if (screen == R.xml.preferences_storage) {
prefFragment = new StoragePreferencesFragment();
} else if (screen == R.xml.preferences_import_export) {
prefFragment = new ImportExportPreferencesFragment();
} else if (screen == R.xml.preferences_autodownload) {
prefFragment = new AutoDownloadPreferencesFragment();
} else if (screen == R.xml.preferences_gpodder) {
prefFragment = new GpodderPreferencesFragment();
} else if (screen == R.xml.preferences_playback) {
prefFragment = new PlaybackPreferencesFragment();
} else if (screen == R.xml.preferences_notifications) {
prefFragment = new NotificationPreferencesFragment();
}
return prefFragment;
}
public static int getTitleOfPage(int preferences) {
switch (preferences) {
case R.xml.preferences_network:
return R.string.network_pref;
case R.xml.preferences_autodownload:
return R.string.pref_automatic_download_title;
case R.xml.preferences_playback:
return R.string.playback_pref;
case R.xml.preferences_storage:
return R.string.storage_pref;
case R.xml.preferences_import_export:
return R.string.import_export_pref;
case R.xml.preferences_user_interface:
return R.string.user_interface_label;
case R.xml.preferences_gpodder:
return R.string.gpodnet_main_label;
case R.xml.preferences_notifications:
return R.string.notification_pref_fragment;
default:
return R.string.settings_label;
}
}
public PreferenceFragmentCompat openScreen(int screen) {
PreferenceFragmentCompat fragment = getPreferenceScreen(screen);
getSupportFragmentManager().beginTransaction().replace(R.id.content, fragment)
.addToBackStack(getString(getTitleOfPage(screen))).commit();
return fragment;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
super.onCreateOptionsMenu(menu);
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (item.getItemId() == android.R.id.home) {
if (getSupportFragmentManager().getBackStackEntryCount() == 0) {
finish();
} else {
getSupportFragmentManager().popBackStack();
}
return true;
}
return false;
}
@Override
public void onSearchResultClicked(SearchPreferenceResult result) {
PreferenceFragmentCompat fragment = openScreen(result.getResourceFile());
result.highlight(fragment);
}
}
| 1 | 18,375 | Unrelated line change :) | AntennaPod-AntennaPod | java |
@@ -2,9 +2,13 @@ package net
import (
"strings"
+ "syscall"
+ "time"
"github.com/coreos/go-iptables/iptables"
"github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ utilwait "k8s.io/apimachinery/pkg/util/wait"
)
// AddChainWithRules creates a chain and appends given rules to it. | 1 | package net
import (
"strings"
"github.com/coreos/go-iptables/iptables"
"github.com/pkg/errors"
)
// AddChainWithRules creates a chain and appends given rules to it.
//
// If the chain exists, but its rules are not the same as the given ones, the
// function will flush the chain and then will append the rules.
func AddChainWithRules(ipt *iptables.IPTables, table, chain string, rulespecs [][]string) error {
if err := ensureChains(ipt, table, chain); err != nil {
return err
}
currRuleSpecs, err := ipt.List(table, chain)
if err != nil {
return errors.Wrapf(err, "iptables -S. table: %q, chain: %q", table, chain)
}
// First returned rule is "-N $(chain)", so ignore it
currRules := strings.Join(currRuleSpecs[1:], "\n")
rules := make([]string, 0)
for _, r := range rulespecs {
rules = append(rules, strings.Join(r, " "))
}
reqRules := strings.Join(rules, "\n")
if currRules == reqRules {
return nil
}
if err := ipt.ClearChain(table, chain); err != nil {
return err
}
for _, r := range rulespecs {
if err := ipt.Append(table, chain, r...); err != nil {
return errors.Wrapf(err, "iptables -A. table: %q, chain: %q, rule: %s", table, chain, r)
}
}
return nil
}
// ensureChains creates given chains if they do not exist.
func ensureChains(ipt *iptables.IPTables, table string, chains ...string) error {
existingChains, err := ipt.ListChains(table)
if err != nil {
return errors.Wrapf(err, "ipt.ListChains(%s)", table)
}
chainMap := make(map[string]struct{})
for _, c := range existingChains {
chainMap[c] = struct{}{}
}
for _, c := range chains {
if _, found := chainMap[c]; !found {
if err := ipt.NewChain(table, c); err != nil {
return errors.Wrapf(err, "ipt.NewChain(%s, %s)", table, c)
}
}
}
return nil
}
// ensureRulesAtTop ensures the presence of given iptables rules.
//
// If any rule from the list is missing, the function deletes all given
// rules and re-inserts them at the top of the chain to ensure the order of the rules.
func ensureRulesAtTop(table, chain string, rulespecs [][]string, ipt *iptables.IPTables) error {
allFound := true
for _, rs := range rulespecs {
found, err := ipt.Exists(table, chain, rs...)
if err != nil {
return errors.Wrapf(err, "ipt.Exists(%s, %s, %s)", table, chain, rs)
}
if !found {
allFound = false
break
}
}
// All rules exist, do nothing.
if allFound {
return nil
}
for pos, rs := range rulespecs {
// If any is missing, then delete all, as we need to preserve the order of
// given rules. Ignore errors, as rule might not exist.
if !allFound {
ipt.Delete(table, chain, rs...)
}
if err := ipt.Insert(table, chain, pos+1, rs...); err != nil {
return errors.Wrapf(err, "ipt.Append(%s, %s, %s)", table, chain, rs)
}
}
return nil
}
| 1 | 16,029 | I raised an eyebrow at making `net` depend on `k8s.io`, but it seems we're already doing that. | weaveworks-weave | go |
@@ -228,6 +228,9 @@
* @returns {string} encode string
*/
countlyCommon.encodeSomeHtml = function(html, options) {
+ if (countlyGlobal.company) {
+            html = html.replace("Countly", countlyGlobal.company);
+ }
if (options) {
return filterXSS(html, options);
} | 1 | /*global store, countlyGlobal, _, Gauge, d3, moment, countlyTotalUsers, jQuery, filterXSS*/
/**
* Object with common functions to be used for multiple purposes
* @name countlyCommon
* @global
* @namespace countlyCommon
*/
(function(window, $, undefined) {
var CommonConstructor = function() {
// Private Properties
var countlyCommon = this;
var _period = (store.get("countly_date")) ? store.get("countly_date") : "30days";
var _persistentSettings;
var htmlEncodeOptions = {
"whiteList": {"a": ["href", "class", "target"], "b": [], "br": [], "strong": [], "p": [], "span": ["class"], "div": ["class"]},
onTagAttr: function(tag, name, value/* isWhiteAttr*/) {
if (tag === "a") {
if (name === "target" && !(value === "_blank" || value === "_self" || value === "_top" || value === "_parent")) {
return "target='_blank'"; //set _blank if incorrect value
}
if (name === "href" && !(value.substr(0, 1) === "#" || value.substr(0, 1) === "/" || value.substr(0, 4) === "http")) {
return "href='#'"; //set # if incorrect value
}
}
}
};
/**
* Get Browser language
* @returns {string} browser locale in iso format en-US
* @example
* //outputs en-US
* countlyCommon.browserLang()
*/
countlyCommon.browserLang = function() {
var lang = navigator.language || navigator.userLanguage;
if (lang) {
lang = lang.toLowerCase();
lang.length > 3 && (lang = lang.substring(0, 3) + lang.substring(3).toUpperCase());
}
return lang;
};
// Public Properties
/**
* Set user persistent settings to store local storage
* @param {object} data - Object param for set new data
*/
countlyCommon.setPersistentSettings = function(data) {
if (!_persistentSettings) {
_persistentSettings = localStorage.getItem("persistentSettings") ? JSON.parse(localStorage.getItem("persistentSettings")) : {};
}
for (var i in data) {
_persistentSettings[i] = data[i];
}
localStorage.setItem("persistentSettings", JSON.stringify(_persistentSettings));
};
/**
* Get user persistent settings
* @returns {object} settings
*/
countlyCommon.getPersistentSettings = function() {
if (!_persistentSettings) {
_persistentSettings = localStorage.getItem("persistentSettings") ? JSON.parse(localStorage.getItem("persistentSettings")) : {};
}
return _persistentSettings;
};
/**
* App Key of currently selected app or 0 when not initialized
* @type {string|number}
*/
countlyCommon.ACTIVE_APP_KEY = 0;
/**
* App ID of currently selected app or 0 when not initialized
* @type {string|number}
*/
countlyCommon.ACTIVE_APP_ID = 0;
/**
* Current user's selected language in form en-EN, by default will use browser's language
* @type {string}
*/
countlyCommon.BROWSER_LANG = countlyCommon.browserLang() || "en-US";
/**
* Current user's browser language in short form as "en", by default will use browser's language
* @type {string}
*/
countlyCommon.BROWSER_LANG_SHORT = countlyCommon.BROWSER_LANG.split("-")[0];
if (store.get("countly_active_app")) {
if (countlyGlobal.apps[store.get("countly_active_app")]) {
countlyCommon.ACTIVE_APP_KEY = countlyGlobal.apps[store.get("countly_active_app")].key;
countlyCommon.ACTIVE_APP_ID = store.get("countly_active_app");
}
}
if (countlyGlobal.member.lang) {
var lang = countlyGlobal.member.lang;
store.set("countly_lang", lang);
countlyCommon.BROWSER_LANG_SHORT = lang;
countlyCommon.BROWSER_LANG = lang;
}
else if (store.get("countly_lang")) {
var lang1 = store.get("countly_lang");
countlyCommon.BROWSER_LANG_SHORT = lang1;
countlyCommon.BROWSER_LANG = lang1;
}
// Public Methods
/**
* Change currently selected period
* @param {string|array} period - new period, supported values are (month, 60days, 30days, 7days, yesterday, hour or [startMiliseconds, endMiliseconds] as [1417730400000,1420149600000])
* @param {int} timeStamp - timeStamp for the period based
* @param {boolean} noSet - if set - updates countly_date
*/
countlyCommon.setPeriod = function(period, timeStamp, noSet) {
_period = period;
if (timeStamp) {
countlyCommon.periodObj = countlyCommon.calcSpecificPeriodObj(period, timeStamp);
}
else {
countlyCommon.periodObj = calculatePeriodObj(period);
}
window.app.recordEvent({
"key": "period-change",
"count": 1,
"segmentation": {is_custom: Array.isArray(period)}
});
if (noSet) {
return;
}
store.set("countly_date", period);
};
/**
* Get currently selected period
* @returns {string|array} supported values are (month, 60days, 30days, 7days, yesterday, hour or [startMiliseconds, endMiliseconds] as [1417730400000,1420149600000])
*/
countlyCommon.getPeriod = function() {
return _period;
};
/**
* Get currently selected period that can be used in ajax requests
* @returns {string} supported values are (month, 60days, 30days, 7days, yesterday, hour or [startMiliseconds, endMiliseconds] as [1417730400000,1420149600000])
*/
countlyCommon.getPeriodForAjax = function() {
if (Object.prototype.toString.call(_period) === '[object Array]') {
return JSON.stringify(_period);
}
else {
return _period;
}
};
/**
* Change currently selected app by app ID
* @param {string} appId - new app ID from @{countlyGlobal.apps} object
*/
countlyCommon.setActiveApp = function(appId) {
countlyCommon.ACTIVE_APP_KEY = countlyGlobal.apps[appId].key;
countlyCommon.ACTIVE_APP_ID = appId;
store.set("countly_active_app", appId);
$.ajax({
type: "POST",
url: countlyGlobal.path + "/user/settings/active-app",
data: {
"username": countlyGlobal.member.username,
"appId": appId,
_csrf: countlyGlobal.csrf_token
},
success: function() { }
});
};
/**
    * Encode value to be passed to db as key, encoding a leading $ symbol to &#36; and all . (dot) symbols to &#46; in the string
* @param {string} str - value to encode
* @returns {string} encoded string
*/
countlyCommon.encode = function(str) {
        return str.replace(/^\$/g, "&#36;").replace(/\./g, '&#46;');
};
/**
    * Decode value from db, decoding a leading &#36; to $ and all &#46; to . (dots). Also decodes url encoded values such as &amp;#36;.
* @param {string} str - value to decode
* @returns {string} decoded string
*/
countlyCommon.decode = function(str) {
        return str.replace(/^&#36;/g, "$").replace(/&#46;/g, '.');
};
/**
* Decode escaped HTML from db
* @param {string} html - value to decode
* @returns {string} decoded string
*/
countlyCommon.decodeHtml = function(html) {
var txt = document.createElement("textarea");
txt.innerHTML = html;
return txt.value;
};
/**
* Encode html
* @param {string} html - value to encode
* @returns {string} encode string
*/
countlyCommon.encodeHtml = function(html) {
var div = document.createElement('div');
div.innerText = html;
return div.innerHTML;
};
/**
* Encode some tags, leaving those set in whitelist as they are.
* @param {string} html - value to encode
* @param {object} options for encoding. Optional. If not passed, using default in common.
* @returns {string} encode string
*/
countlyCommon.encodeSomeHtml = function(html, options) {
if (options) {
return filterXSS(html, options);
}
else {
return filterXSS(html, htmlEncodeOptions);
}
};
/**
* Calculates the percent change between previous and current values.
* @param {number} previous - data for previous period
* @param {number} current - data for current period
* @returns {object} in the following format {"percent": "20%", "trend": "u"}
* @example
* //outputs {"percent":"100%","trend":"u"}
* countlyCommon.getPercentChange(100, 200);
*/
countlyCommon.getPercentChange = function(previous, current) {
var pChange = 0,
trend = "";
previous = parseFloat(previous);
current = parseFloat(current);
if (previous === 0) {
pChange = "NA";
trend = "u"; //upward
}
else if (current === 0) {
pChange = "∞";
trend = "d"; //downward
}
else {
var change = (((current - previous) / previous) * 100).toFixed(1);
pChange = countlyCommon.getShortNumber(change) + "%";
if (change < 0) {
trend = "d";
}
else {
trend = "u";
}
}
return { "percent": pChange, "trend": trend };
};
/**
* Fetches nested property values from an obj.
* @param {object} obj - standard countly metric object
* @param {string} my_passed_path - dot separate path to fetch from object
* @param {object} def - stub object to return if nothing is found on provided path
* @returns {object} fetched object from provided path
* @example <caption>Path found</caption>
* //outputs {"u":20,"t":20,"n":5}
* countlyCommon.getDescendantProp({"2017":{"1":{"2":{"u":20,"t":20,"n":5}}}}, "2017.1.2", {"u":0,"t":0,"n":0});
* @example <caption>Path not found</caption>
* //outputs {"u":0,"t":0,"n":0}
* countlyCommon.getDescendantProp({"2016":{"1":{"2":{"u":20,"t":20,"n":5}}}}, "2017.1.2", {"u":0,"t":0,"n":0});
*/
countlyCommon.getDescendantProp = function(obj, my_passed_path, def) {
for (var i = 0, my_path = (my_passed_path + "").split('.'), len = my_path.length; i < len; i++) {
if (!obj || typeof obj !== 'object') {
return def;
}
obj = obj[my_path[i]];
}
if (obj === undefined) {
return def;
}
return obj;
};
/**
* Draws a graph with the given dataPoints to container. Used for drawing bar and pie charts.
* @param {object} dataPoints - data poitns to draw on graph
* @param {string|object} container - selector for container or container object itself where to create graph
* @param {string} graphType - type of the graph, accepted values are bar, line, pie, separate-bar
* @param {object} inGraphProperties - object with properties to extend and use on graph library directly
* @example <caption>Drawing Pie chart</caption>
* countlyCommon.drawGraph({"dp":[
* {"data":[[0,20]],"label":"Test1","color":"#52A3EF"},
* {"data":[[0,30]],"label":"Test2","color":"#FF8700"},
* {"data":[[0,50]],"label":"Test3","color":"#0EC1B9"}
* ]}, "#dashboard-graph", "pie");
* @example <caption>Drawing bar chart, to comapre values with different color bars</caption>
* //[-1,null] and [3,null] are used for offsets from left and right
* countlyCommon.drawGraph({"dp":[
* {"data":[[-1,null],[0,20],[1,30],[2,50],[3,null]],"color":"#52A3EF"}, //first bar set
* {"data":[[-1,null],[0,50],[1,30],[2,20],[3,null]],"color":"#0EC1B9"} //second bar set
*],
* "ticks":[[-1,""],[0,"Test1"],[1,"Test2"],[2,"Test3"],[3,""]]
*}, "#dashboard-graph", "separate-bar", {"series":{"stack":null}});
* @example <caption>Drawing Separate bars chart, to comapre values with different color bars</caption>
* //[-1,null] and [3,null] are used for offsets from left and right
* countlyCommon.drawGraph({"dp":[
* {"data":[[-1,null],[0,20],[1,null],[2,null],[3,null]],"label":"Test1","color":"#52A3EF"},
* {"data":[[-1,null],[0,null],[1,30],[2,null],[3,null]],"label":"Test2","color":"#FF8700"},
* {"data":[[-1,null],[0,null],[1,null],[2,50],[3,null]],"label":"Test3","color":"#0EC1B9"}
*],
* "ticks":[[-1,""],[0,"Test1"],[1,"Test2"],[2,"Test3"],[3,""]
*]}, "#dashboard-graph", "separate-bar");
*/
countlyCommon.drawGraph = function(dataPoints, container, graphType, inGraphProperties) {
var p = 0;
if (graphType === "pie") {
var min_treshold = 0.05; //minimum treshold for graph
var break_other = 0.3; //try breaking other in smaller if at least given % from all
var sum = 0;
var i = 0;
for (i = 0; i < dataPoints.dp.length; i++) {
sum = sum + dataPoints.dp[i].data[0][1];
dataPoints.dp[i].moreInfo = "";
}
var dpLength = dataPoints.dp.length;
var treshold_value = Math.round(min_treshold * sum);
var max_other = Math.round(min_treshold * sum);
var under_treshold = [];//array of values under treshold
var left_for_other = sum;
for (i = 0; i < dataPoints.dp.length; i++) {
if (dataPoints.dp[i].data[0][1] >= treshold_value) {
left_for_other = left_for_other - dataPoints.dp[i].data[0][1];
}
else {
under_treshold.push(dataPoints.dp[i].data[0][1]);
}
}
var stop_breaking = Math.round(sum * break_other);
if (left_for_other >= stop_breaking) { //fix values if other takes more than set % of data
under_treshold = under_treshold.sort(function(a, b) {
return a - b;
});
var tresholdMap = [];
treshold_value = treshold_value - 1; //to don't group exactly 5% values later in code
tresholdMap.push({value: treshold_value, text: 5});
var in_this_one = 0;
var count_in_this = 0;
for (p = under_treshold.length - 1; p >= 0 && under_treshold[p] > 0 && left_for_other >= stop_breaking; p--) {
if (under_treshold[p] <= treshold_value) {
if (in_this_one + under_treshold[p] <= max_other || count_in_this < 5) {
count_in_this++;
in_this_one += under_treshold[p];
left_for_other -= under_treshold[p];
}
else {
if (tresholdMap[tresholdMap.length - 1].value === under_treshold[p]) {
in_this_one = 0;
count_in_this = 0;
treshold_value = under_treshold[p] - 1;
}
else {
in_this_one = under_treshold[p];
count_in_this = 1;
treshold_value = under_treshold[p];
left_for_other -= under_treshold[p];
}
tresholdMap.push({value: treshold_value, text: Math.max(0.009, Math.round(treshold_value * 10000 / sum) / 100)});
}
}
}
treshold_value = Math.max(treshold_value - 1, 0);
tresholdMap.push({value: treshold_value, text: Math.round(treshold_value * 10000 / sum) / 100});
var tresholdPointer = 0;
while (tresholdPointer < tresholdMap.length - 1) {
dataPoints.dp.push({"label": tresholdMap[tresholdPointer + 1].text + "-" + tresholdMap[tresholdPointer].text + "%", "data": [[0, 0]], "moreInfo": []});
var tresholdPlace = dataPoints.dp.length - 1;
for (i = 0; i < dpLength; i++) {
if (dataPoints.dp[i].data[0][1] <= tresholdMap[tresholdPointer].value && dataPoints.dp[i].data[0][1] > tresholdMap[tresholdPointer + 1].value) {
dataPoints.dp[tresholdPlace].moreInfo.push({"label": dataPoints.dp[i].label, "value": Math.round(dataPoints.dp[i].data[0][1] * 10000 / sum) / 100});
dataPoints.dp[tresholdPlace].data[0][1] = dataPoints.dp[tresholdPlace].data[0][1] + dataPoints.dp[i].data[0][1];
dataPoints.dp.splice(i, 1);
dpLength = dataPoints.dp.length;
i--;
tresholdPlace--;
}
}
tresholdPointer = tresholdPointer + 1;
}
}
}
_.defer(function() {
if ((!dataPoints.dp || !dataPoints.dp.length) || (graphType === "bar" && (!dataPoints.dp[0].data[0] || (typeof dataPoints.dp[0].data[0][1] === 'undefined' && typeof dataPoints.dp[0].data[1][1] === 'undefined') || (dataPoints.dp[0].data[0][1] === null && dataPoints.dp[0].data[1][1] === null)))) {
$(container).hide();
$(container).siblings(".graph-no-data").show();
return true;
}
else {
$(container).show();
$(container).siblings(".graph-no-data").hide();
}
var graphProperties = {
series: {
lines: { show: true, fill: true },
points: { show: true }
},
grid: { hoverable: true, borderColor: "null", color: "#999", borderWidth: 0, minBorderMargin: 10 },
xaxis: { minTickSize: 1, tickDecimals: "number", tickLength: 0 },
yaxis: { min: 0, minTickSize: 1, tickDecimals: "number", position: "right" },
legend: { backgroundOpacity: 0, margin: [20, -19] },
colors: countlyCommon.GRAPH_COLORS
};
switch (graphType) {
case "line":
graphProperties.series = { lines: { show: true, fill: true }, points: { show: true } };
break;
case "bar":
if (dataPoints.ticks.length > 20) {
graphProperties.xaxis.rotateTicks = 45;
}
var barWidth = 0.6;
switch (dataPoints.dp.length) {
case 2:
barWidth = 0.3;
break;
case 3:
barWidth = 0.2;
break;
}
for (i = 0; i < dataPoints.dp.length; i++) {
dataPoints.dp[i].bars = {
order: i,
barWidth: barWidth
};
}
graphProperties.series = { stack: true, bars: { show: true, barWidth: 0.6, tickLength: 0, fill: 1 } };
graphProperties.xaxis.ticks = dataPoints.ticks;
break;
case "separate-bar":
if (dataPoints.ticks.length > 20) {
graphProperties.xaxis.rotateTicks = 45;
}
graphProperties.series = { bars: { show: true, align: "center", barWidth: 0.6, tickLength: 0, fill: 1 } };
graphProperties.xaxis.ticks = dataPoints.ticks;
break;
case "pie":
graphProperties.series = {
pie: {
show: true,
lineWidth: 0,
radius: 115,
innerRadius: 0.45,
combine: {
color: '#CCC',
threshold: 0.05
},
label: {
show: true,
radius: 160
}
}
};
graphProperties.legend.show = false;
break;
default:
break;
}
if (inGraphProperties) {
$.extend(true, graphProperties, inGraphProperties);
}
$.plot($(container), dataPoints.dp, graphProperties);
if (graphType === "bar" || graphType === "separate-bar") {
$(container).unbind("plothover");
$(container).bind("plothover", function(event, pos, item) {
$("#graph-tooltip").remove();
if (item && item.datapoint && item.datapoint[1]) {
// For stacked bar chart calculate the diff
var yAxisValue = item.datapoint[1].toFixed(1).replace(".0", "") - item.datapoint[2].toFixed(1).replace(".0", "");
showTooltip({
x: pos.pageX,
y: item.pageY,
contents: yAxisValue || 0
});
}
});
}
else if (graphType === 'pie') {
$(container).unbind("plothover");
$(container).bind("plothover", function(event, pos, item) {
$("#graph-tooltip").remove();
if (item && item.series && item.series.moreInfo) {
var tooltipcontent = "<table class='pie_tooltip_table'>";
if (item.series.moreInfo.length <= 5) {
for (p = 0; p < item.series.moreInfo.length; p++) {
tooltipcontent = tooltipcontent + "<tr><td>" + item.series.moreInfo[p].label + ":</td><td>" + item.series.moreInfo[p].value + "%</td>";
}
}
else {
for (p = 0; p < 5; p = p + 1) {
tooltipcontent += "<tr><td>" + item.series.moreInfo[p].label + " :</td><td>" + item.series.moreInfo[p].value + "%</td></tr>";
}
tooltipcontent += "<tr><td colspan='2' style='text-align:center;'>...</td></tr><tr><td style='text-align:center;' colspan=2>(and " + (item.series.moreInfo.length - 5) + " other)</td></tr>";
}
tooltipcontent += "</table>";
showTooltip({
x: pos.pageX,
y: pos.pageY,
contents: tooltipcontent
});
}
});
}
else {
$(container).unbind("plothover");
}
}, dataPoints, container, graphType, inGraphProperties);
};
/**
* Draws a time line graph with the given dataPoints to container.
* @param {object} dataPoints - data poitns to draw on graph
* @param {string|object} container - selector for container or container object itself where to create graph
* @param {string=} bucket - time bucket to display on graph. See {@link countlyCommon.getTickObj}
* @param {boolean=} overrideBucket - override existing bucket logic and simply use the current date for generating ticks. See {@link countlyCommon.getTickObj}
* @param {boolean=} small - set to true if the graph is not a full width graph
* @example
* countlyCommon.drawTimeGraph([{
* "data":[[1,0],[2,0],[3,0],[4,0],[5,0],[6,0],[7,12],[8,9],[9,10],[10,5],[11,8],[12,7],[13,9],[14,4],[15,6]],
* "label":"Total Sessions",
* "color":"#DDDDDD",
* "mode":"ghost"
*},{
* "data":[[1,74],[2,69],[3,60],[4,17],[5,6],[6,3],[7,13],[8,25],[9,62],[10,34],[11,34],[12,33],[13,34],[14,30],[15,1]],
* "label":"Total Sessions",
* "color":"#333933"
*}], "#dashboard-graph");
*/
countlyCommon.drawTimeGraph = function(dataPoints, container, bucket, overrideBucket, small) {
_.defer(function() {
if (!dataPoints.length) {
$(container).hide();
$(container).siblings(".graph-no-data").show();
return true;
}
else {
$(container).show();
$(container).siblings(".graph-no-data").hide();
}
var i = 0;
var j = 0;
// Some data points start with [1, XXX] (should be [0, XXX]) which breaks the new tick logic
// The loops below convert the old structure to the new one
if (dataPoints[0].data[0][0] === 1) {
for (i = 0; i < dataPoints.length; i++) {
for (j = 0; j < dataPoints[i].data.length; j++) {
dataPoints[i].data[j][0] -= 1;
}
}
}
var minValue = dataPoints[0].data[0][1];
var maxValue = dataPoints[0].data[0][1];
for (i = 0; i < dataPoints.length; i++) {
for (j = 0; j < dataPoints[i].data.length; j++) {
dataPoints[i].data[j][1] = Math.round(dataPoints[i].data[j][1] * 1000) / 1000; // 3 decimal places max
if (dataPoints[i].data[j][1] < minValue) {
minValue = dataPoints[i].data[j][1];
}
if (dataPoints[i].data[j][1] > maxValue) {
maxValue = dataPoints[i].data[j][1];
}
}
}
var myTickDecimals = 0;
var myMinTickSize = 1;
if (maxValue < 1 && maxValue > 0) {
myTickDecimals = maxValue.toString().length - 2;
myMinTickSize = 0.001;
}
var graphProperties = {
series: {
lines: {
stack: false,
show: false,
fill: true,
lineWidth: 2.5,
fillColor: {
colors: [
{ opacity: 0 },
{ opacity: 0 }
]
},
shadowSize: 0
},
splines: {
show: true,
lineWidth: 2.5
},
points: { show: true, radius: 0, shadowSize: 0, lineWidth: 2 },
shadowSize: 0
},
crosshair: { mode: "x", color: "rgba(78,78,78,0.4)" },
grid: { hoverable: true, borderColor: "null", color: "#666", borderWidth: 0, minBorderMargin: 10, labelMargin: 10 },
xaxis: { tickDecimals: "number", tickSize: 0, tickLength: 0 },
yaxis: { min: 0, minTickSize: 1, tickDecimals: "number", ticks: 3, position: "right"},
legend: { show: false, margin: [-25, -44], noColumns: 3, backgroundOpacity: 0 },
colors: countlyCommon.GRAPH_COLORS,
};
//overriding values
graphProperties.yaxis.minTickSize = myMinTickSize;
graphProperties.yaxis.tickDecimals = myTickDecimals;
if (myMinTickSize < 1) {
graphProperties.yaxis.tickFormatter = function(number) {
return "0." + (Math.round(number * 1000) / 1000).toString();
};
}
graphProperties.series.points.show = (dataPoints[0].data.length <= 90);
if (overrideBucket) {
graphProperties.series.points.radius = 4;
}
var graphTicks = [],
tickObj = {};
if (_period === "month" && !bucket) {
tickObj = countlyCommon.getTickObj("monthly");
}
else {
tickObj = countlyCommon.getTickObj(bucket, overrideBucket);
}
if (small) {
for (i = 0; i < tickObj.ticks.length; i = i + 2) {
tickObj.ticks[i][1] = "";
}
graphProperties.xaxis.font = {
size: 11,
color: "#a2a2a2"
};
}
graphProperties.xaxis.max = tickObj.max;
graphProperties.xaxis.min = tickObj.min;
graphProperties.xaxis.ticks = tickObj.ticks;
graphTicks = tickObj.tickTexts;
var graphObj = $(container).data("plot"),
keyEventCounter = "A",
keyEvents = [];
//keyEventsIndex = 0;
if (graphObj && graphObj.getOptions().series && graphObj.getOptions().series.splines && graphObj.getOptions().series.splines.show && graphObj.getOptions().yaxis.minTickSize === graphProperties.yaxis.minTickSize) {
graphObj = $(container).data("plot");
if (overrideBucket) {
graphObj.getOptions().series.points.radius = 4;
}
else {
graphObj.getOptions().series.points.radius = 0;
}
graphObj.getOptions().xaxes[0].max = tickObj.max;
graphObj.getOptions().xaxes[0].min = tickObj.min;
graphObj.getOptions().xaxes[0].ticks = tickObj.ticks;
graphObj.setData(dataPoints);
graphObj.setupGrid();
graphObj.draw();
}
else {
graphObj = $.plot($(container), dataPoints, graphProperties);
}
/** Updates the running min and max values with the given data point
* @param {number} index - index of the data point in the series
* @param {array} el - data point as [x, y]
* @returns {boolean|undefined} true if the data point has no value and should be skipped, otherwise nothing
*/
var findMinMax = function(index, el) {
// data point is null, this workaround is used to start drawing graph with a certain padding
if (!el[1] && parseInt(el[1]) !== 0) {
return true;
}
el[1] = parseFloat(el[1]);
if (el[1] >= tmpMax) {
tmpMax = el[1];
tmpMaxIndex = el[0];
}
if (el[1] <= tmpMin) {
tmpMin = el[1];
tmpMinIndex = el[0];
}
};
var k = 0;
for (k = 0; k < graphObj.getData().length; k++) {
var tmpMax = 0,
tmpMaxIndex = 0,
tmpMin = 999999999999,
tmpMinIndex = 0,
label = (graphObj.getData()[k].label + "").toLowerCase();
if (graphObj.getData()[k].mode === "ghost") {
//keyEventsIndex += graphObj.getData()[k].data.length;
continue;
}
$.each(graphObj.getData()[k].data, findMinMax);
if (tmpMax === tmpMin) {
continue;
}
keyEvents[k] = [];
keyEvents[k][keyEvents[k].length] = {
data: [tmpMinIndex, tmpMin],
code: keyEventCounter,
color: graphObj.getData()[k].color,
event: "min",
desc: jQuery.i18n.prop('common.graph-min', tmpMin, label, graphTicks[tmpMinIndex])
};
keyEventCounter = String.fromCharCode(keyEventCounter.charCodeAt() + 1);
keyEvents[k][keyEvents[k].length] = {
data: [tmpMaxIndex, tmpMax],
code: keyEventCounter,
color: graphObj.getData()[k].color,
event: "max",
desc: jQuery.i18n.prop('common.graph-max', tmpMax, label, graphTicks[tmpMaxIndex])
};
keyEventCounter = String.fromCharCode(keyEventCounter.charCodeAt() + 1);
}
var graphWidth = graphObj.width();
$(container).find(".graph-key-event-label").remove();
$(container).find(".graph-note-label").remove();
for (k = 0; k < keyEvents.length; k++) {
var bgColor = graphObj.getData()[k].color;
if (!keyEvents[k]) {
continue;
}
for (var l = 0; l < keyEvents[k].length; l++) {
var o = graphObj.pointOffset({ x: keyEvents[k][l].data[0], y: keyEvents[k][l].data[1] });
if (o.left <= 15) {
o.left = 15;
}
if (o.left >= (graphWidth - 15)) {
o.left = (graphWidth - 15);
}
var keyEventLabel = $('<div class="graph-key-event-label">').text(keyEvents[k][l].code);
keyEventLabel.attr({
"title": keyEvents[k][l].desc,
"data-points": "[" + keyEvents[k][l].data + "]"
}).css({
"position": 'absolute',
"left": o.left,
"top": o.top - 33,
"display": 'none',
"background-color": bgColor
}).appendTo(graphObj.getPlaceholder()).show();
$(".tipsy").remove();
keyEventLabel.tipsy({ gravity: $.fn.tipsy.autoWE, offset: 3, html: true });
}
}
// Add note labels to the graph
if (!(bucket === "hourly" && dataPoints[0].data.length > 24) && bucket !== "weekly") {
var noteDateIds = countlyCommon.getNoteDateIds(bucket),
frontData = graphObj.getData()[graphObj.getData().length - 1],
startIndex = (!frontData.data[1] && frontData.data[1] !== 0) ? 1 : 0;
for (k = 0, l = startIndex; k < frontData.data.length; k++, l++) {
if (frontData.data[l]) {
var graphPoint = graphObj.pointOffset({ x: frontData.data[l][0], y: frontData.data[l][1] });
if (countlyCommon.getNotesForDateId(noteDateIds[k]).length) {
var graphNoteLabel = $('<div class="graph-note-label"><div class="fa fa-pencil"></div></div>');
graphNoteLabel.attr({
"title": countlyCommon.getNotesForDateId(noteDateIds[k]),
"data-points": "[" + frontData.data[l] + "]"
}).css({
"position": 'absolute',
"left": graphPoint.left,
"top": graphPoint.top - 33,
"display": 'none',
"border-color": frontData.color
}).appendTo(graphObj.getPlaceholder()).show();
$(".tipsy").remove();
graphNoteLabel.tipsy({ gravity: $.fn.tipsy.autoWE, offset: 3, html: true });
}
}
}
}
$(container).on("mouseout", function() {
graphObj.unlockCrosshair();
graphObj.clearCrosshair();
graphObj.unhighlight();
$("#graph-tooltip").fadeOut(200, function() {
$(this).remove();
});
});
/** Shows the crosshair tooltip for the given data index
* @param {number} dataIndex - index of the hovered data point
* @param {object} position - cursor position as provided by the plothover event
* @param {boolean} onPoint - true if a data point was found near the cursor
*/
function showCrosshairTooltip(dataIndex, position, onPoint) {
var tooltip = $("#graph-tooltip");
var crossHairPos = graphObj.p2c(position);
var tooltipLeft = (crossHairPos.left < 200) ? crossHairPos.left + 20 : crossHairPos.left - tooltip.width() - 20;
tooltip.css({ left: tooltipLeft });
if (onPoint) {
var dataSet = graphObj.getData(),
tooltipHTML = "<div class='title'>" + tickObj.tickTexts[dataIndex] + "</div>";
dataSet = _.sortBy(dataSet, function(obj) {
return obj.data[dataIndex][1];
});
for (var m = dataSet.length - 1; m >= 0; --m) {
var series = dataSet[m],
formattedValue = series.data[dataIndex][1];
// Change label to previous period if there is a ghost graph
if (series.mode === "ghost") {
series.label = jQuery.i18n.map["common.previous-period"];
}
if (formattedValue) {
formattedValue = parseFloat(formattedValue).toFixed(2).replace(/[.,]00$/, "");
}
if (series.data[dataIndex][2]) {
formattedValue = series.data[dataIndex][2]; // to show customized string value tips
}
tooltipHTML += "<div class='inner'>";
tooltipHTML += "<div class='color' style='background-color: " + series.color + "'></div>";
tooltipHTML += "<div class='series'>" + series.label + "</div>";
tooltipHTML += "<div class='value'>" + formattedValue + "</div>";
tooltipHTML += "</div>";
}
if (tooltip.length) {
tooltip.html(tooltipHTML);
}
else {
tooltip = $("<div id='graph-tooltip' class='white' style='top:-15px;'>" + tooltipHTML + "</div>");
$(container).prepend(tooltip);
}
if (tooltip.is(":visible")) {
tooltip.css({
"transition": "left .15s"
});
}
else {
tooltip.fadeIn();
}
}
}
$(container).unbind("plothover");
$(container).bind("plothover", function(event, pos) {
graphObj.unlockCrosshair();
graphObj.unhighlight();
var dataset = graphObj.getData(),
pointFound = false;
for (i = 0; i < dataset.length; ++i) {
var series = dataset[i];
// Find the nearest points, x-wise
for (j = 0; j < series.data.length; ++j) {
var currX = series.data[j][0],
currCrossX = pos.x.toFixed(2);
if ((currX - 0.10) < currCrossX && (currX + 0.10) > currCrossX) {
graphObj.lockCrosshair({
x: series.data[j][0],
y: series.data[j][1]
});
graphObj.highlight(series, j);
pointFound = true;
break;
}
}
}
showCrosshairTooltip(j, pos, pointFound);
});
}, dataPoints, container, bucket);
};
/**
* Draws a gauge with the provided value on the provided container.
* @param {string|object} targetEl - selector for container or container object itself where to create graph
* @param {number} value - value to display on gauge
* @param {number} maxValue - maximal value of the gauge
* @param {string} gaugeColor - color of the gauge in hexadecimal string as #ffffff
* @param {string|object} textField - selector for container or container object itself where to output textual value
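* @example <caption>Drawing a gauge (a minimal sketch; the selectors and values are assumptions)</caption>
* //draws a gauge filled to 65 out of 100 and writes the value into the text element
* countlyCommon.drawGauge("#gauge-canvas", 65, 100, "#52A3EF", "#gauge-value");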
*/
countlyCommon.drawGauge = function(targetEl, value, maxValue, gaugeColor, textField) {
var opts = {
lines: 12,
angle: 0.15,
lineWidth: 0.44,
pointer: {
length: 0.7,
strokeWidth: 0.05,
color: '#000000'
},
colorStart: gaugeColor,
colorStop: gaugeColor,
strokeColor: '#E0E0E0',
generateGradient: true
};
var gauge = new Gauge($(targetEl)[0]).setOptions(opts);
if (textField) {
gauge.setTextField($(textField)[0]);
}
gauge.maxValue = maxValue;
gauge.set(1);
gauge.set(value);
};
/**
* Draws horizontally stacked bars like in the platforms and density analytics sections.
* @param {array} data - data to draw in form of [{"data":[[0,85]],"label":"Test1"},{"data":[[0,79]],"label":"Test2"},{"data":[[0,78]],"label":"Test3"}]
* @param {object|string} intoElement - selector for container or container object itself where to create graph
* @param {number} colorIndex - index of color from {@link countlyCommon.GRAPH_COLORS}
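* @example <caption>Drawing platform shares (illustrative data; the selector is an assumption)</caption>
* countlyCommon.drawHorizontalStackedBars([
*    {"data":[[0,85]],"label":"iOS"},
*    {"data":[[0,79]],"label":"Android"},
*    {"data":[[0,78]],"label":"Windows Phone"}
* ], "#platform-bars", 0);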
*/
countlyCommon.drawHorizontalStackedBars = function(data, intoElement, colorIndex) {
var processedData = [],
tmpProcessedData = [],
totalCount = 0,
maxToDisplay = 10,
barHeight = 30;
var i = 0;
for (i = 0; i < data.length; i++) {
tmpProcessedData.push({
label: data[i].label,
count: data[i].data[0][1],
index: i
});
totalCount += data[i].data[0][1];
}
var totalPerc = 0,
proCount = 0;
for (i = 0; i < tmpProcessedData.length; i++) {
if (i >= maxToDisplay) {
processedData.push({
label: "Other",
count: totalCount - proCount,
perc: countlyCommon.round((100 - totalPerc), 2) + "%",
index: i
});
break;
}
var perc = countlyCommon.round((tmpProcessedData[i].count / totalCount) * 100, 2);
tmpProcessedData[i].perc = perc + "%";
totalPerc += perc;
proCount += tmpProcessedData[i].count;
processedData.push(tmpProcessedData[i]);
}
if (processedData.length > 0) {
var percentSoFar = 0;
var chart = d3.select(intoElement)
.attr("width", "100%")
.attr("height", barHeight);
var bar = chart.selectAll("g")
.data(processedData)
.enter().append("g");
bar.append("rect")
.attr("width", function(d) {
return ((d.count / totalCount) * 100) + "%";
})
.attr("x", function(d) {
var myPercent = percentSoFar;
percentSoFar = percentSoFar + (100 * (d.count / totalCount));
return myPercent + "%";
})
.attr("height", barHeight)
.attr("fill", function(d) {
if (colorIndex || colorIndex === 0) {
return countlyCommon.GRAPH_COLORS[colorIndex];
}
else {
return countlyCommon.GRAPH_COLORS[d.index];
}
})
.attr("stroke", "#FFF")
.attr("stroke-width", 2);
if (colorIndex || colorIndex === 0) {
bar.attr("opacity", function(d) {
return 1 - (0.05 * d.index);
});
}
percentSoFar = 0;
bar.append("foreignObject")
.attr("width", function(d) {
return ((d.count / totalCount) * 100) + "%";
})
.attr("height", barHeight)
.attr("x", function(d) {
var myPercent = percentSoFar;
percentSoFar = percentSoFar + (100 * (d.count / totalCount));
return myPercent + "%";
})
.append("xhtml:div")
.attr("class", "hsb-tip")
.html(function(d) {
return "<div>" + d.perc + "</div>";
});
percentSoFar = 0;
bar.append("text")
.attr("x", function(d) {
var myPercent = percentSoFar;
percentSoFar = percentSoFar + (100 * (d.count / totalCount));
return myPercent + 0.5 + "%";
})
.attr("dy", "1.35em")
.text(function(d) {
return d.label;
});
}
else {
var chart1 = d3.select(intoElement)
.attr("width", "100%")
.attr("height", barHeight);
var bar1 = chart1.selectAll("g")
.data([{ text: jQuery.i18n.map["common.bar.no-data"] }])
.enter().append("g");
bar1.append("rect")
.attr("width", "100%")
.attr("height", barHeight)
.attr("fill", "#FBFBFB")
.attr("stroke", "#FFF")
.attr("stroke-width", 2);
bar1.append("foreignObject")
.attr("width", "100%")
.attr("height", barHeight)
.append("xhtml:div")
.attr("class", "no-data")
.html(function(d) {
return d.text;
});
}
};
/**
* Extract range data from standard countly metric data model
* @param {object} db - countly standard metric data object
* @param {string} propertyName - name of the property to extract
* @param {object} rangeArray - array of all metrics/segments to extract (usually what is contained in meta)
* @param {function} explainRange - function to convert range/bucket index to meaningful label
* @param {array=} myorder - array of keys in preferred order. Optional. If not passed, the result is sorted by values
* @returns {array} array containing extracted ranged data as [{"f":"First session","t":352,"percent":"88.4"},{"f":"2 days","t":46,"percent":"11.6"}]
* @example <caption>Extracting session frequency from users collection</caption>
* //outputs [{"f":"First session","t":352,"percent":"88.4"},{"f":"2 days","t":46,"percent":"11.6"}]
* countlyCommon.extractRangeData(_userDb, "f", _frequencies, countlySession.explainFrequencyRange);
*/
countlyCommon.extractRangeData = function(db, propertyName, rangeArray, explainRange, myorder) {
countlyCommon.periodObj = getPeriodObj();
var dataArr = [],
dataArrCounter = 0,
rangeTotal,
total = 0;
var tmp_x = 0;
if (!rangeArray) {
return dataArr;
}
for (var j = 0; j < rangeArray.length; j++) {
rangeTotal = 0;
if (!countlyCommon.periodObj.isSpecialPeriod) {
tmp_x = countlyCommon.getDescendantProp(db, countlyCommon.periodObj.activePeriod + "." + propertyName);
if (tmp_x && tmp_x[rangeArray[j]]) {
rangeTotal += tmp_x[rangeArray[j]];
}
if (rangeTotal !== 0) {
dataArr[dataArrCounter] = {};
dataArr[dataArrCounter][propertyName] = (explainRange) ? explainRange(rangeArray[j]) : rangeArray[j];
dataArr[dataArrCounter].t = rangeTotal;
total += rangeTotal;
dataArrCounter++;
}
}
else {
var tmpRangeTotal = 0;
var i = 0;
for (i = 0; i < (countlyCommon.periodObj.uniquePeriodArr.length); i++) {
tmp_x = countlyCommon.getDescendantProp(db, countlyCommon.periodObj.uniquePeriodArr[i] + "." + propertyName);
if (tmp_x && tmp_x[rangeArray[j]]) {
rangeTotal += tmp_x[rangeArray[j]];
}
}
for (i = 0; i < (countlyCommon.periodObj.uniquePeriodCheckArr.length); i++) {
tmp_x = countlyCommon.getDescendantProp(db, countlyCommon.periodObj.uniquePeriodCheckArr[i] + "." + propertyName);
if (tmp_x && tmp_x[rangeArray[j]]) {
tmpRangeTotal += tmp_x[rangeArray[j]];
}
}
if (rangeTotal > tmpRangeTotal) {
rangeTotal = tmpRangeTotal;
}
if (rangeTotal !== 0) {
dataArr[dataArrCounter] = {};
dataArr[dataArrCounter][propertyName] = (explainRange) ? explainRange(rangeArray[j]) : rangeArray[j];
dataArr[dataArrCounter].t = rangeTotal;
total += rangeTotal;
dataArrCounter++;
}
}
}
for (var z = 0; z < dataArr.length; z++) {
dataArr[z].percent = ((dataArr[z].t / total) * 100).toFixed(1);
}
if (myorder && Array.isArray(myorder)) {
dataArr.sort(function(a, b) {
return (myorder.indexOf(a[propertyName]) - myorder.indexOf(b[propertyName]));
});
}
else {
dataArr.sort(function(a, b) {
return -(a.t - b.t);
});
}
return dataArr;
};
/**
* Extract single level data without metrics/segments, like total user data from users collection
* @param {object} db - countly standard metric data object
* @param {function} clearFunction - function to prefill all expected properties such as u, t, n, etc. with 0, so the result does not contain null values, which would break graph drawing
* @param {object} chartData - prefill chart data with labels, colors, etc
* @param {object} dataProperties - describing which properties and how to extract
* @param {string} metric - metric to select
* @returns {object} object to use in timeline graph with {"chartDP":chartData, "chartData":_.compact(tableData), "keyEvents":keyEvents}
* @example <caption>Extracting total users data from users collection</caption>
* countlyCommon.extractChartData(_sessionDb, countlySession.clearObject, [
* { data:[], label:"Total Users" }
* ], [
* {
* name:"t",
* func:function (dataObj) {
* return dataObj["u"]
* }
* }
* ]);
* @example <caption>Returned data</caption>
* {"chartDP":[
* {
* "data":[[0,0],[1,0],[2,0],[3,0],[4,0],[5,0],[6,0],[7,0],[8,0],[9,0],[10,0],[11,0],[12,0],[13,0],[14,0],[15,12]],
* "label":"Total Sessions",
* "color":"#DDDDDD",
* "mode":"ghost"
* },
* {
* "data":[[0,6],[1,14],[2,11],[3,18],[4,10],[5,32],[6,53],[7,55],[8,71],[9,82],[10,74],[11,69],[12,60],[13,17],[14,6],[15,3]],
* "label":"Total Sessions",
* "color":"#333933"
* }
* ],
* "chartData":[
* {"date":"22 Dec, 2016","pt":0,"t":6},
* {"date":"23 Dec, 2016","pt":0,"t":14},
* {"date":"24 Dec, 2016","pt":0,"t":11},
* {"date":"25 Dec, 2016","pt":0,"t":18},
* {"date":"26 Dec, 2016","pt":0,"t":10},
* {"date":"27 Dec, 2016","pt":0,"t":32},
* {"date":"28 Dec, 2016","pt":0,"t":53},
* {"date":"29 Dec, 2016","pt":0,"t":55},
* {"date":"30 Dec, 2016","pt":0,"t":71},
* {"date":"31 Dec, 2016","pt":0,"t":82},
* {"date":"1 Jan, 2017","pt":0,"t":74},
* {"date":"2 Jan, 2017","pt":0,"t":69},
* {"date":"3 Jan, 2017","pt":0,"t":60},
* {"date":"4 Jan, 2017","pt":0,"t":17},
* {"date":"5 Jan, 2017","pt":0,"t":6},
* {"date":"6 Jan, 2017","pt":12,"t":3}
* ],
* "keyEvents":[{"min":0,"max":12},{"min":0,"max":82}]
* }
*/
countlyCommon.extractChartData = function(db, clearFunction, chartData, dataProperties, metric) {
if (metric) {
metric = "." + metric;
}
else {
metric = "";
}
countlyCommon.periodObj = getPeriodObj();
var periodMin = countlyCommon.periodObj.periodMin,
periodMax = (countlyCommon.periodObj.periodMax + 1),
dataObj = {},
formattedDate = "",
tableData = [],
propertyNames = _.pluck(dataProperties, "name"),
propertyFunctions = _.pluck(dataProperties, "func"),
currOrPrevious = _.pluck(dataProperties, "period"),
activeDate,
activeDateArr;
for (var j = 0; j < propertyNames.length; j++) {
if (currOrPrevious[j] === "previous") {
if (countlyCommon.periodObj.isSpecialPeriod) {
periodMin = 0;
periodMax = countlyCommon.periodObj.previousPeriodArr.length;
activeDateArr = countlyCommon.periodObj.previousPeriodArr;
}
else {
activeDate = countlyCommon.periodObj.previousPeriod;
}
}
else {
if (countlyCommon.periodObj.isSpecialPeriod) {
periodMin = 0;
periodMax = countlyCommon.periodObj.currentPeriodArr.length;
activeDateArr = countlyCommon.periodObj.currentPeriodArr;
}
else {
activeDate = countlyCommon.periodObj.activePeriod;
}
}
for (var i = periodMin; i < periodMax; i++) {
if (!countlyCommon.periodObj.isSpecialPeriod) {
if (countlyCommon.periodObj.periodMin === 0) {
formattedDate = moment((activeDate + " " + i + ":00:00").replace(/\./g, "/"), "YYYY/MM/DD HH:mm:ss");
}
else if (("" + activeDate).indexOf(".") === -1) {
formattedDate = moment((activeDate + "/" + i + "/1").replace(/\./g, "/"), "YYYY/MM/DD");
}
else {
formattedDate = moment((activeDate + "/" + i).replace(/\./g, "/"), "YYYY/MM/DD");
}
dataObj = countlyCommon.getDescendantProp(db, activeDate + "." + i + metric);
}
else {
formattedDate = moment((activeDateArr[i]).replace(/\./g, "/"), "YYYY/MM/DD");
dataObj = countlyCommon.getDescendantProp(db, activeDateArr[i] + metric);
}
dataObj = clearFunction(dataObj);
if (!tableData[i]) {
tableData[i] = {};
}
tableData[i].date = countlyCommon.formatDate(formattedDate, countlyCommon.periodObj.dateString);
var propertyValue = "";
if (propertyFunctions[j]) {
propertyValue = propertyFunctions[j](dataObj);
}
else {
propertyValue = dataObj[propertyNames[j]];
}
chartData[j].data[chartData[j].data.length] = [i, propertyValue];
tableData[i][propertyNames[j]] = propertyValue;
}
}
var keyEvents = [];
for (var k = 0; k < chartData.length; k++) {
var flatChartData = _.flatten(chartData[k].data);
var chartVals = _.reject(flatChartData, function(context, value) {
return value % 2 === 0;
});
keyEvents[k] = {};
keyEvents[k].min = _.min(chartVals);
keyEvents[k].max = _.max(chartVals);
}
return { "chartDP": chartData, "chartData": _.compact(tableData), "keyEvents": keyEvents };
};
/**
* Extract two level data with metrics/segments, like total user data from carriers collection
* @param {object} db - countly standard metric data object
* @param {object} rangeArray - array of all metrics/segments to extract (usually what is contained in meta)
* @param {function} clearFunction - function to prefill all expected properties such as u, t, n, etc. with 0, so the result does not contain null values, which would break graph drawing
* @param {object} dataProperties - describing which properties and how to extract
* @param {object=} estOverrideMetric - data from total users api request to correct unique user values
* @returns {object} object to use in bar and pie charts with {"chartData":_.compact(tableData)}
* @example <caption>Extracting carriers data from carriers collection</caption>
* var chartData = countlyCommon.extractTwoLevelData(_carrierDb, ["At&t", "Verizon"], countlyCarrier.clearObject, [
* {
* name:"carrier",
* func:function (rangeArr, dataObj) {
* return rangeArr;
* }
* },
* { "name":"t" },
* { "name":"u" },
* { "name":"n" }
* ]);
* @example <caption>Return data</caption>
* {"chartData":['
* {"carrier":"At&t","t":71,"u":62,"n":36},
* {"carrier":"Verizon","t":66,"u":60,"n":30}
* ]}
*/
countlyCommon.extractTwoLevelData = function(db, rangeArray, clearFunction, dataProperties, estOverrideMetric) {
countlyCommon.periodObj = getPeriodObj();
if (!rangeArray) {
return { "chartData": tableData };
}
var periodMin = 0,
periodMax = 0,
dataObj = {},
tableData = [],
propertyNames = _.pluck(dataProperties, "name"),
propertyFunctions = _.pluck(dataProperties, "func"),
propertyValue = 0;
if (!countlyCommon.periodObj.isSpecialPeriod) {
periodMin = countlyCommon.periodObj.periodMin;
periodMax = (countlyCommon.periodObj.periodMax + 1);
}
else {
periodMin = 0;
periodMax = countlyCommon.periodObj.currentPeriodArr.length;
}
var tableCounter = 0;
var j = 0;
var k = 0;
var i = 0;
if (!countlyCommon.periodObj.isSpecialPeriod) {
for (j = 0; j < rangeArray.length; j++) {
dataObj = countlyCommon.getDescendantProp(db, countlyCommon.periodObj.activePeriod + "." + rangeArray[j]);
if (!dataObj) {
continue;
}
var tmpPropertyObj1 = {};
dataObj = clearFunction(dataObj);
var propertySum = 0;
for (k = 0; k < propertyNames.length; k++) {
if (propertyFunctions[k]) {
propertyValue = propertyFunctions[k](rangeArray[j], dataObj);
}
else {
propertyValue = dataObj[propertyNames[k]];
}
if (typeof propertyValue !== 'string') {
propertySum += propertyValue;
}
tmpPropertyObj1[propertyNames[k]] = propertyValue;
}
if (propertySum > 0) {
tableData[tableCounter] = {};
tableData[tableCounter] = tmpPropertyObj1;
tableCounter++;
}
}
}
else {
var calculatedObj = (estOverrideMetric) ? countlyTotalUsers.get(estOverrideMetric) : {};
for (j = 0; j < rangeArray.length; j++) {
var tmp_x = {};
var tmpPropertyObj = {};
for (i = periodMin; i < periodMax; i++) {
dataObj = countlyCommon.getDescendantProp(db, countlyCommon.periodObj.currentPeriodArr[i] + "." + rangeArray[j]);
if (!dataObj) {
continue;
}
dataObj = clearFunction(dataObj);
for (k = 0; k < propertyNames.length; k++) {
if (propertyNames[k] === "u") {
propertyValue = 0;
}
else if (propertyFunctions[k]) {
propertyValue = propertyFunctions[k](rangeArray[j], dataObj);
}
else {
propertyValue = dataObj[propertyNames[k]];
}
if (!tmpPropertyObj[propertyNames[k]]) {
tmpPropertyObj[propertyNames[k]] = 0;
}
if (typeof propertyValue === 'string') {
tmpPropertyObj[propertyNames[k]] = propertyValue;
}
else {
tmpPropertyObj[propertyNames[k]] += propertyValue;
}
}
}
if (propertyNames.indexOf("u") !== -1 && Object.keys(tmpPropertyObj).length) {
if (countlyTotalUsers.isUsable() && estOverrideMetric && calculatedObj[rangeArray[j]]) {
tmpPropertyObj.u = calculatedObj[rangeArray[j]];
}
else {
var tmpUniqVal = 0,
tmpUniqValCheck = 0,
tmpCheckVal = 0,
l = 0;
for (l = 0; l < (countlyCommon.periodObj.uniquePeriodArr.length); l++) {
tmp_x = countlyCommon.getDescendantProp(db, countlyCommon.periodObj.uniquePeriodArr[l] + "." + rangeArray[j]);
if (!tmp_x) {
continue;
}
tmp_x = clearFunction(tmp_x);
propertyValue = tmp_x.u;
if (typeof propertyValue === 'string') {
tmpPropertyObj.u = propertyValue;
}
else {
tmpUniqVal += propertyValue;
tmpPropertyObj.u += propertyValue;
}
}
for (l = 0; l < (countlyCommon.periodObj.uniquePeriodCheckArr.length); l++) {
tmp_x = countlyCommon.getDescendantProp(db, countlyCommon.periodObj.uniquePeriodCheckArr[l] + "." + rangeArray[j]);
if (!tmp_x) {
continue;
}
tmp_x = clearFunction(tmp_x);
tmpCheckVal = tmp_x.u;
if (typeof tmpCheckVal !== 'string') {
tmpUniqValCheck += tmpCheckVal;
}
}
if (tmpUniqVal > tmpUniqValCheck) {
tmpPropertyObj.u = tmpUniqValCheck;
}
}
// Total users can't be less than new users
if (tmpPropertyObj.u < tmpPropertyObj.n) {
tmpPropertyObj.u = tmpPropertyObj.n;
}
// Total users can't be more than total sessions
if (tmpPropertyObj.u > tmpPropertyObj.t) {
tmpPropertyObj.u = tmpPropertyObj.t;
}
}
tableData[tableCounter] = {};
tableData[tableCounter] = tmpPropertyObj;
tableCounter++;
}
}
for (i = 0; i < tableData.length; i++) {
if (_.isEmpty(tableData[i])) {
tableData[i] = null;
}
}
tableData = _.compact(tableData);
if (propertyNames.indexOf("u") !== -1) {
countlyCommon.sortByProperty(tableData, "u");
}
else if (propertyNames.indexOf("t") !== -1) {
countlyCommon.sortByProperty(tableData, "t");
}
else if (propertyNames.indexOf("c") !== -1) {
countlyCommon.sortByProperty(tableData, "c");
}
return { "chartData": tableData };
};
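/**
* Sort the given table data array in place in descending order by the given numeric property, treating missing values as 0
* @param {array} tableData - array of objects, as in the chartData returned by {@link countlyCommon.extractTwoLevelData}
* @param {string} prop - name of the numeric property to sort by, for example "u" or "t"
* @example
* //sorts rows so that the highest "t" value comes first
* countlyCommon.sortByProperty([{"t":5},{"t":12},{}], "t");
*/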
countlyCommon.sortByProperty = function(tableData, prop) {
tableData.sort(function(a, b) {
a = (a && a[prop]) ? a[prop] : 0;
b = (b && b[prop]) ? b[prop] : 0;
return b - a;
});
};
/**
* Merge metric data in chartData returned by {@link countlyCommon.extractChartData} or {@link countlyCommon.extractTwoLevelData}, in case the chartData contains duplicated values after transformation of the countly standard metric data model, for example when null, undefined and unknown values are all converted to unknown
* @param {array} chartData - chartData array returned by {@link countlyCommon.extractChartData} or {@link countlyCommon.extractTwoLevelData}
* @param {string} metric - metric name to merge
* @returns {array} chartData array with the same metrics summed up
* @example <caption>Sample input</caption>
* {"chartData":[
* {"metric":"Test","t":71,"u":62,"n":36},
* {"metric":"Test1","t":66,"u":60,"n":30},
* {"metric":"Test","t":2,"u":3,"n":4}
* ]}
* @example <caption>Sample output</caption>
* {"chartData":[
* {"metric":"Test","t":73,"u":65,"n":40},
* {"metric":"Test1","t":66,"u":60,"n":30}
* ]}
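* @example <caption>Sample call, mirroring how it is used elsewhere in this file</caption>
* rangeData.chartData = countlyCommon.mergeMetricsByName(rangeData.chartData, "metric");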
*/
countlyCommon.mergeMetricsByName = function(chartData, metric) {
var uniqueNames = {},
data;
for (var i = 0; i < chartData.length; i++) {
data = chartData[i];
if (data[metric] && !uniqueNames[data[metric]]) {
uniqueNames[data[metric]] = data;
}
else {
for (var key in data) {
if (typeof data[key] === "string") {
uniqueNames[data[metric]][key] = data[key];
}
else if (typeof data[key] === "number") {
if (!uniqueNames[data[metric]][key]) {
uniqueNames[data[metric]][key] = 0;
}
uniqueNames[data[metric]][key] += data[key];
}
}
}
}
return _.values(uniqueNames);
};
/**
* Extracts top three items (from rangeArray) that have the biggest total session counts from the db object.
* @param {object} db - countly standard metric data object
* @param {object} rangeArray - array of all metrics/segments to extract (usually what is contained in meta)
* @param {function} clearFunction - function to prefill all expected properties such as u, t, n, etc. with 0, so the result does not contain null values, which would break graph drawing
* @param {function} fetchFunction - function to fetch property, default used is function (rangeArr, dataObj) {return rangeArr;}
* @returns {array} array with top 3 values
* @example <caption>Return data</caption>
* [
* {"name":"iOS","percent":35},
* {"name":"Android","percent":33},
* {"name":"Windows Phone","percent":32}
* ]
*/
countlyCommon.extractBarDataWPercentageOfTotal = function(db, rangeArray, clearFunction, fetchFunction) {
fetchFunction = fetchFunction || function(rangeArr) {
return rangeArr;
};
var rangeData = countlyCommon.extractTwoLevelData(db, rangeArray, clearFunction, [
{
name: "range",
func: fetchFunction
},
{ "name": "t" }
]);
return countlyCommon.calculateBarDataWPercentageOfTotal(rangeData);
};
/**
* Extracts top three items (from rangeArray) that have the biggest total session counts from the db object.
* @param {object} db - countly standard metric data object
* @param {object} rangeArray - array of all metrics/segments to extract (usually what is contained in meta)
* @param {function} clearFunction - function to prefill all expected properties such as u, t, n, etc. with 0, so the result does not contain null values, which would break graph drawing
* @param {function} fetchFunction - function to fetch property, default used is function (rangeArr, dataObj) {return rangeArr;}
* @returns {array} array with top 3 values
* @example <caption>Return data</caption>
* [
* {"name":"iOS","percent":35},
* {"name":"Android","percent":33},
* {"name":"Windows Phone","percent":32}
* ]
*/
countlyCommon.extractBarData = function(db, rangeArray, clearFunction, fetchFunction) {
fetchFunction = fetchFunction || function(rangeArr) {
return rangeArr;
};
var rangeData = countlyCommon.extractTwoLevelData(db, rangeArray, clearFunction, [
{
name: "range",
func: fetchFunction
},
{ "name": "t" }
]);
return countlyCommon.calculateBarData(rangeData);
};
/**
* Extracts top three items (from rangeArray) that have the biggest total session counts from the chartData with their percentage of total
* @param {object} rangeData - chartData retrieved from {@link countlyCommon.extractTwoLevelData} as {"chartData":[{"carrier":"At&t","t":71,"u":62,"n":36},{"carrier":"Verizon","t":66,"u":60,"n":30}]}
* @returns {array} array with top 3 values
* @example <caption>Return data</caption>
* [
* {"name":"iOS","percent":44},
* {"name":"Android","percent":22},
* {"name":"Windows Phone","percent":14}
* ]
*/
countlyCommon.calculateBarDataWPercentageOfTotal = function(rangeData) {
rangeData.chartData = countlyCommon.mergeMetricsByName(rangeData.chartData, "range");
rangeData.chartData = _.sortBy(rangeData.chartData, function(obj) {
return -obj.t;
});
var rangeNames = _.pluck(rangeData.chartData, 'range'),
rangeTotal = _.pluck(rangeData.chartData, 't'),
barData = [],
maxItems = 3,
totalSum = 0;
rangeTotal.forEach(function(r) {
totalSum += r;
});
rangeTotal.sort(function(a, b) {
if (a < b) {
return 1;
}
if (b < a) {
return -1;
}
return 0;
});
if (rangeNames.length < maxItems) {
maxItems = rangeNames.length;
}
for (var i = 0; i < maxItems; i++) {
var percent = Math.floor((rangeTotal[i] / totalSum) * 100);
barData[i] = { "name": rangeNames[i], "percent": percent };
}
return barData;
};
/**
* Extracts top three items (from rangeArray) that have the biggest total session counts from the chartData.
* @param {object} rangeData - chartData retrieved from {@link countlyCommon.extractTwoLevelData} as {"chartData":[{"carrier":"At&t","t":71,"u":62,"n":36},{"carrier":"Verizon","t":66,"u":60,"n":30}]}
* @returns {array} array with top 3 values
* @example <caption>Return data</caption>
* [
* {"name":"iOS","percent":35},
* {"name":"Android","percent":33},
* {"name":"Windows Phone","percent":32}
* ]
*/
countlyCommon.calculateBarData = function(rangeData) {
rangeData.chartData = countlyCommon.mergeMetricsByName(rangeData.chartData, "range");
rangeData.chartData = _.sortBy(rangeData.chartData, function(obj) {
return -obj.t;
});
var rangeNames = _.pluck(rangeData.chartData, 'range'),
rangeTotal = _.pluck(rangeData.chartData, 't'),
barData = [],
sum = 0,
maxItems = 3,
totalPercent = 0;
rangeTotal.sort(function(a, b) {
if (a < b) {
return 1;
}
if (b < a) {
return -1;
}
return 0;
});
if (rangeNames.length < maxItems) {
maxItems = rangeNames.length;
}
var i = 0;
for (i = 0; i < maxItems; i++) {
sum += rangeTotal[i];
}
for (i = 0; i < maxItems; i++) {
var percent = Math.floor((rangeTotal[i] / sum) * 100);
totalPercent += percent;
if (i === (maxItems - 1)) {
percent += 100 - totalPercent;
}
barData[i] = { "name": rangeNames[i], "percent": percent };
}
return barData;
};
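/**
* Extract timeline chart data from an array of timestamped records, bucketing counts into the currently selected period
* @param {array} db - array of objects with a "ts" timestamp property and an optional "c" count property
* @param {string} label - label to use for the resulting series
* @param {boolean=} sec - set to true if the timestamps are in seconds instead of milliseconds
* @returns {object} object as {"data":[[0,1],[1,3], ...], "label":label}, suitable for passing inside the dataPoints array of {@link countlyCommon.drawTimeGraph}
*/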
countlyCommon.extractUserChartData = function(db, label, sec) {
var ret = { "data": [], "label": label };
countlyCommon.periodObj = getPeriodObj();
var periodMin, periodMax, dateob;
if (countlyCommon.periodObj.isSpecialPeriod) {
periodMin = 0;
periodMax = (countlyCommon.periodObj.daysInPeriod);
var dateob1 = countlyCommon.processPeriod(countlyCommon.periodObj.currentPeriodArr[0].toString());
var dateob2 = countlyCommon.processPeriod(countlyCommon.periodObj.currentPeriodArr[countlyCommon.periodObj.currentPeriodArr.length - 1].toString());
dateob = { timestart: dateob1.timestart, timeend: dateob2.timeend, range: "d" };
}
else {
periodMin = countlyCommon.periodObj.periodMin;
periodMax = countlyCommon.periodObj.periodMax + 1;
dateob = countlyCommon.processPeriod(countlyCommon.periodObj.activePeriod.toString());
}
var res = [],
ts;
//get all timestamps in that period
var i = 0;
for (i = 0, l = db.length; i < l; i++) {
ts = db[i];
if (sec) {
ts.ts = ts.ts * 1000;
}
if (ts.ts > dateob.timestart && ts.ts <= dateob.timeend) {
res.push(ts);
}
}
var lastStart,
lastEnd = dateob.timestart,
total,
data = ret.data;
for (i = periodMin; i < periodMax; i++) {
total = 0;
lastStart = lastEnd;
lastEnd = moment(lastStart).add(moment.duration(1, dateob.range)).valueOf();
for (var j = 0, l = res.length; j < l; j++) {
ts = res[j];
if (ts.ts > lastStart && ts.ts <= lastEnd) {
if (ts.c) {
total += ts.c;
}
else {
total++;
}
}
}
data.push([i, total]);
}
return ret;
};
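/**
* Convert a dot separated period string into its timestamp boundaries and time bucket
* @param {string} period - period string as "YYYY", "YYYY.M" or "YYYY.M.D"
* @returns {object} object as {timestart: ms, timeend: ms, range: "M"|"d"|"h"}
* @example
* //returns the start and end timestamps of that day (exact values depend on the local timezone) with range "h"
* countlyCommon.processPeriod("2017.1.1");
*/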
countlyCommon.processPeriod = function(period) {
var date = period.split(".");
var range,
timestart,
timeend;
if (date.length === 1) {
range = "M";
timestart = moment(period, "YYYY").valueOf();
timeend = moment(period, "YYYY").add(moment.duration(1, "y")).valueOf();
}
else if (date.length === 2) {
range = "d";
timestart = moment(period, "YYYY.MM").valueOf();
timeend = moment(period, "YYYY.MM").add(moment.duration(1, "M")).valueOf();
}
else if (date.length === 3) {
range = "h";
timestart = moment(period, "YYYY.MM.DD").valueOf();
timeend = moment(period, "YYYY.MM.DD").add(moment.duration(1, "d")).valueOf();
}
return { timestart: timestart, timeend: timeend, range: range };
};
/**
* Shortens the given number by adding a K (thousand), M (million) or B (billion) postfix. K is added only if the number is 10000 or bigger, etc.
* @param {number} number - number to shorten
* @returns {string} shorter representation of number
* @example
* //outputs 10K
* countlyCommon.getShortNumber(10000);
*/
countlyCommon.getShortNumber = function(number) {
var tmpNumber = "";
if (number >= 1000000000 || number <= -1000000000) {
tmpNumber = ((number / 1000000000).toFixed(1).replace(".0", "")) + "B";
}
else if (number >= 1000000 || number <= -1000000) {
tmpNumber = ((number / 1000000).toFixed(1).replace(".0", "")) + "M";
}
else if (number >= 10000 || number <= -10000) {
tmpNumber = ((number / 1000).toFixed(1).replace(".0", "")) + "K";
}
else {
number += "";
tmpNumber = number.replace(".0", "");
}
return tmpNumber;
};
/**
* Getting the date range shown on the dashboard like 1 Aug - 30 Aug, using {@link countlyCommon.periodObj) dateString property which holds the date format.
* @returns {string} string with formatted date range as 1 Aug - 30 Aug
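* @example
* //outputs something like "1 Aug - 30 Aug", depending on the currently selected period
* countlyCommon.getDateRange();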
*/
countlyCommon.getDateRange = function() {
countlyCommon.periodObj = getPeriodObj();
var formattedDateStart = "";
var formattedDateEnd = "";
if (!countlyCommon.periodObj.isSpecialPeriod) {
if (countlyCommon.periodObj.dateString === "HH:mm") {
formattedDateStart = moment(countlyCommon.periodObj.activePeriod + " " + countlyCommon.periodObj.periodMin + ":00", "YYYY.M.D HH:mm");
formattedDateEnd = moment(countlyCommon.periodObj.activePeriod + " " + countlyCommon.periodObj.periodMax + ":00", "YYYY.M.D HH:mm");
var nowMin = moment().format("mm");
formattedDateEnd.add(nowMin, "minutes");
}
else if (countlyCommon.periodObj.dateString === "D MMM, HH:mm") {
formattedDateStart = moment(countlyCommon.periodObj.activePeriod, "YYYY.M.D");
formattedDateEnd = moment(countlyCommon.periodObj.activePeriod, "YYYY.M.D").add(23, "hours").add(59, "minutes");
}
else {
formattedDateStart = moment(countlyCommon.periodObj.activePeriod + "." + countlyCommon.periodObj.periodMin, "YYYY.M.D");
formattedDateEnd = moment(countlyCommon.periodObj.activePeriod + "." + countlyCommon.periodObj.periodMax, "YYYY.M.D");
}
}
else {
formattedDateStart = moment(countlyCommon.periodObj.currentPeriodArr[0], "YYYY.M.D");
formattedDateEnd = moment(countlyCommon.periodObj.currentPeriodArr[(countlyCommon.periodObj.currentPeriodArr.length - 1)], "YYYY.M.D");
}
var fromStr = countlyCommon.formatDate(formattedDateStart, countlyCommon.periodObj.dateString),
toStr = countlyCommon.formatDate(formattedDateEnd, countlyCommon.periodObj.dateString);
if (fromStr === toStr) {
return fromStr;
}
else {
return fromStr + " - " + toStr;
}
};
/**
* Merge standard countly metric data objects, merging the updateObj retrieved from action=refresh api requests into dbObj.
* Used for merging the received data for today to the existing data while updating the dashboard.
* @param {object} dbObj - standard metric data object
* @param {object} updateObj - standard metric data object retrieved from action=refresh request to last time bucket data only
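* @example <caption>A minimal sketch; the variable names are assumptions</caption>
* //merges the freshly fetched data for today into the already loaded sessions object
* countlyCommon.extendDbObj(_sessionDb, refreshedSessionDb);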
*/
countlyCommon.extendDbObj = function(dbObj, updateObj) {
var now = moment(),
year = now.year(),
month = (now.month() + 1),
day = now.date(),
weekly = Math.ceil(now.format("DDD") / 7),
intRegex = /^\d+$/,
tmpUpdateObj = {},
tmpOldObj = {};
if (updateObj[year] && updateObj[year][month] && updateObj[year][month][day]) {
if (!dbObj[year]) {
dbObj[year] = {};
}
if (!dbObj[year][month]) {
dbObj[year][month] = {};
}
if (!dbObj[year][month][day]) {
dbObj[year][month][day] = {};
}
if (!dbObj[year]["w" + weekly]) {
dbObj[year]["w" + weekly] = {};
}
tmpUpdateObj = updateObj[year][month][day];
tmpOldObj = dbObj[year][month][day];
dbObj[year][month][day] = updateObj[year][month][day];
}
if (updateObj.meta) {
if (!dbObj.meta) {
dbObj.meta = {};
}
dbObj.meta = updateObj.meta;
}
for (var level1 in tmpUpdateObj) {
if (!tmpUpdateObj.hasOwnProperty(level1)) {
continue;
}
if (intRegex.test(level1)) {
continue;
}
if (_.isObject(tmpUpdateObj[level1])) {
if (!dbObj[year][level1]) {
dbObj[year][level1] = {};
}
if (!dbObj[year][month][level1]) {
dbObj[year][month][level1] = {};
}
if (!dbObj[year]["w" + weekly][level1]) {
dbObj[year]["w" + weekly][level1] = {};
}
}
else {
if (dbObj[year][level1]) {
if (tmpOldObj[level1]) {
dbObj[year][level1] += (tmpUpdateObj[level1] - tmpOldObj[level1]);
}
else {
dbObj[year][level1] += tmpUpdateObj[level1];
}
}
else {
dbObj[year][level1] = tmpUpdateObj[level1];
}
if (dbObj[year][month][level1]) {
if (tmpOldObj[level1]) {
dbObj[year][month][level1] += (tmpUpdateObj[level1] - tmpOldObj[level1]);
}
else {
dbObj[year][month][level1] += tmpUpdateObj[level1];
}
}
else {
dbObj[year][month][level1] = tmpUpdateObj[level1];
}
if (dbObj[year]["w" + weekly][level1]) {
if (tmpOldObj[level1]) {
dbObj[year]["w" + weekly][level1] += (tmpUpdateObj[level1] - tmpOldObj[level1]);
}
else {
dbObj[year]["w" + weekly][level1] += tmpUpdateObj[level1];
}
}
else {
dbObj[year]["w" + weekly][level1] = tmpUpdateObj[level1];
}
}
if (tmpUpdateObj[level1]) {
for (var level2 in tmpUpdateObj[level1]) {
if (!tmpUpdateObj[level1].hasOwnProperty(level2)) {
continue;
}
if (dbObj[year][level1][level2]) {
if (tmpOldObj[level1] && tmpOldObj[level1][level2]) {
dbObj[year][level1][level2] += (tmpUpdateObj[level1][level2] - tmpOldObj[level1][level2]);
}
else {
dbObj[year][level1][level2] += tmpUpdateObj[level1][level2];
}
}
else {
dbObj[year][level1][level2] = tmpUpdateObj[level1][level2];
}
if (dbObj[year][month][level1][level2]) {
if (tmpOldObj[level1] && tmpOldObj[level1][level2]) {
dbObj[year][month][level1][level2] += (tmpUpdateObj[level1][level2] - tmpOldObj[level1][level2]);
}
else {
dbObj[year][month][level1][level2] += tmpUpdateObj[level1][level2];
}
}
else {
dbObj[year][month][level1][level2] = tmpUpdateObj[level1][level2];
}
if (dbObj[year]["w" + weekly][level1][level2]) {
if (tmpOldObj[level1] && tmpOldObj[level1][level2]) {
dbObj[year]["w" + weekly][level1][level2] += (tmpUpdateObj[level1][level2] - tmpOldObj[level1][level2]);
}
else {
dbObj[year]["w" + weekly][level1][level2] += tmpUpdateObj[level1][level2];
}
}
else {
dbObj[year]["w" + weekly][level1][level2] = tmpUpdateObj[level1][level2];
}
}
}
}
// Fix update of total user count
if (updateObj[year]) {
if (updateObj[year].u) {
if (!dbObj[year]) {
dbObj[year] = {};
}
dbObj[year].u = updateObj[year].u;
}
if (updateObj[year][month] && updateObj[year][month].u) {
if (!dbObj[year]) {
dbObj[year] = {};
}
if (!dbObj[year][month]) {
dbObj[year][month] = {};
}
dbObj[year][month].u = updateObj[year][month].u;
}
if (updateObj[year]["w" + weekly] && updateObj[year]["w" + weekly].u) {
if (!dbObj[year]) {
dbObj[year] = {};
}
if (!dbObj[year]["w" + weekly]) {
dbObj[year]["w" + weekly] = {};
}
dbObj[year]["w" + weekly].u = updateObj[year]["w" + weekly].u;
}
}
};
/**
* Convert string to first letter uppercase and all other letters - lowercase for each word
* @param {string} str - string to convert
* @returns {string} converted string
* @example
* //outputs Hello World
* countlyCommon.toFirstUpper("hello world");
*/
countlyCommon.toFirstUpper = function(str) {
return str.replace(/\w\S*/g, function(txt) {
return txt.charAt(0).toUpperCase() + txt.substr(1).toLowerCase();
});
};
/**
* Safe division between numbers providing 0 as result in cases when dividing by 0
* @param {number} val1 - number which to divide
* @param {number} val2 - number by which to divide
* @returns {number} result of division
* @example
* //outputs 0
* countlyCommon.divide(100, 0);
*/
countlyCommon.divide = function(val1, val2) {
var temp = val1 / val2;
if (!temp || temp === Number.POSITIVE_INFINITY) {
temp = 0;
}
return temp;
};
/**
* Get Date graph ticks
* @param {string} bucket - time bucket, accepted values: hourly, weekly, monthly
* @param {boolean} overrideBucket - override existing bucket logic and simply use current date for generating ticks
* @returns {object} object containing tick texts and ticks to use on time graphs
* @example <caption>Example output</caption>
*{
* "min":0,
* "max":29,
* "tickTexts":["22 Dec, Thursday","23 Dec, Friday","24 Dec, Saturday","25 Dec, Sunday","26 Dec, Monday","27 Dec, Tuesday","28 Dec, Wednesday",
* "29 Dec, Thursday","30 Dec, Friday","31 Dec, Saturday","1 Jan, Sunday","2 Jan, Monday","3 Jan, Tuesday","4 Jan, Wednesday","5 Jan, Thursday",
* "6 Jan, Friday","7 Jan, Saturday","8 Jan, Sunday","9 Jan, Monday","10 Jan, Tuesday","11 Jan, Wednesday","12 Jan, Thursday","13 Jan, Friday",
* "14 Jan, Saturday","15 Jan, Sunday","16 Jan, Monday","17 Jan, Tuesday","18 Jan, Wednesday","19 Jan, Thursday","20 Jan, Friday"],
* "ticks":[[1,"23 Dec"],[4,"26 Dec"],[7,"29 Dec"],[10,"1 Jan"],[13,"4 Jan"],[16,"7 Jan"],[19,"10 Jan"],[22,"13 Jan"],[25,"16 Jan"],[28,"19 Jan"]]
*}
*/
countlyCommon.getTickObj = function(bucket, overrideBucket) {
var days = parseInt(countlyCommon.periodObj.numberOfDays, 10),
ticks = [],
tickTexts = [],
skipReduction = false,
limitAdjustment = 0;
if (overrideBucket) {
var thisDay = moment(countlyCommon.periodObj.activePeriod, "YYYY.M.D");
ticks.push([0, countlyCommon.formatDate(thisDay, "D MMM")]);
tickTexts[0] = countlyCommon.formatDate(thisDay, "D MMM, dddd");
}
else if ((days === 1 && _period !== "month" && _period !== "day") || (days === 1 && bucket === "hourly")) {
for (var z = 0; z < 24; z++) {
ticks.push([z, (z + ":00")]);
tickTexts.push((z + ":00"));
}
skipReduction = true;
}
else {
var start = moment().subtract(days, 'days');
if (Object.prototype.toString.call(countlyCommon.getPeriod()) === '[object Array]') {
start = moment(countlyCommon.periodObj.currentPeriodArr[countlyCommon.periodObj.currentPeriodArr.length - 1], "YYYY.MM.DD").subtract(days, 'days');
}
var i = 0;
if (bucket === "monthly") {
var allMonths = [];
//so we would not start from previous year
start.add(1, 'day');
for (i = 0; i < 12; i++) {
allMonths.push(start.format("MMM YYYY"));
start.add(1, 'months');
}
allMonths = _.uniq(allMonths);
for (i = 0; i < allMonths.length; i++) {
ticks.push([i, allMonths[i]]);
tickTexts[i] = allMonths[i];
}
}
else if (bucket === "weekly") {
var allWeeks = [];
for (i = 0; i < days; i++) {
start.add(1, 'days');
allWeeks.push(start.isoWeek() + " " + start.isoWeekYear());
}
allWeeks = _.uniq(allWeeks);
for (i = 0; i < allWeeks.length; i++) {
var parts = allWeeks[i].split(" ");
//iso week falls in the year which has thursday of the week
if (parseInt(parts[1]) === moment().isoWeekYear(parseInt(parts[1])).isoWeek(parseInt(parts[0])).isoWeekday(4).year()) {
ticks.push([i, "W" + allWeeks[i]]);
var weekText = countlyCommon.formatDate(moment().isoWeekYear(parseInt(parts[1])).isoWeek(parseInt(parts[0])).isoWeekday(1), ", D MMM YYYY");
tickTexts[i] = "W" + parts[0] + weekText;
}
}
}
else if (bucket === "hourly") {
for (i = 0; i < days; i++) {
start.add(1, 'days');
for (var j = 0; j < 24; j++) {
if (j === 0) {
ticks.push([((24 * i) + j), countlyCommon.formatDate(start, "D MMM") + " 0:00"]);
}
tickTexts.push(countlyCommon.formatDate(start, "D MMM, ") + j + ":00");
}
}
}
else {
if (_period === "day") {
for (i = 0; i < new Date(start.year(), start.month(), 0).getDate(); i++) {
start.add(1, 'days');
ticks.push([i, countlyCommon.formatDate(start, "D MMM")]);
tickTexts[i] = countlyCommon.formatDate(start, "D MMM, dddd");
}
}
else {
var startYear = start.year();
var endYear = moment().year();
for (i = 0; i < days; i++) {
start.add(1, 'days');
if (startYear < endYear) {
ticks.push([i, countlyCommon.formatDate(start, "D MMM YYYY")]);
tickTexts[i] = countlyCommon.formatDate(start, "D MMM YYYY, dddd");
}
else {
ticks.push([i, countlyCommon.formatDate(start, "D MMM")]);
tickTexts[i] = countlyCommon.formatDate(start, "D MMM, dddd");
}
}
}
}
ticks = _.compact(ticks);
tickTexts = _.compact(tickTexts);
}
if (ticks.length <= 2) {
limitAdjustment = 0.02;
var tmpTicks = [],
tmpTickTexts = [];
tmpTickTexts[0] = "";
tmpTicks[0] = [-0.02, ""];
for (var m = 0; m < ticks.length; m++) {
tmpTicks[m + 1] = [m, ticks[m][1]];
tmpTickTexts[m + 1] = tickTexts[m];
}
tmpTickTexts.push("");
tmpTicks.push([tmpTicks.length - 1 - 0.98, ""]);
ticks = tmpTicks;
tickTexts = tmpTickTexts;
}
else if (!skipReduction && ticks.length > 10) {
var reducedTicks = [],
step = (Math.floor(ticks.length / 10) < 1) ? 1 : Math.floor(ticks.length / 10),
pickStartIndex = (Math.floor(ticks.length / 30) < 1) ? 1 : Math.floor(ticks.length / 30);
for (var l = pickStartIndex; l < (ticks.length - 1); l = l + step) {
reducedTicks.push(ticks[l]);
}
ticks = reducedTicks;
}
else {
ticks[0] = null;
// Hourly ticks already contain 23 empty slots at the end
if (!(bucket === "hourly" && days !== 1)) {
ticks[ticks.length - 1] = null;
}
}
return {
min: 0 - limitAdjustment,
max: (limitAdjustment) ? tickTexts.length - 3 + limitAdjustment : tickTexts.length - 1,
tickTexts: tickTexts,
ticks: _.compact(ticks)
};
};
/**
* Joins 2 arrays into one, removing all duplicated values
* @param {array} x - first array
* @param {array} y - second array
* @returns {array} new array with only unique values from x and y
* @example
* //outputs [1,2,3]
* countlyCommon.union([1,2],[2,3]);
*/
countlyCommon.union = function(x, y) {
if (!x) {
return y;
}
else if (!y) {
return x;
}
var obj = {};
var i = 0;
for (i = x.length - 1; i >= 0; --i) {
obj[x[i]] = true;
}
for (i = y.length - 1; i >= 0; --i) {
obj[y[i]] = true;
}
var res = [];
for (var k in obj) {
res.push(k);
}
return res;
};
/**
* Formats the number by separating every 3 digits with a comma
* @param {number} x - number to format
* @returns {string} formatted number
* @example
* //outputs 1,234,567
* countlyCommon.formatNumber(1234567);
*/
countlyCommon.formatNumber = function(x) {
x = parseFloat(parseFloat(x).toFixed(2));
var parts = x.toString().split(".");
parts[0] = parts[0].replace(/\B(?=(\d{3})+(?!\d))/g, ",");
return parts.join(".");
};
/**
* Pads a number with the specified character from the left to the specified length
* @param {number} n - number to pad
* @param {number} width - length to pad to, in symbols
* @param {string} z - character to pad with, default 0
* @returns {string} padded number
* @example
* //outputs 0012
* countlyCommon.pad(12, 4, "0");
*/
countlyCommon.pad = function(n, width, z) {
z = z || '0';
n = n + '';
return n.length >= width ? n : new Array(width - n.length + 1).join(z) + n;
};
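/**
* Get the list of date ids covered by the current period, used for matching graph notes to time buckets
* @param {string=} bucket - time bucket, for example hourly, daily or monthly; "hourly" and "monthly" change the granularity of the returned ids
* @returns {array} array of date id strings, for example "YYYYMMDD" for daily buckets, "YYYYMM" for monthly and "YYYYMMDDHH" for hourly
*/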
countlyCommon.getNoteDateIds = function(bucket) {
var _periodObj = countlyCommon.periodObj,
dateIds = [],
dotSplit = [],
tmpDateStr = "";
var i = 0;
var j = 0;
if (!_periodObj.isSpecialPeriod && !bucket) {
for (i = _periodObj.periodMin; i < (_periodObj.periodMax + 1); i++) {
dotSplit = (_periodObj.activePeriod + "." + i).split(".");
tmpDateStr = "";
for (j = 0; j < dotSplit.length; j++) {
if (dotSplit[j].length === 1) {
tmpDateStr += "0" + dotSplit[j];
}
else {
tmpDateStr += dotSplit[j];
}
}
dateIds.push(tmpDateStr);
}
}
else {
if (!_periodObj.currentPeriodArr && bucket === "daily") {
var tmpDate = new Date();
_periodObj.currentPeriodArr = [];
if (countlyCommon.getPeriod() === "month") {
for (i = 0; i < (tmpDate.getMonth() + 1); i++) {
var daysInMonth = moment().month(i).daysInMonth();
for (j = 0; j < daysInMonth; j++) {
_periodObj.currentPeriodArr.push(_periodObj.activePeriod + "." + (i + 1) + "." + (j + 1));
// If current day of current month, just break
if ((i === tmpDate.getMonth()) && (j === (tmpDate.getDate() - 1))) {
break;
}
}
}
}
else if (countlyCommon.getPeriod() === "day") {
for (i = 0; i < tmpDate.getDate(); i++) {
_periodObj.currentPeriodArr.push(_periodObj.activePeriod + "." + (i + 1));
}
}
else {
_periodObj.currentPeriodArr.push(_periodObj.activePeriod);
}
}
for (i = 0; i < (_periodObj.currentPeriodArr.length); i++) {
dotSplit = _periodObj.currentPeriodArr[i].split(".");
tmpDateStr = "";
for (j = 0; j < dotSplit.length; j++) {
if (dotSplit[j].length === 1) {
tmpDateStr += "0" + dotSplit[j];
}
else {
tmpDateStr += dotSplit[j];
}
}
dateIds.push(tmpDateStr);
}
}
var tmpDateIds = [];
switch (bucket) {
case "hourly":
for (i = 0; i < 25; i++) {
tmpDateIds.push(dateIds[0] + ((i < 10) ? "0" + i : i));
}
dateIds = tmpDateIds;
break;
case "monthly":
for (i = 0; i < dateIds.length; i++) {
countlyCommon.arrayAddUniq(tmpDateIds, moment(dateIds[i], "YYYYMMDD").format("YYYYMM"));
}
dateIds = tmpDateIds;
break;
}
return dateIds;
};
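    /**
    * Get notes of the currently active app that match the provided date id
    * @param {string} dateId - zero padded date id as returned by countlyCommon.getNoteDateIds
    * @returns {string} matching notes joined by <br/>, empty string if there are none
    */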
countlyCommon.getNotesForDateId = function(dateId) {
var ret = [];
if (countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID] && countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes) {
for (var date in countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes) {
if (date.indexOf(dateId) === 0) {
ret = ret.concat(countlyGlobal.apps[countlyCommon.ACTIVE_APP_ID].notes[date]);
}
}
}
return ret.join("<br/>");
};
/**
    * Add item or array to existing array only if values are not already in the original array. The given array is modified.
* @param {array} arr - original array where to add unique elements
* @param {string|number|array} item - item to add or array to merge
*/
countlyCommon.arrayAddUniq = function(arr, item) {
if (!arr) {
arr = [];
}
if (toString.call(item) === "[object Array]") {
for (var i = 0; i < item.length; i++) {
if (arr.indexOf(item[i]) === -1) {
arr[arr.length] = item[i];
}
}
}
else {
if (arr.indexOf(item) === -1) {
arr[arr.length] = item;
}
}
};
/**
    * Format timestamp to twitter-like "time ago" format with the real date as tooltip and hidden data for exporting
    * @param {number} timestamp - timestamp in seconds or milliseconds
    * @returns {string} formatted time ago
* @example
* //outputs <span title="Tue, 17 Jan 2017 13:54:26">3 days ago<a style="display: none;">|Tue, 17 Jan 2017 13:54:26</a></span>
* countlyCommon.formatTimeAgo(1484654066);
*/
countlyCommon.formatTimeAgo = function(timestamp) {
if (Math.round(timestamp).toString().length === 10) {
timestamp *= 1000;
}
var target = new Date(timestamp);
var tooltip = moment(target).format("ddd, D MMM YYYY HH:mm:ss");
var elem = $("<span>");
elem.prop("title", tooltip);
var now = new Date();
var diff = Math.floor((now - target) / 1000);
if (diff <= -2592000) {
elem.text(tooltip);
}
else if (diff < -86400) {
elem.text(jQuery.i18n.prop("common.in.days", Math.abs(Math.round(diff / 86400))));
}
else if (diff < -3600) {
elem.text(jQuery.i18n.prop("common.in.hours", Math.abs(Math.round(diff / 3600))));
}
else if (diff < -60) {
elem.text(jQuery.i18n.prop("common.in.minutes", Math.abs(Math.round(diff / 60))));
}
else if (diff <= -1) {
elem.css("color", "#50C354"); elem.text(jQuery.i18n.prop("common.in.seconds", Math.abs(diff)));
}
else if (diff <= 1) {
elem.css("color", "#50C354"); elem.text(jQuery.i18n.map["common.ago.just-now"]);
}
else if (diff < 20) {
elem.css("color", "#50C354"); elem.text(jQuery.i18n.prop("common.ago.seconds-ago", diff));
}
else if (diff < 40) {
elem.css("color", "#50C354"); elem.text(jQuery.i18n.map["common.ago.half-minute"]);
}
else if (diff < 60) {
elem.css("color", "#50C354"); elem.text(jQuery.i18n.map["common.ago.less-minute"]);
}
else if (diff <= 90) {
elem.text(jQuery.i18n.map["common.ago.one-minute"]);
}
else if (diff <= 3540) {
elem.text(jQuery.i18n.prop("common.ago.minutes-ago", Math.round(diff / 60)));
}
else if (diff <= 5400) {
elem.text(jQuery.i18n.map["common.ago.one-hour"]);
}
else if (diff <= 86400) {
elem.text(jQuery.i18n.prop("common.ago.hours-ago", Math.round(diff / 3600)));
}
else if (diff <= 129600) {
elem.text(jQuery.i18n.map["common.ago.one-day"]);
}
else if (diff < 604800) {
elem.text(jQuery.i18n.prop("common.ago.days-ago", Math.round(diff / 86400)));
}
else if (diff <= 777600) {
elem.text(jQuery.i18n.map["common.ago.one-week"]);
}
else if (diff <= 2592000) {
elem.text(jQuery.i18n.prop("common.ago.days-ago", Math.round(diff / 86400)));
}
else {
elem.text(tooltip);
}
elem.append("<a style='display: none;'>|" + tooltip + "</a>");
return elem.prop('outerHTML');
};
/**
    * Format duration into units of how much time has passed
    * @param {number} timestamp - amount in seconds passed since some reference point
    * @returns {string} formatted time showing how many units have passed
* @example
* //outputs 47 year(s) 28 day(s) 11:54:26
* countlyCommon.formatTime(1484654066);
*/
countlyCommon.formatTime = function(timestamp) {
var str = "";
var seconds = timestamp % 60;
str = str + leadingZero(seconds);
timestamp -= seconds;
var minutes = timestamp % (60 * 60);
str = leadingZero(minutes / 60) + ":" + str;
timestamp -= minutes;
var hours = timestamp % (60 * 60 * 24);
str = leadingZero(hours / (60 * 60)) + ":" + str;
timestamp -= hours;
if (timestamp > 0) {
var days = timestamp % (60 * 60 * 24 * 365);
str = (days / (60 * 60 * 24)) + " day(s) " + str;
timestamp -= days;
if (timestamp > 0) {
str = (timestamp / (60 * 60 * 24 * 365)) + " year(s) " + str;
}
}
return str;
};
/**
    * Format duration into the highest unit of how much time has passed. Used for big numbers
    * @param {number} timespent - amount in minutes passed since some reference point
    * @returns {string} formatted time showing how many of the highest unit have passed
* @example
* //outputs 2824.7 yrs
* countlyCommon.timeString(1484654066);
*/
countlyCommon.timeString = function(timespent) {
var timeSpentString = (timespent.toFixed(1)) + " " + jQuery.i18n.map["common.minute.abrv"];
if (timespent >= 142560) {
timeSpentString = (timespent / 525600).toFixed(1) + " " + jQuery.i18n.map["common.year.abrv"];
}
else if (timespent >= 1440) {
timeSpentString = (timespent / 1440).toFixed(1) + " " + jQuery.i18n.map["common.day.abrv"];
}
else if (timespent >= 60) {
timeSpentString = (timespent / 60).toFixed(1) + " " + jQuery.i18n.map["common.hour.abrv"];
}
return timeSpentString;
/*var timeSpentString = "";
if(timespent > 1){
timeSpentString = Math.floor(timespent) + " " + jQuery.i18n.map["common.minute.abrv"]+" ";
var left = Math.floor((timespent - Math.floor(timespent))*60);
if(left > 0)
timeSpentString += left + " s";
}
else
timeSpentString += Math.floor((timespent - Math.floor(timespent))*60) + " s";
if (timespent >= 142560) {
timeSpentString = Math.floor(timespent / 525600) + " " + jQuery.i18n.map["common.year.abrv"];
var left = Math.floor((timespent - Math.floor(timespent / 525600)*525600)/1440);
if(left > 0)
timeSpentString += " "+left + " " + jQuery.i18n.map["common.day.abrv"];
} else if (timespent >= 1440) {
timeSpentString = Math.floor(timespent / 1440) + " " + jQuery.i18n.map["common.day.abrv"];
var left = Math.floor((timespent - Math.floor(timespent / 1440)*1440)/60);
if(left > 0)
timeSpentString += " "+left + " " + jQuery.i18n.map["common.hour.abrv"];
} else if (timespent >= 60) {
timeSpentString = Math.floor(timespent / 60) + " " + jQuery.i18n.map["common.hour.abrv"];
var left = Math.floor(timespent - Math.floor(timespent / 60)*60)
if(left > 0)
timeSpentString += " "+left + " " + jQuery.i18n.map["common.minute.abrv"];
}
return timeSpentString;*/
};
/**
* Get date from seconds timestamp
    * @param {number} timestamp - timestamp in seconds or milliseconds
    * @returns {string} formatted date
* @example
* //outputs 17.01.2017
* countlyCommon.getDate(1484654066);
*/
countlyCommon.getDate = function(timestamp) {
if (Math.round(timestamp).toString().length === 10) {
timestamp *= 1000;
}
var d = new Date(timestamp);
return moment(d).format("ddd, D MMM YYYY");
//return leadingZero(d.getDate()) + "." + leadingZero(d.getMonth() + 1) + "." + d.getFullYear();
};
/**
* Get time from seconds timestamp
    * @param {number} timestamp - timestamp in seconds or milliseconds
    * @returns {string} formatted time
* @example
* //outputs 13:54
* countlyCommon.getTime(1484654066);
*/
countlyCommon.getTime = function(timestamp) {
if (Math.round(timestamp).toString().length === 10) {
timestamp *= 1000;
}
var d = new Date(timestamp);
return leadingZero(d.getHours()) + ":" + leadingZero(d.getMinutes());
};
/**
* Round to provided number of digits
* @param {number} num - number to round
* @param {number} digits - amount of digits to round to
* @returns {number} rounded number
* @example
* //outputs 1.235
* countlyCommon.round(1.2345, 3);
*/
countlyCommon.round = function(num, digits) {
digits = Math.pow(10, digits || 0);
return Math.round(num * digits) / digits;
};
/**
    * Get calculated totals for each property, usually used as main dashboard timeline data without metric segments
* @param {object} data - countly metric model data
* @param {array} properties - array of all properties to extract
* @param {array} unique - array of all properties that are unique from properties array. We need to apply estimation to them
* @param {object} estOverrideMetric - using unique property as key and total_users estimation property as value for all unique metrics that we want to have total user estimation overridden
* @param {function} clearObject - function to prefill all expected properties as u, t, n, etc with 0, so you would not have null in the result which won't work when drawing graphs
* @param {string=} segment - segment value for which to fetch metric data
* @returns {object} dashboard data object
* @example
* countlyCommon.getDashboardData(countlySession.getDb(), ["t", "n", "u", "d", "e", "p", "m"], ["u", "p", "m"], {u:"users"}, countlySession.clearObject);
* //outputs
* {
* "t":{"total":980,"prev-total":332,"change":"195.2%","trend":"u"},
* "n":{"total":402,"prev-total":255,"change":"57.6%","trend":"u"},
* "u":{"total":423,"prev-total":255,"change":"75.7%","trend":"u","isEstimate":false},
* "d":{"total":0,"prev-total":0,"change":"NA","trend":"u"},
* "e":{"total":980,"prev-total":332,"change":"195.2%","trend":"u"},
* "p":{"total":103,"prev-total":29,"change":"255.2%","trend":"u","isEstimate":true},
* "m":{"total":86,"prev-total":0,"change":"NA","trend":"u","isEstimate":true}
* }
*/
countlyCommon.getDashboardData = function(data, properties, unique, estOverrideMetric, clearObject, segment) {
if (segment) {
segment = "." + segment;
}
else {
segment = "";
}
var _periodObj = countlyCommon.periodObj,
dataArr = {},
tmp_x,
tmp_y,
tmpUniqObj,
tmpPrevUniqObj,
current = {},
previous = {},
currentCheck = {},
previousCheck = {},
change = {},
isEstimate = false;
var i = 0;
var j = 0;
for (i = 0; i < properties.length; i++) {
current[properties[i]] = 0;
previous[properties[i]] = 0;
currentCheck[properties[i]] = 0;
previousCheck[properties[i]] = 0;
}
if (_periodObj.isSpecialPeriod) {
isEstimate = true;
for (j = 0; j < (_periodObj.currentPeriodArr.length); j++) {
tmp_x = countlyCommon.getDescendantProp(data, _periodObj.currentPeriodArr[j] + segment);
tmp_x = clearObject(tmp_x);
for (i = 0; i < properties.length; i++) {
if (unique.indexOf(properties[i]) === -1) {
current[properties[i]] += tmp_x[properties[i]];
}
}
}
for (j = 0; j < (_periodObj.previousPeriodArr.length); j++) {
tmp_y = countlyCommon.getDescendantProp(data, _periodObj.previousPeriodArr[j] + segment);
tmp_y = clearObject(tmp_y);
for (i = 0; i < properties.length; i++) {
if (unique.indexOf(properties[i]) === -1) {
previous[properties[i]] += tmp_y[properties[i]];
}
}
}
//deal with unique values separately
for (j = 0; j < (_periodObj.uniquePeriodArr.length); j++) {
tmp_x = countlyCommon.getDescendantProp(data, _periodObj.uniquePeriodArr[j] + segment);
tmp_x = clearObject(tmp_x);
for (i = 0; i < unique.length; i++) {
current[unique[i]] += tmp_x[unique[i]];
}
}
for (j = 0; j < (_periodObj.previousUniquePeriodArr.length); j++) {
tmp_y = countlyCommon.getDescendantProp(data, _periodObj.previousUniquePeriodArr[j] + segment);
tmp_y = clearObject(tmp_y);
for (i = 0; i < unique.length; i++) {
previous[unique[i]] += tmp_y[unique[i]];
}
}
//recheck unique values with larger buckets
for (j = 0; j < (_periodObj.uniquePeriodCheckArr.length); j++) {
tmpUniqObj = countlyCommon.getDescendantProp(data, _periodObj.uniquePeriodCheckArr[j] + segment);
tmpUniqObj = clearObject(tmpUniqObj);
for (i = 0; i < unique.length; i++) {
currentCheck[unique[i]] += tmpUniqObj[unique[i]];
}
}
for (j = 0; j < (_periodObj.previousUniquePeriodArr.length); j++) {
tmpPrevUniqObj = countlyCommon.getDescendantProp(data, _periodObj.previousUniquePeriodArr[j] + segment);
tmpPrevUniqObj = clearObject(tmpPrevUniqObj);
for (i = 0; i < unique.length; i++) {
previousCheck[unique[i]] += tmpPrevUniqObj[unique[i]];
}
}
//check if we should overwrite uniques
for (i = 0; i < unique.length; i++) {
if (current[unique[i]] > currentCheck[unique[i]]) {
current[unique[i]] = currentCheck[unique[i]];
}
if (previous[unique[i]] > previousCheck[unique[i]]) {
previous[unique[i]] = previousCheck[unique[i]];
}
}
}
else {
tmp_x = countlyCommon.getDescendantProp(data, _periodObj.activePeriod + segment);
tmp_y = countlyCommon.getDescendantProp(data, _periodObj.previousPeriod + segment);
tmp_x = clearObject(tmp_x);
tmp_y = clearObject(tmp_y);
for (i = 0; i < properties.length; i++) {
current[properties[i]] = tmp_x[properties[i]];
previous[properties[i]] = tmp_y[properties[i]];
}
}
//check if we can correct data using total users correction
if (estOverrideMetric && countlyTotalUsers.isUsable()) {
for (i = 0; i < unique.length; i++) {
if (estOverrideMetric[unique[i]] && countlyTotalUsers.get(estOverrideMetric[unique[i]]).users) {
current[unique[i]] = countlyTotalUsers.get(estOverrideMetric[unique[i]]).users;
}
if (estOverrideMetric[unique[i]] && countlyTotalUsers.get(estOverrideMetric[unique[i]], true).users) {
previous[unique[i]] = countlyTotalUsers.get(estOverrideMetric[unique[i]], true).users;
}
}
}
// Total users can't be less than new users
if (typeof current.u !== "undefined" && typeof current.n !== "undefined" && current.u < current.n) {
if (estOverrideMetric && countlyTotalUsers.isUsable() && estOverrideMetric.u && countlyTotalUsers.get(estOverrideMetric.u).users) {
current.n = current.u;
}
else {
current.u = current.n;
}
}
// Total users can't be more than total sessions
if (typeof current.u !== "undefined" && typeof current.t !== "undefined" && current.u > current.t) {
current.u = current.t;
}
for (i = 0; i < properties.length; i++) {
change[properties[i]] = countlyCommon.getPercentChange(previous[properties[i]], current[properties[i]]);
dataArr[properties[i]] = {
"total": current[properties[i]],
"prev-total": previous[properties[i]],
"change": change[properties[i]].percent,
"trend": change[properties[i]].trend
};
if (unique.indexOf(properties[i]) !== -1) {
dataArr[properties[i]].isEstimate = isEstimate;
}
}
//check if we can correct data using total users correction
if (estOverrideMetric && countlyTotalUsers.isUsable()) {
for (i = 0; i < unique.length; i++) {
if (estOverrideMetric[unique[i]] && countlyTotalUsers.get(estOverrideMetric[unique[i]]).users) {
dataArr[unique[i]].isEstimate = false;
}
}
}
return dataArr;
};
/**
    * Get total data for each time bucket of the period as comma separated string to generate sparkline/small bar lines
    * @param {object} data - countly metric model data
    * @param {object} props - object where key is output property name and value could be string as key from data object or function to create new value based on existing ones
    * @param {function} clearObject - function to prefill all expected properties as u, t, n, etc with 0, so you would not have null in the result which won't work when drawing graphs
    * @returns {object} object with sparkline data for each property
* @example
* var sparkLines = countlyCommon.getSparklineData(countlySession.getDb(), {
* "total-sessions": "t",
* "new-users": "n",
* "total-users": "u",
* "total-duration": "d",
* "events": "e",
* "returning-users": function(tmp_x){return Math.max(tmp_x["u"] - tmp_x["n"], 0);},
* "avg-duration-per-session": function(tmp_x){return (tmp_x["t"] == 0) ? 0 : (tmp_x["d"] / tmp_x["t"]);},
* "avg-events": function(tmp_x){return (tmp_x["u"] == 0) ? 0 : (tmp_x["e"] / tmp_x["u"]);}
* }, countlySession.clearObject);
* //outputs
* {
* "total-sessions":"73,84,80,72,61,18,11,7,17,27,66,39,41,36,39,36,6,11,6,16,22,30,33,34,32,41,29,9,2,2",
* "new-users":"24,30,25,20,16,18,11,7,17,18,20,18,17,11,15,15,6,11,6,16,13,14,12,10,7,4,8,9,2,2",
* "total-users":"45,54,50,44,37,18,11,7,17,27,36,39,41,36,39,36,6,11,6,16,22,30,33,34,32,29,29,9,2,2",
* "total-duration":"0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0",
* "events":"73,84,80,72,61,18,11,7,17,27,66,39,41,36,39,36,6,11,6,16,22,30,33,34,32,41,29,9,2,2",
* "returning-users":"21,24,25,24,21,0,0,0,0,9,16,21,24,25,24,21,0,0,0,0,9,16,21,24,25,25,21,0,0,0",
* "avg-duration-per-session":"0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0",
* "avg-events":"1.6222222222222222,1.5555555555555556,1.6,1.6363636363636365,1.6486486486486487,1,1,1,1,1,1.8333333333333333,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1.4137931034482758,1,1,1,1"
* }
*/
countlyCommon.getSparklineData = function(data, props, clearObject) {
var _periodObj = countlyCommon.periodObj;
var sparkLines = {};
for (var pp in props) {
sparkLines[pp] = [];
}
var tmp_x = "";
var i = 0;
var p = 0;
if (!_periodObj.isSpecialPeriod) {
for (i = _periodObj.periodMin; i < (_periodObj.periodMax + 1); i++) {
tmp_x = countlyCommon.getDescendantProp(data, _periodObj.activePeriod + "." + i);
tmp_x = clearObject(tmp_x);
for (p in props) {
if (typeof props[p] === "string") {
sparkLines[p].push(tmp_x[props[p]]);
}
else if (typeof props[p] === "function") {
sparkLines[p].push(props[p](tmp_x));
}
}
}
}
else {
for (i = 0; i < (_periodObj.currentPeriodArr.length); i++) {
tmp_x = countlyCommon.getDescendantProp(data, _periodObj.currentPeriodArr[i]);
tmp_x = clearObject(tmp_x);
for (p in props) {
if (typeof props[p] === "string") {
sparkLines[p].push(tmp_x[props[p]]);
}
else if (typeof props[p] === "function") {
sparkLines[p].push(props[p](tmp_x));
}
}
}
}
for (var key in sparkLines) {
sparkLines[key] = sparkLines[key].join(",");
}
return sparkLines;
};
/**
* Format date based on some locale settings
* @param {moment} date - moment js object
* @param {string} format - format string to use
* @returns {string} date in formatted string
* @example
* //outputs Jan 20
* countlyCommon.formatDate(moment(), "MMM D");
*/
countlyCommon.formatDate = function(date, format) {
format = countlyCommon.getDateFormat(format);
return date.format(format);
};
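    /**
    * Adjust the provided date format string to the current browser locale (Korean, Japanese and Chinese locales use different ordering and day/month suffixes)
    * @param {string} format - moment js format string
    * @returns {string} format string adjusted to the locale
    */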
countlyCommon.getDateFormat = function(format) {
if (countlyCommon.BROWSER_LANG_SHORT.toLowerCase() === "ko") {
format = format.replace("MMM D", "MMM D[일]").replace("D MMM", "MMM D[일]");
}
else if (countlyCommon.BROWSER_LANG_SHORT.toLowerCase() === "ja") {
format = format.replace("MMM D", "MMM D[日]").replace("D MMM", "MMM D[日]");
}
else if (countlyCommon.BROWSER_LANG_SHORT.toLowerCase() === "zh") {
format = format.replace("MMMM", "M").replace("MMM", "M").replace("MM", "M").replace("DD", "D").replace("D M, YYYY", "YYYY M D").replace("D M", "M D").replace("D", "D[日]").replace("M", "M[月]").replace("YYYY", "YYYY[年]");
}
return format;
};
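    /**
    * Show a tooltip for a graph data point, public wrapper around the internal showTooltip helper
    * @param {object} args - tooltip info (x, y, contents, title, notes)
    */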
countlyCommon.showTooltip = function(args) {
showTooltip(args);
};
/**
    * gets the current day of the year (days passed since January 1st)
    * @returns {number} day of the year
*/
function getDOY() {
var onejan = new Date((new Date()).getFullYear(), 0, 1);
return Math.ceil(((new Date()) - onejan) / 86400000);
}
/**
* Getter for period object
* @returns {object} returns {@link countlyCommon.periodObj}
*/
countlyCommon.getPeriodObj = function() {
return countlyCommon.periodObj;
};
/**
* Getter for period object by providing period string value
* @param {object} period - given period
* @param {number} currentTimeStamp timestamp
* @returns {object} returns {@link countlyCommon.periodObj}
*/
countlyCommon.calcSpecificPeriodObj = function(period, currentTimeStamp) {
return calculatePeriodObj(period, currentTimeStamp);
};
/**
* Calculate period function
* @param {object} period - given period
* @param {number} currentTimeStamp timestamp
* @returns {object} returns {@link countlyCommon.periodObj}
*/
function calculatePeriodObj(period, currentTimeStamp) {
var now = currentTimeStamp ? moment(currentTimeStamp) : moment(currentTimeStamp || undefined);
// var _period = _period ? _period : '30days';
// _period = period ? period : _period;
period = period ? period : _period;
var year = now.year(),
month = (now.month() + 1),
day = now.date(),
activePeriod,
previousPeriod,
periodMax,
periodMin,
periodObj = {},
isSpecialPeriod = false,
daysInPeriod = 0,
numberOfDays = 0,
rangeEndDay = null,
dateString,
periodContainsToday = true;
var previousDate,
previousYear,
previousMonth,
previousDay;
switch (period) {
case "month":
activePeriod = year;
previousPeriod = year - 1;
periodMax = 12;
periodMin = 1;
dateString = "MMM";
numberOfDays = getDOY();
break;
case "day":
activePeriod = year + "." + month;
previousDate = moment(currentTimeStamp || undefined).subtract(day, 'days');
previousYear = previousDate.year();
previousMonth = (previousDate.month() + 1);
previousDay = previousDate.date();
previousPeriod = previousYear + "." + previousMonth;
periodMax = new Date(year, month, 0).getDate();
periodMin = 1;
dateString = "D MMM";
numberOfDays = moment(currentTimeStamp || undefined).format("D");
daysInPeriod = numberOfDays;
break;
case "yesterday":
var yesterday = moment(currentTimeStamp || undefined).subtract(1, 'days'),
year_y = yesterday.year(),
month_y = (yesterday.month() + 1),
day_y = yesterday.date();
activePeriod = year_y + "." + month_y + "." + day_y;
previousDate = moment(currentTimeStamp || undefined).subtract(2, 'days');
previousYear = previousDate.year();
previousMonth = (previousDate.month() + 1);
previousDay = previousDate.date();
previousPeriod = previousYear + "." + previousMonth + "." + previousDay;
periodMax = 23;
periodMin = 0;
dateString = "D MMM, HH:mm";
numberOfDays = 1;
periodContainsToday = false;
break;
case "hour":
activePeriod = year + "." + month + "." + day;
previousDate = moment(currentTimeStamp || undefined).subtract(1, 'days');
previousYear = previousDate.year();
previousMonth = (previousDate.month() + 1);
previousDay = previousDate.date();
previousPeriod = previousYear + "." + previousMonth + "." + previousDay;
periodMax = 23;
periodMin = 0;
dateString = "HH:mm";
numberOfDays = 1;
break;
default:
if (/([0-9]+)days/.test(period)) {
var match = /([0-9]+)days/.exec(period);
if (match[1]) {
numberOfDays = daysInPeriod = parseInt(match[1]);
isSpecialPeriod = true;
}
}
break;
}
// Check whether period object is array
if (Object.prototype.toString.call(period) === '[object Array]' && period.length === 2) {
// "Date to" selected date timezone changes based on how the
// date picker is initialised so we take care of it here
var tmpDate = new Date(period[1]);
tmpDate.setHours(0, 0, 0, 0);
period[1] = tmpDate.getTime();
period[1] -= countlyCommon.getOffsetCorrectionForTimestamp(period[1]);
// One day is selected from the datepicker
if (period[0] === period[1]) {
var selectedDate = moment(period[0]),
selectedYear = selectedDate.year(),
selectedMonth = (selectedDate.month() + 1),
selectedDay = selectedDate.date();
activePeriod = selectedYear + "." + selectedMonth + "." + selectedDay;
previousDate = selectedDate.subtract(1, 'days'),
previousYear = previousDate.year(),
previousMonth = (previousDate.month() + 1),
previousDay = previousDate.date();
previousPeriod = previousYear + "." + previousMonth + "." + previousDay;
periodMax = 23;
periodMin = 0;
dateString = "D MMM, HH:mm";
numberOfDays = 1;
periodContainsToday = (moment(period[0]).format("YYYYMMDD") === now.format("YYYYMMDD"));
}
else {
var a = moment(period[0]),
b = moment(period[1]);
numberOfDays = daysInPeriod = b.diff(a, 'days') + 1;
rangeEndDay = period[1];
periodContainsToday = (b.format("YYYYMMDD") === now.format("YYYYMMDD"));
isSpecialPeriod = true;
}
}
if (daysInPeriod !== 0) {
var yearChanged = false,
currentYear = 0,
currWeeksArr = [],
currWeekCounts = {},
currMonthsArr = [],
currMonthCounts = {},
currPeriodArr = [],
prevWeeksArr = [],
prevWeekCounts = {},
prevMonthsArr = [],
prevMonthCounts = {},
prevPeriodArr = [];
for (var i = (daysInPeriod - 1); i > -1; i--) {
var currIndex = (!rangeEndDay) ? moment(currentTimeStamp || undefined).subtract(i, 'days') : moment(rangeEndDay).subtract(i, 'days'),
currIndexYear = currIndex.year(),
prevIndex = (!rangeEndDay) ? moment(currentTimeStamp || undefined).subtract((daysInPeriod + i), 'days') : moment(rangeEndDay).subtract((daysInPeriod + i), 'days'),
prevYear = prevIndex.year();
if (i !== (daysInPeriod - 1) && currentYear !== currIndexYear) {
yearChanged = true;
}
currentYear = currIndexYear;
// Current period variables
var currWeek = currentYear + "." + "w" + Math.ceil(currIndex.format("DDD") / 7);
currWeeksArr[currWeeksArr.length] = currWeek;
currWeekCounts[currWeek] = (currWeekCounts[currWeek]) ? (currWeekCounts[currWeek] + 1) : 1;
var currMonth = currIndex.format("YYYY.M");
currMonthsArr[currMonthsArr.length] = currMonth;
currMonthCounts[currMonth] = (currMonthCounts[currMonth]) ? (currMonthCounts[currMonth] + 1) : 1;
currPeriodArr[currPeriodArr.length] = currIndex.format("YYYY.M.D");
// Previous period variables
var prevWeek = prevYear + "." + "w" + Math.ceil(prevIndex.format("DDD") / 7);
prevWeeksArr[prevWeeksArr.length] = prevWeek;
prevWeekCounts[prevWeek] = (prevWeekCounts[prevWeek]) ? (prevWeekCounts[prevWeek] + 1) : 1;
var prevMonth = prevIndex.format("YYYY.M");
prevMonthsArr[prevMonthsArr.length] = prevMonth;
prevMonthCounts[prevMonth] = (prevMonthCounts[prevMonth]) ? (prevMonthCounts[prevMonth] + 1) : 1;
prevPeriodArr[prevPeriodArr.length] = prevIndex.format("YYYY.M.D");
}
dateString = (yearChanged) ? "D MMM, YYYY" : "D MMM";
}
periodObj = {
"activePeriod": activePeriod,
"periodMax": periodMax,
"periodMin": periodMin,
"previousPeriod": previousPeriod,
"currentPeriodArr": currPeriodArr,
"previousPeriodArr": prevPeriodArr,
"isSpecialPeriod": isSpecialPeriod,
"dateString": dateString,
"daysInPeriod": daysInPeriod,
"numberOfDays": numberOfDays,
"uniquePeriodArr": getUniqArray(currWeeksArr, currWeekCounts, currMonthsArr, currMonthCounts, currPeriodArr),
"uniquePeriodCheckArr": getUniqCheckArray(currWeeksArr, currWeekCounts, currMonthsArr, currMonthCounts),
"previousUniquePeriodArr": getUniqArray(prevWeeksArr, prevWeekCounts, prevMonthsArr, prevMonthCounts, prevPeriodArr),
"previousUniquePeriodCheckArr": getUniqCheckArray(prevWeeksArr, prevWeekCounts, prevMonthsArr, prevMonthCounts),
"periodContainsToday": periodContainsToday
};
return periodObj;
}
var getPeriodObj = countlyCommon.getPeriodObj;
    /** returns unique period array
* @param {array} weeksArray_pd - weeks array
* @param {array} weekCounts_pd - week counts
* @param {array} monthsArray_pd - months array
* @param {array} monthCounts_pd - months counts
* @param {array} periodArr_pd - period array
* @returns {array} periods
*/
function getUniqArray(weeksArray_pd, weekCounts_pd, monthsArray_pd, monthCounts_pd, periodArr_pd) {
if (_period === "month" || _period === "day" || _period === "yesterday" || _period === "hour") {
return [];
}
if (Object.prototype.toString.call(_period) === '[object Array]' && _period.length === 2) {
if (_period[0] === _period[1]) {
return [];
}
}
var weeksArray = clone(weeksArray_pd),
weekCounts = clone(weekCounts_pd),
monthsArray = clone(monthsArray_pd),
monthCounts = clone(monthCounts_pd),
periodArr = clone(periodArr_pd);
var uniquePeriods = [],
tmpDaysInMonth = -1,
tmpPrevKey = -1,
rejectedWeeks = [],
rejectedWeekDayCounts = {};
var key = 0;
var i = 0;
for (key in weekCounts) {
// If this is the current week we can use it
if (key === moment().format("YYYY.\\w w").replace(" ", "")) {
continue;
}
if (weekCounts[key] < 7) {
for (i = 0; i < weeksArray.length; i++) {
weeksArray[i] = weeksArray[i].replace(key, 0);
}
}
}
for (key in monthCounts) {
if (tmpPrevKey !== key) {
if (moment().format("YYYY.M") === key) {
tmpDaysInMonth = moment().format("D");
}
else {
tmpDaysInMonth = moment(key, "YYYY.M").daysInMonth();
}
tmpPrevKey = key;
}
if (monthCounts[key] < tmpDaysInMonth) {
for (i = 0; i < monthsArray.length; i++) {
monthsArray[i] = monthsArray[i].replace(key, 0);
}
}
}
for (i = 0; i < monthsArray.length; i++) {
if (parseInt(monthsArray[i]) === 0) {
if (parseInt(weeksArray[i]) === 0 || (rejectedWeeks.indexOf(weeksArray[i]) !== -1)) {
uniquePeriods[i] = periodArr[i];
}
else {
uniquePeriods[i] = weeksArray[i];
}
}
else {
rejectedWeeks[rejectedWeeks.length] = weeksArray[i];
uniquePeriods[i] = monthsArray[i];
if (rejectedWeekDayCounts[weeksArray[i]]) {
rejectedWeekDayCounts[weeksArray[i]].count++;
}
else {
rejectedWeekDayCounts[weeksArray[i]] = {
count: 1,
index: i
};
}
}
}
var totalWeekCounts = _.countBy(weeksArray, function(per) {
return per;
});
for (var weekDayCount in rejectedWeekDayCounts) {
// If the whole week is rejected continue
if (rejectedWeekDayCounts[weekDayCount].count === 7) {
continue;
}
// If its the current week continue
if (moment().format("YYYY.\\w w").replace(" ", "") === weekDayCount && totalWeekCounts[weekDayCount] === rejectedWeekDayCounts[weekDayCount].count) {
continue;
}
// If only some part of the week is rejected we should add back daily buckets
var startIndex = rejectedWeekDayCounts[weekDayCount].index - (totalWeekCounts[weekDayCount] - rejectedWeekDayCounts[weekDayCount].count),
limit = startIndex + (totalWeekCounts[weekDayCount] - rejectedWeekDayCounts[weekDayCount].count);
for (i = startIndex; i < limit; i++) {
// If there isn't already a monthly bucket for that day
if (parseInt(monthsArray[i]) === 0) {
uniquePeriods[i] = periodArr[i];
}
}
}
rejectedWeeks = _.uniq(rejectedWeeks);
uniquePeriods = _.uniq(_.difference(uniquePeriods, rejectedWeeks));
return uniquePeriods;
}
/** returns unique period check array
* @param {array} weeksArray_pd - weeks array
* @param {array} weekCounts_pd - week counts
* @param {array} monthsArray_pd - months array
* @param {array} monthCounts_pd - months counts
* @returns {array} periods
*/
function getUniqCheckArray(weeksArray_pd, weekCounts_pd, monthsArray_pd, monthCounts_pd) {
if (_period === "month" || _period === "day" || _period === "yesterday" || _period === "hour") {
return [];
}
if (Object.prototype.toString.call(_period) === '[object Array]' && _period.length === 2) {
if (_period[0] === _period[1]) {
return [];
}
}
var weeksArray = clone(weeksArray_pd),
weekCounts = clone(weekCounts_pd),
monthsArray = clone(monthsArray_pd),
monthCounts = clone(monthCounts_pd);
var uniquePeriods = [],
tmpDaysInMonth = -1,
tmpPrevKey = -1;
var key = 0;
var i = 0;
for (key in weekCounts) {
if (key === moment().format("YYYY.\\w w").replace(" ", "")) {
continue;
}
if (weekCounts[key] < 1) {
for (i = 0; i < weeksArray.length; i++) {
weeksArray[i] = weeksArray[i].replace(key, 0);
}
}
}
for (key in monthCounts) {
if (tmpPrevKey !== key) {
if (moment().format("YYYY.M") === key) {
tmpDaysInMonth = moment().format("D");
}
else {
tmpDaysInMonth = moment(key, "YYYY.M").daysInMonth();
}
tmpPrevKey = key;
}
if (monthCounts[key] < (tmpDaysInMonth * 0.5)) {
for (i = 0; i < monthsArray.length; i++) {
monthsArray[i] = monthsArray[i].replace(key, 0);
}
}
}
for (i = 0; i < monthsArray.length; i++) {
if (parseInt(monthsArray[i]) === 0) {
if (parseInt(weeksArray[i]) !== 0) {
uniquePeriods[i] = weeksArray[i];
}
}
else {
uniquePeriods[i] = monthsArray[i];
}
}
uniquePeriods = _.uniq(uniquePeriods);
return uniquePeriods;
}
/** Function to clone object
* @param {object} obj - object to clone
* @returns {object} cloned object
*/
function clone(obj) {
if (null === obj || "object" !== typeof obj) {
return obj;
}
var copy = "";
if (obj instanceof Date) {
copy = new Date();
copy.setTime(obj.getTime());
return copy;
}
if (obj instanceof Array) {
copy = [];
for (var i = 0, len = obj.length; i < len; ++i) {
copy[i] = clone(obj[i]);
}
return copy;
}
if (obj instanceof Object) {
copy = {};
for (var attr in obj) {
if (obj.hasOwnProperty(attr)) {
copy[attr] = clone(obj[attr]);
}
}
return copy;
}
}
/** Function to show the tooltip when any data point in the graph is hovered on.
* @param {object} args - tooltip info
* @param {number} args.x - x position
    * @param {number} args.y - y position
* @param {string} args.contents - content for tooltip
* @param {string} args.title - title
* @param {string} args.notes - notes
*/
function showTooltip(args) {
var x = args.x || 0,
y = args.y || 0,
contents = args.contents,
title = args.title,
notes = args.notes;
var tooltip = $('<div id="graph-tooltip" class="v2"></div>').append('<span class="content">' + contents + '</span>');
if (title) {
tooltip.prepend('<span id="graph-tooltip-title">' + title + '</span>');
}
if (notes) {
var noteLines = (notes + "").split("===");
for (var i = 0; i < noteLines.length; i++) {
tooltip.append("<div class='note-line'>– " + noteLines[i] + "</div>");
}
}
$("#content").append("<div id='tooltip-calc'>" + $('<div>').append(tooltip.clone()).html() + "</div>");
var widthVal = $("#graph-tooltip").outerWidth(),
heightVal = $("#graph-tooltip").outerHeight();
$("#tooltip-calc").remove();
var newLeft = (x - (widthVal / 2)),
xReach = (x + (widthVal / 2));
if (notes) {
newLeft += 10.5;
xReach += 10.5;
}
if (xReach > $(window).width()) {
newLeft = (x - widthVal);
}
else if (xReach < 340) {
newLeft = x;
}
tooltip.css({
top: y - heightVal - 20,
left: newLeft
}).appendTo("body").show();
}
/** function adds leading zero to value.
* @param {number} value - given value
* @returns {string|number} fixed value
*/
function leadingZero(value) {
if (value > 9) {
return value;
}
return "0" + value;
}
/**
* Correct timezone offset on the timestamp for current browser's timezone
    * @param {number} inTS - second or millisecond timestamp
* @returns {number} corrected timestamp applying user's timezone offset
*/
countlyCommon.getOffsetCorrectionForTimestamp = function(inTS) {
var timeZoneOffset = new Date().getTimezoneOffset(),
intLength = Math.round(inTS).toString().length,
tzAdjustment = 0;
if (timeZoneOffset < 0) {
if (intLength === 13) {
tzAdjustment = timeZoneOffset * 60000;
}
else if (intLength === 10) {
tzAdjustment = timeZoneOffset * 60;
}
}
return tzAdjustment;
};
var __months = [];
/**
* Get array of localized short month names from moment js
* @param {boolean} reset - used to reset months cache when changing locale
* @returns {array} array of short localized month names used in moment js MMM formatting
* @example
* //outputs ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
* countlyCommon.getMonths();
*/
countlyCommon.getMonths = function(reset) {
if (reset) {
__months = [];
}
if (!__months.length) {
for (var i = 0; i < 12; i++) {
__months.push(moment.localeData().monthsShort(moment([0, i]), ""));
}
}
return __months;
};
/**
* Currently selected period
* @property {array=} currentPeriodArr - array with ticks for current period (available only for special periods), example ["2016.12.22","2016.12.23","2016.12.24", ...]
* @property {array=} previousPeriodArr - array with ticks for previous period (available only for special periods), example ["2016.12.22","2016.12.23","2016.12.24", ...]
* @property {string} dateString - date format to use when outputting date in graphs, example D MMM, YYYY
* @property {boolean} isSpecialPeriod - true if current period is special period, false if it is not
* @property {number} daysInPeriod - amount of full days in selected period, example 30
* @property {number} numberOfDays - number of days selected period consists of, example hour period has 1 day
* @property {boolean} periodContainsToday - true if period contains today, false if not
* @property {array} uniquePeriodArr - array with ticks for current period which contains data for unique values, like unique users, example ["2016.12.22","2016.w52","2016.12.30", ...]
* @property {array} uniquePeriodCheckArr - array with ticks for higher buckets to current period unique value estimation, example ["2016.w51","2016.w52","2016.w53","2017.1",...]
* @property {array} previousUniquePeriodArr - array with ticks for previous period which contains data for unique values, like unique users, example ["2016.12.22","2016.w52","2016.12.30"]
* @property {array} previousUniquePeriodCheckArr - array with ticks for higher buckets to previous period unique value estimation, example ["2016.w47","2016.w48","2016.12"]
* @property {string} activePeriod - period name formatted in dateString
* @property {string} previousPeriod - previous period name formatted in dateString
* @property {number} periodMax - max value of current period tick
* @property {number} periodMin - min value of current period tick
* @example <caption>Special period object (7days)</caption>
* {
* "currentPeriodArr":["2017.1.14","2017.1.15","2017.1.16","2017.1.17","2017.1.18","2017.1.19","2017.1.20"],
* "previousPeriodArr":["2017.1.7","2017.1.8","2017.1.9","2017.1.10","2017.1.11","2017.1.12","2017.1.13"],
* "isSpecialPeriod":true,
* "dateString":"D MMM",
* "daysInPeriod":7,
* "numberOfDays":7,
* "uniquePeriodArr":["2017.1.14","2017.w3"],
* "uniquePeriodCheckArr":["2017.w2","2017.w3"],
* "previousUniquePeriodArr":["2017.1.7","2017.1.8","2017.1.9","2017.1.10","2017.1.11","2017.1.12","2017.1.13"],
* "previousUniquePeriodCheckArr":["2017.w1","2017.w2"],
* "periodContainsToday":true
* }
* @example <caption>Simple period object (today period - hour)</caption>
* {
* "activePeriod":"2017.1.20",
* "periodMax":23,
* "periodMin":0,
* "previousPeriod":"2017.1.19",
* "isSpecialPeriod":false,
* "dateString":"HH:mm",
* "daysInPeriod":0,
* "numberOfDays":1,
* "uniquePeriodArr":[],
* "uniquePeriodCheckArr":[],
* "previousUniquePeriodArr":[],
* "previousUniquePeriodCheckArr":[],
* "periodContainsToday":true
* }
*/
countlyCommon.periodObj = calculatePeriodObj();
/**
    * Parse seconds into standard time format
    * @param {number} second - number of seconds
    * @returns {string} time in the format "HH:MM:SS", prefixed with days when applicable
*/
countlyCommon.formatSecond = function(second) {
var timeLeft = parseInt(second);
var dict = [
{k: 'day', v: 86400},
{k: 'hour', v: 3600},
{k: 'minute', v: 60},
{k: 'second', v: 1}
];
var result = {day: 0, hour: 0, minute: 0, second: 0};
for (var i = 0; i < dict.length; i++) {
result[dict[i].k] = Math.floor(timeLeft / dict[i].v);
timeLeft = timeLeft % dict[i].v;
}
var dayTrans = result.day > 1 ? jQuery.i18n.map["common.day.abrv"] : jQuery.i18n.map["common.day.abrv2"];
return (result.day > 0 ? result.day + " " + dayTrans + ' ' : '') +
(result.hour >= 10 ? result.hour + ':' : ('0' + result.hour) + ":") +
(result.minute >= 10 ? result.minute + ':' : ('0' + result.minute) + ':') +
(result.second >= 10 ? result.second : ('0' + result.second));
};
/**
    * add one more column to chartDPs[index].data to show a formatted time string in the data point
    * for example:
    * chartDPs = [
    *  {color:"#88BBC8", label:"duration", data:[[0, 23], [1, 22]]},
    *  {color:"#88BBC8", label:"count", data:[[0, 3], [1, 3]]}
    * ]
    * labelName = 'duration',
    *
    * will return
    * chartDPs = [
    *  {color:"#88BBC8", label:"duration", data:[[0, 23, "00:00:23"], [1, 22, "00:00:22"]]},
    *  {color:"#88BBC8", label:"count", data:[[0, 3], [1, 3]]}
    * ]
* @param {array} chartDPs - chart data points
* @param {string} labelName - label name
* @return {array} chartDPs
*/
countlyCommon.formatSecondForDP = function(chartDPs, labelName) {
for (var k = 0; k < chartDPs.length; k++) {
if (chartDPs[k].label === labelName) {
var dp = chartDPs[k];
for (var i = 0; i < dp.data.length; i++) {
dp.data[i][2] = countlyCommon.formatSecond(dp.data[i][1]);
}
}
}
return chartDPs;
};
/**
    * Getter/setter for dot notations:
* @param {object} obj - object to use
* @param {string} is - path of properties to get
* @param {varies} value - value to set
* @returns {varies} value at provided path
* @example
* common.dot({a: {b: {c: 'string'}}}, 'a.b.c') === 'string'
* common.dot({a: {b: {c: 'string'}}}, ['a', 'b', 'c']) === 'string'
* common.dot({a: {b: {c: 'string'}}}, 'a.b.c', 5) === 5
* common.dot({a: {b: {c: 'string'}}}, 'a.b.c') === 5
*/
countlyCommon.dot = function(obj, is, value) {
if (typeof is === 'string') {
return countlyCommon.dot(obj, is.split('.'), value);
}
else if (is.length === 1 && value !== undefined) {
obj[is[0]] = value;
return value;
}
else if (is.length === 0) {
return obj;
}
else if (!obj) {
return obj;
}
else {
return countlyCommon.dot(obj[is[0]], is.slice(1), value);
}
};
/**
    * Safe division, handling division by 0 and rounding to 2 decimals
    * @param {number} dividend - number to divide
    * @param {number} divisor - number to divide by
* @returns {number} division
*/
countlyCommon.safeDivision = function(dividend, divisor) {
var tmpAvgVal;
tmpAvgVal = dividend / divisor;
if (!tmpAvgVal || tmpAvgVal === Number.POSITIVE_INFINITY) {
tmpAvgVal = 0;
}
return tmpAvgVal.toFixed(2);
};
/**
    * Get timestamp range in the format [startTime, endTime] for the given period and base time
    * @param {array|string} period - period, either as an array range or a string
* @param {number} baseTimeStamp - base timestamp to calc the period range
* @returns {array} period range
*/
countlyCommon.getPeriodRange = function(period, baseTimeStamp) {
var periodRange;
if (Object.prototype.toString.call(period) === '[object Array]' && period.length === 2) { //range
periodRange = period;
return periodRange;
}
var endTimeStamp = baseTimeStamp;
var start;
switch (period) {
case 'hour':
start = moment(baseTimeStamp).hour(0).minute(0).second(0);
break;
case 'yesterday':
start = moment(baseTimeStamp).subtract(1, 'day').hour(0).minute(0).second(0);
endTimeStamp = moment(baseTimeStamp).subtract(1, 'day').hour(23).minute(59).second(59).toDate().getTime();
break;
case 'day':
start = moment(baseTimeStamp).date(1).hour(0).minute(0).second(0);
break;
case 'month':
start = moment(baseTimeStamp).month(0).date(1).hour(0).minute(0).second(0);
break;
default:
if (/([0-9]+)days/.test(period)) {
var match = /([0-9]+)days/.exec(period);
if (match[1] && (parseInt(match[1]) > 1)) {
start = moment(baseTimeStamp).subtract(parseInt(match[1]) - 1, 'day').hour(0).minute(0);
}
}
}
periodRange = [start.toDate().getTime(), endTimeStamp];
return periodRange;
};
};
window.CommonConstructor = CommonConstructor;
window.countlyCommon = new CommonConstructor();
}(window, jQuery)); | 1 | 13,301 | I think ticket meant, not applying replacement in this method, but rather applying encodeSomeHtml to each and every localization string | Countly-countly-server | js |
@@ -207,7 +207,9 @@ function isNodeShuttingDownError(err) {
* @param {Server} server
*/
function isSDAMUnrecoverableError(error, server) {
- if (error instanceof MongoParseError) {
+ // NOTE: null check is here for a strictly pre-CMAP world, a timeout or
+ // close event are considered unrecoverable
+ if (error instanceof MongoParseError || error == null) {
return true;
}
| 1 | 'use strict';
const mongoErrorContextSymbol = Symbol('mongoErrorContextSymbol');
const maxWireVersion = require('./utils').maxWireVersion;
/**
* Creates a new MongoError
*
* @augments Error
* @param {Error|string|object} message The error message
* @property {string} message The error message
* @property {string} stack The error call stack
*/
class MongoError extends Error {
constructor(message) {
if (message instanceof Error) {
super(message.message);
this.stack = message.stack;
} else {
if (typeof message === 'string') {
super(message);
} else {
super(message.message || message.errmsg || message.$err || 'n/a');
for (var name in message) {
this[name] = message[name];
}
}
Error.captureStackTrace(this, this.constructor);
}
this.name = 'MongoError';
this[mongoErrorContextSymbol] = this[mongoErrorContextSymbol] || {};
}
/**
* Creates a new MongoError object
*
* @param {Error|string|object} options The options used to create the error.
* @return {MongoError} A MongoError instance
* @deprecated Use `new MongoError()` instead.
*/
static create(options) {
return new MongoError(options);
}
hasErrorLabel(label) {
return this.errorLabels && this.errorLabels.indexOf(label) !== -1;
}
}
/**
* Creates a new MongoNetworkError
*
* @param {Error|string|object} message The error message
* @property {string} message The error message
* @property {string} stack The error call stack
*/
class MongoNetworkError extends MongoError {
constructor(message) {
super(message);
this.name = 'MongoNetworkError';
}
}
/**
* An error used when attempting to parse a value (like a connection string)
*
* @param {Error|string|object} message The error message
* @property {string} message The error message
*/
class MongoParseError extends MongoError {
constructor(message) {
super(message);
this.name = 'MongoParseError';
}
}
/**
* An error signifying a timeout event
*
* @param {Error|string|object} message The error message
 * @param {string|object} [reason] The reason the timeout occurred
* @property {string} message The error message
* @property {string} [reason] An optional reason context for the timeout, generally an error saved during flow of monitoring and selecting servers
*/
class MongoTimeoutError extends MongoError {
constructor(message, reason) {
super(message);
this.name = 'MongoTimeoutError';
if (reason != null) {
this.reason = reason;
}
}
}
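// Normalizes a writeConcernError response into a result object: if ok is 0 it is set to 1 and the error fields are removed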
function makeWriteConcernResultObject(input) {
const output = Object.assign({}, input);
if (output.ok === 0) {
output.ok = 1;
delete output.errmsg;
delete output.code;
delete output.codeName;
}
return output;
}
/**
* An error thrown when the server reports a writeConcernError
*
* @param {Error|string|object} message The error message
* @param {object} result The result document (provided if ok: 1)
* @property {string} message The error message
* @property {object} [result] The result document (provided if ok: 1)
*/
class MongoWriteConcernError extends MongoError {
constructor(message, result) {
super(message);
this.name = 'MongoWriteConcernError';
if (result != null) {
this.result = makeWriteConcernResultObject(result);
}
}
}
// see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms
const RETRYABLE_ERROR_CODES = new Set([
6, // HostUnreachable
7, // HostNotFound
89, // NetworkTimeout
91, // ShutdownInProgress
189, // PrimarySteppedDown
9001, // SocketException
10107, // NotMaster
11600, // InterruptedAtShutdown
11602, // InterruptedDueToReplStateChange
13435, // NotMasterNoSlaveOk
13436 // NotMasterOrSecondary
]);
/**
* Determines whether an error is something the driver should attempt to retry
*
* @param {MongoError|Error} error
*/
function isRetryableError(error) {
return (
RETRYABLE_ERROR_CODES.has(error.code) ||
error instanceof MongoNetworkError ||
error.message.match(/not master/) ||
error.message.match(/node is recovering/)
);
}
const SDAM_RECOVERING_CODES = new Set([
91, // ShutdownInProgress
189, // PrimarySteppedDown
11600, // InterruptedAtShutdown
11602, // InterruptedDueToReplStateChange
13436 // NotMasterOrSecondary
]);
const SDAM_NOTMASTER_CODES = new Set([
10107, // NotMaster
13435 // NotMasterNoSlaveOk
]);
const SDAM_NODE_SHUTTING_DOWN_ERROR_CODES = new Set([
11600, // InterruptedAtShutdown
91 // ShutdownInProgress
]);
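// Determines whether an error indicates that the node is recovering, either by error code or by message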
function isRecoveringError(err) {
if (err.code && SDAM_RECOVERING_CODES.has(err.code)) {
return true;
}
return err.message.match(/not master or secondary/) || err.message.match(/node is recovering/);
}
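// Determines whether an error indicates a "not master" condition that is not also a recovering condition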
function isNotMasterError(err) {
if (err.code && SDAM_NOTMASTER_CODES.has(err.code)) {
return true;
}
if (isRecoveringError(err)) {
return false;
}
return err.message.match(/not master/);
}
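// Determines whether an error indicates that the node is shutting down (InterruptedAtShutdown or ShutdownInProgress)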
function isNodeShuttingDownError(err) {
return err.code && SDAM_NODE_SHUTTING_DOWN_ERROR_CODES.has(err.code);
}
/**
* Determines whether SDAM can recover from a given error. If it cannot
* then the pool will be cleared, and server state will completely reset
* locally.
*
* @see https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#not-master-and-node-is-recovering
* @param {MongoError|Error} error
* @param {Server} server
*/
function isSDAMUnrecoverableError(error, server) {
if (error instanceof MongoParseError) {
return true;
}
if (isRecoveringError(error) || isNotMasterError(error)) {
if (maxWireVersion(server) >= 8 && !isNodeShuttingDownError(error)) {
return false;
}
return true;
}
return false;
}
module.exports = {
MongoError,
MongoNetworkError,
MongoParseError,
MongoTimeoutError,
MongoWriteConcernError,
mongoErrorContextSymbol,
isRetryableError,
isSDAMUnrecoverableError
};
| 1 | 16,891 | ticket for the 4.0 epic? | mongodb-node-mongodb-native | js |
@@ -87,6 +87,10 @@ def main():
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
+ if cfg.model.get('neck', False):
+ if cfg.model.neck.get('rfp_backbone', False):
+ if cfg.model.neck.rfp_backbone.get('pretrained', False):
+ cfg.model.neck.rfp_backbone.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info. | 1 | import argparse
import os
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from tools.fuse_conv_bn import fuse_module
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def parse_args():
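    """Parse command line arguments for testing and evaluation."""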
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='arguments in dict')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
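    """Build the dataset and model, run (distributed) testing and save, format, evaluate or show the results."""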
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_module(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.options is None else args.options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
dataset.evaluate(outputs, args.eval, **kwargs)
if __name__ == '__main__':
main()
| 1 | 20,174 | `cfg.model.get('neck')` will return None if neck does not exist, thus we can omit the default value. | open-mmlab-mmdetection | py |
@@ -5,7 +5,7 @@ module Blacklight
module Facet
def facet_paginator field_config, display_facet
- Blacklight::Solr::FacetPaginator.new(display_facet.items,
+ blacklight_config.facet_paginator_class.new(display_facet.items,
sort: display_facet.sort,
offset: display_facet.offset,
limit: facet_limit_for(field_config.key)) | 1 | # These are methods that are used at both the view helper and controller layers
# They are only dependent on `blacklight_config` and `@response`
#
module Blacklight
module Facet
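    # Build a facet paginator for the given field configuration and display facet, using the configured facet limit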
def facet_paginator field_config, display_facet
Blacklight::Solr::FacetPaginator.new(display_facet.items,
sort: display_facet.sort,
offset: display_facet.offset,
limit: facet_limit_for(field_config.key))
end
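    # Get facet data from the response for the given fields (all configured facet fields by default), skipping fields without data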
def facets_from_request(fields = facet_field_names)
fields.map { |field| facet_by_field_name(field) }.compact
end
def facet_field_names
blacklight_config.facet_fields.keys
end
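    # Look up the facet configuration for a field, falling back to matching on the underlying field name or a blank default configuration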
def facet_configuration_for_field(field)
f = blacklight_config.facet_fields[field]
f ||= begin
_, value = blacklight_config.facet_fields.find { |k,v| v.field.to_s == field.to_s }
value
end
f ||= Blacklight::Configuration::FacetField.new(:field => field).normalize!
end
# Get a FacetField object from the @response
def facet_by_field_name field_or_field_name
case field_or_field_name
when String, Symbol, Blacklight::Configuration::FacetField
facet_field = facet_configuration_for_field(field_or_field_name)
@response.aggregations[facet_field.key]
else
# is this really a useful case?
field_or_field_name
end
end
end
end
| 1 | 5,980 | Trailing whitespace detected. | projectblacklight-blacklight | rb |
@@ -38,6 +38,7 @@ type TargetSpec struct {
AutoStackSize *bool `json:"automatic-stack-size"` // Determine stack size automatically at compile time.
DefaultStackSize uint64 `json:"default-stack-size"` // Default stack size if the size couldn't be determined at compile time.
CFlags []string `json:"cflags"`
+ CXXFlags []string `json:"cxxflags"`
LDFlags []string `json:"ldflags"`
LinkerScript string `json:"linkerscript"`
ExtraFiles []string `json:"extra-files"` | 1 | package compileopts
// This file loads a target specification from a JSON file.
import (
"encoding/json"
"errors"
"io"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"github.com/tinygo-org/tinygo/goenv"
)
// Target specification for a given target. Used for bare metal targets.
//
// The target specification is mostly inspired by Rust:
// https://doc.rust-lang.org/nightly/nightly-rustc/rustc_target/spec/struct.TargetOptions.html
// https://github.com/shepmaster/rust-arduino-blink-led-no-core-with-cargo/blob/master/blink/arduino.json
type TargetSpec struct {
Inherits []string `json:"inherits"`
Triple string `json:"llvm-target"`
CPU string `json:"cpu"`
Features []string `json:"features"`
GOOS string `json:"goos"`
GOARCH string `json:"goarch"`
BuildTags []string `json:"build-tags"`
GC string `json:"gc"`
Scheduler string `json:"scheduler"`
Serial string `json:"serial"` // which serial output to use (uart, usb, none)
Linker string `json:"linker"`
RTLib string `json:"rtlib"` // compiler runtime library (libgcc, compiler-rt)
Libc string `json:"libc"`
AutoStackSize *bool `json:"automatic-stack-size"` // Determine stack size automatically at compile time.
DefaultStackSize uint64 `json:"default-stack-size"` // Default stack size if the size couldn't be determined at compile time.
CFlags []string `json:"cflags"`
LDFlags []string `json:"ldflags"`
LinkerScript string `json:"linkerscript"`
ExtraFiles []string `json:"extra-files"`
RP2040BootPatch *bool `json:"rp2040-boot-patch"` // Patch RP2040 2nd stage bootloader checksum
Emulator []string `json:"emulator" override:"copy"` // inherited Emulator must not be append
FlashCommand string `json:"flash-command"`
GDB []string `json:"gdb"`
PortReset string `json:"flash-1200-bps-reset"`
SerialPort []string `json:"serial-port"` // serial port IDs in the form "acm:vid:pid" or "usb:vid:pid"
FlashMethod string `json:"flash-method"`
FlashVolume string `json:"msd-volume-name"`
FlashFilename string `json:"msd-firmware-name"`
UF2FamilyID string `json:"uf2-family-id"`
BinaryFormat string `json:"binary-format"`
OpenOCDInterface string `json:"openocd-interface"`
OpenOCDTarget string `json:"openocd-target"`
OpenOCDTransport string `json:"openocd-transport"`
OpenOCDCommands []string `json:"openocd-commands"`
JLinkDevice string `json:"jlink-device"`
CodeModel string `json:"code-model"`
RelocationModel string `json:"relocation-model"`
WasmAbi string `json:"wasm-abi"`
}
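// Illustrative sketch, not part of the upstream file: a minimal target file and
// how it maps onto TargetSpec's JSON tags. The concrete values are invented for
// illustration; real target files live under the targets/ directory of the
// TinyGo source tree.
func exampleTargetSpec() (TargetSpec, error) {
	var spec TargetSpec
	err := json.Unmarshal([]byte(`{
		"inherits": ["cortex-m"],
		"llvm-target": "armv6m-unknown-unknown-eabi",
		"cpu": "cortex-m0",
		"build-tags": ["cortexm", "baremetal"],
		"cflags": ["-mfloat-abi=soft"]
	}`), &spec)
	return spec, err
}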
// overrideProperties overrides all properties that are set in child into itself using reflection.
func (spec *TargetSpec) overrideProperties(child *TargetSpec) {
specType := reflect.TypeOf(spec).Elem()
specValue := reflect.ValueOf(spec).Elem()
childValue := reflect.ValueOf(child).Elem()
for i := 0; i < specType.NumField(); i++ {
field := specType.Field(i)
src := childValue.Field(i)
dst := specValue.Field(i)
switch kind := field.Type.Kind(); kind {
case reflect.String: // for strings, just copy the field of child to spec if not empty
if src.Len() > 0 {
dst.Set(src)
}
case reflect.Uint, reflect.Uint32, reflect.Uint64: // for Uint, copy if not zero
if src.Uint() != 0 {
dst.Set(src)
}
case reflect.Ptr: // for pointers, copy if not nil
if !src.IsNil() {
dst.Set(src)
}
case reflect.Slice: // for slices...
if src.Len() > 0 { // ... if not empty ...
switch tag := field.Tag.Get("override"); tag {
case "copy":
// copy the field of child to spec
dst.Set(src)
case "append", "":
// or append the field of child to spec
dst.Set(reflect.AppendSlice(src, dst))
default:
panic("override mode must be 'copy' or 'append' (default). I don't know how to '" + tag + "'.")
}
}
default:
panic("unknown field type : " + kind.String())
}
}
}
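// Illustrative sketch, not part of the upstream file: what the reflection-based
// merge above means in practice. Child values win for scalar fields, slices are
// appended (child entries first), and slices tagged `override:"copy"` such as
// Emulator replace the inherited value outright. The flag values are invented.
func exampleOverride() TargetSpec {
	parent := TargetSpec{
		CFlags:   []string{"-Os"},
		Emulator: []string{"qemu-system-arm"},
	}
	child := TargetSpec{
		CFlags:   []string{"-mfloat-abi=hard"},
		Emulator: []string{"simavr"},
	}
	merged := parent
	merged.overrideProperties(&child)
	// merged.CFlags   == []string{"-mfloat-abi=hard", "-Os"}  (appended, child first)
	// merged.Emulator == []string{"simavr"}                   (copied, not appended)
	return merged
}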
// load reads a target specification from the JSON in the given io.Reader. It
// may load more targets specified using the "inherits" property.
func (spec *TargetSpec) load(r io.Reader) error {
err := json.NewDecoder(r).Decode(spec)
if err != nil {
return err
}
return nil
}
// loadFromGivenStr loads the TargetSpec from the given string that could be:
// - targets/ directory inside the compiler sources
// - a relative or absolute path to custom (project specific) target specification .json file;
// the Inherits[] could contain the files from target folder (ex. stm32f4disco)
// as well as path to custom files (ex. myAwesomeProject.json)
func (spec *TargetSpec) loadFromGivenStr(str string) error {
path := ""
if strings.HasSuffix(str, ".json") {
path, _ = filepath.Abs(str)
} else {
path = filepath.Join(goenv.Get("TINYGOROOT"), "targets", strings.ToLower(str)+".json")
}
fp, err := os.Open(path)
if err != nil {
return err
}
defer fp.Close()
return spec.load(fp)
}
// resolveInherits loads inherited targets, recursively.
func (spec *TargetSpec) resolveInherits() error {
// First create a new spec with all the inherited properties.
newSpec := &TargetSpec{}
for _, name := range spec.Inherits {
subtarget := &TargetSpec{}
err := subtarget.loadFromGivenStr(name)
if err != nil {
return err
}
err = subtarget.resolveInherits()
if err != nil {
return err
}
newSpec.overrideProperties(subtarget)
}
// When all properties are loaded, make sure they are properly inherited.
newSpec.overrideProperties(spec)
*spec = *newSpec
return nil
}
// Load a target specification.
func LoadTarget(options *Options) (*TargetSpec, error) {
if options.Target == "" {
// Configure based on GOOS/GOARCH environment variables (falling back to
// runtime.GOOS/runtime.GOARCH), and generate a LLVM target based on it.
llvmos := options.GOOS
llvmarch := map[string]string{
"386": "i386",
"amd64": "x86_64",
"arm64": "aarch64",
"arm": "armv7",
}[options.GOARCH]
if llvmarch == "" {
llvmarch = options.GOARCH
}
// Target triples (which actually have four components, but are called
// triples for historical reasons) have the form:
// arch-vendor-os-environment
target := llvmarch + "-unknown-" + llvmos
if options.GOARCH == "arm" {
target += "-gnueabihf"
}
return defaultTarget(options.GOOS, options.GOARCH, target)
}
// See whether there is a target specification for this target (e.g.
// Arduino).
spec := &TargetSpec{}
err := spec.loadFromGivenStr(options.Target)
if err != nil {
return nil, err
}
// Successfully loaded this target from a built-in .json file. Make sure
// it includes all parents as specified in the "inherits" key.
err = spec.resolveInherits()
if err != nil {
return nil, err
}
return spec, nil
}
// WindowsBuildNotSupportedErr is being thrown, when goos is windows and no target has been specified.
var WindowsBuildNotSupportedErr = errors.New("Building Windows binaries is currently not supported. Try specifying a different target")
func defaultTarget(goos, goarch, triple string) (*TargetSpec, error) {
if goos == "windows" {
return nil, WindowsBuildNotSupportedErr
}
// No target spec available. Use the default one, useful on most systems
// with a regular OS.
spec := TargetSpec{
Triple: triple,
GOOS: goos,
GOARCH: goarch,
BuildTags: []string{goos, goarch},
Scheduler: "tasks",
Linker: "cc",
DefaultStackSize: 1024 * 64, // 64kB
GDB: []string{"gdb"},
PortReset: "false",
}
if goarch == "386" {
spec.CPU = "pentium4"
}
if goos == "darwin" {
spec.CFlags = append(spec.CFlags, "-isysroot", "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk")
spec.LDFlags = append(spec.LDFlags, "-Wl,-dead_strip")
} else {
spec.LDFlags = append(spec.LDFlags, "-no-pie", "-Wl,--gc-sections") // WARNING: clang < 5.0 requires -nopie
}
if goarch != "wasm" {
spec.ExtraFiles = append(spec.ExtraFiles, "src/runtime/gc_"+goarch+".S")
spec.ExtraFiles = append(spec.ExtraFiles, "src/internal/task/task_stack_"+goarch+".S")
}
if goarch != runtime.GOARCH {
// Some educated guesses as to how to invoke helper programs.
spec.GDB = []string{"gdb-multiarch"}
if goarch == "arm" && goos == "linux" {
spec.CFlags = append(spec.CFlags, "--sysroot=/usr/arm-linux-gnueabihf")
spec.Linker = "arm-linux-gnueabihf-gcc"
spec.Emulator = []string{"qemu-arm", "-L", "/usr/arm-linux-gnueabihf"}
}
if goarch == "arm64" && goos == "linux" {
spec.CFlags = append(spec.CFlags, "--sysroot=/usr/aarch64-linux-gnu")
spec.Linker = "aarch64-linux-gnu-gcc"
spec.Emulator = []string{"qemu-aarch64", "-L", "/usr/aarch64-linux-gnu"}
}
if goarch == "386" && runtime.GOARCH == "amd64" {
spec.CFlags = append(spec.CFlags, "-m32")
spec.LDFlags = append(spec.LDFlags, "-m32")
}
}
return &spec, nil
}
// LookupGDB looks up a gdb executable.
func (spec *TargetSpec) LookupGDB() (string, error) {
if len(spec.GDB) == 0 {
return "", errors.New("gdb not configured in the target specification")
}
for _, d := range spec.GDB {
_, err := exec.LookPath(d)
if err == nil {
return d, nil
}
}
return "", errors.New("no gdb found configured in the target specification (" + strings.Join(spec.GDB, ", ") + ")")
}
| 1 | 13,291 | I don't see why a `cxxflags` key is necessary? C flags are important in the target file because they define things like the float ABI. But these flags are also used for C++. I can't think of a reason why you would want to configure C++ flags in a target file. | tinygo-org-tinygo | go |
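As a concrete illustration of the reviewer's point above: because the target file's `cflags` (for example a float-ABI selection) apply to C and C++ alike, a build step can simply reuse them when driving the C++ frontend and keep any C++-only options out of the target specification. The sketch below is a hypothetical stand-alone example, not TinyGo's actual build code; the trimmed-down `TargetSpec`, the `buildCXXCommand` helper, and the clang++ invocation are assumptions made for illustration.

package main

import (
	"fmt"
	"strings"
)

// TargetSpec is a trimmed-down stand-in for compileopts.TargetSpec, keeping
// only the fields this illustration needs.
type TargetSpec struct {
	Triple string
	CFlags []string
}

// buildCXXCommand reuses the target-level cflags (e.g. "-mfloat-abi=hard") for
// a C++ compile as well; C++-specific flags are supplied by the caller rather
// than by a separate cxxflags key in the target file.
func buildCXXCommand(spec TargetSpec, source string, extraCXXFlags ...string) []string {
	cmd := []string{"clang++", "--target=" + spec.Triple}
	cmd = append(cmd, spec.CFlags...)
	cmd = append(cmd, extraCXXFlags...)
	return append(cmd, "-c", source)
}

func main() {
	spec := TargetSpec{
		Triple: "armv7-unknown-linux-gnueabihf",
		CFlags: []string{"-mfloat-abi=hard", "-mfpu=vfpv3"},
	}
	fmt.Println(strings.Join(buildCXXCommand(spec, "main.cpp", "-fno-exceptions"), " "))
}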
@@ -352,12 +352,13 @@ instrument_annotation(dcontext_t *dcontext, IN OUT app_pc *start_pc,
# endif
instr_init(dcontext, &scratch);
- TRY_EXCEPT(my_dcontext, { identify_annotation(dcontext, &layout, &scratch); },
- { /* EXCEPT */
- LOG(THREAD, LOG_ANNOTATIONS, 2,
- "Failed to instrument annotation at " PFX "\n", *start_pc);
- /* layout.type is already ANNOTATION_TYPE_NONE */
- });
+ TRY_EXCEPT(
+ my_dcontext, { identify_annotation(dcontext, &layout, &scratch); },
+ { /* EXCEPT */
+ LOG(THREAD, LOG_ANNOTATIONS, 2, "Failed to instrument annotation at " PFX "\n",
+ *start_pc);
+ /* layout.type is already ANNOTATION_TYPE_NONE */
+ });
if (layout.type != ANNOTATION_TYPE_NONE) {
LOG(GLOBAL, LOG_ANNOTATIONS, 2,
"Decoded %s annotation %s. Next pc now " PFX ".\n", | 1 | /* ******************************************************
* Copyright (c) 2014-2021 Google, Inc. All rights reserved.
* ******************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "globals.h"
#include "hashtable.h"
#include "instr.h"
#include "instr_create_shared.h"
#include "decode_fast.h"
#include "utils.h"
#include "annotations.h"
#ifdef ANNOTATIONS /* around whole file */
# if !(defined(WINDOWS) && defined(X64))
# include "../third_party/valgrind/valgrind.h"
# include "../third_party/valgrind/memcheck.h"
# endif
/* Macros for identifying an annotation head and extracting the pointer to its name.
*
* IS_ANNOTATION_LABEL_INSTR(instr): Evaluates to true for any `instr`
* that could be the instruction which encodes the pointer to the annotation name.
* IS_ANNOTATION_LABEL_REFERENCE(opnd): Evaluates to true for any `opnd`
* that could be the operand which encodes the pointer to the annotation name.
* GET_ANNOTATION_LABEL_REFERENCE(src, instr_pc): Extracts the annotation name
* pointer. IS_ANNOTATION_LABEL_GOT_OFFSET_INSTR(instr): Evaluates to true for any
* `instr` that could encode the offset of the label's GOT entry within the GOT table.
* IS_ANNOTATION_LABEL_GOT_OFFSET_REFERENCE(opnd): Evaluates to true for any `opnd`
* that could encode the offset of the label's GOT entry within the GOT table.
*/
# ifdef WINDOWS
# ifdef X64
# define IS_ANNOTATION_LABEL_INSTR(instr) \
(instr_is_mov(instr) || (instr_get_opcode(instr) == OP_prefetchw))
# define IS_ANNOTATION_LABEL_REFERENCE(opnd) opnd_is_rel_addr(opnd)
# define GET_ANNOTATION_LABEL_REFERENCE(src, instr_pc) opnd_get_addr(src)
# else
# define IS_ANNOTATION_LABEL_INSTR(instr) instr_is_mov(instr)
# define IS_ANNOTATION_LABEL_REFERENCE(opnd) opnd_is_base_disp(opnd)
# define GET_ANNOTATION_LABEL_REFERENCE(src, instr_pc) \
((app_pc)opnd_get_disp(src))
# endif
# else
# define IS_ANNOTATION_LABEL_INSTR(instr) instr_is_mov(instr)
# define IS_ANNOTATION_LABEL_REFERENCE(opnd) opnd_is_base_disp(opnd)
# ifdef X64
# define ANNOTATION_LABEL_REFERENCE_OPERAND_OFFSET 4
# else
# define ANNOTATION_LABEL_REFERENCE_OPERAND_OFFSET 0
# endif
# define GET_ANNOTATION_LABEL_REFERENCE(src, instr_pc) \
((app_pc)(opnd_get_disp(src) + (instr_pc) + \
ANNOTATION_LABEL_REFERENCE_OPERAND_OFFSET))
# define IS_ANNOTATION_LABEL_GOT_OFFSET_INSTR(instr) \
(instr_get_opcode(scratch) == OP_bsf || instr_get_opcode(scratch) == OP_bsr)
# define IS_ANNOTATION_LABEL_GOT_OFFSET_REFERENCE(opnd) opnd_is_base_disp(opnd)
# endif
/* Annotation label components. */
# define DYNAMORIO_ANNOTATION_LABEL "dynamorio-annotation"
# define DYNAMORIO_ANNOTATION_LABEL_LENGTH 20
# define ANNOTATION_STATEMENT_LABEL "statement"
# define ANNOTATION_STATEMENT_LABEL_LENGTH 9
# define ANNOTATION_EXPRESSION_LABEL "expression"
# define ANNOTATION_EXPRESSION_LABEL_LENGTH 10
# define ANNOTATION_VOID_LABEL "void"
# define ANNOTATION_VOID_LABEL_LENGTH 4
# define IS_ANNOTATION_STATEMENT_LABEL(annotation_name) \
(strncmp((const char *)(annotation_name), ANNOTATION_STATEMENT_LABEL, \
ANNOTATION_STATEMENT_LABEL_LENGTH) == 0)
# define IS_ANNOTATION_VOID(annotation_name) \
(strncmp((const char *)(annotation_name), ANNOTATION_VOID_LABEL ":", \
ANNOTATION_VOID_LABEL_LENGTH) == 0)
/* Annotation detection factors exclusive to Windows x64. */
# if defined(WINDOWS) && defined(X64)
/* Instruction `int 2c` hints that the preceding cbr is probably an annotation
* head. */
# define WINDOWS_X64_ANNOTATION_HINT_BYTE 0xcd
# define X64_WINDOWS_ENCODED_ANNOTATION_HINT 0x2ccd
/* Instruction `int 3` acts as a boundary for compiler optimizations, to prevent
* the annotation from being transformed into something unrecognizable.
*/
# define WINDOWS_X64_OPTIMIZATION_FENCE 0xcc
# define IS_ANNOTATION_HEADER(scratch, pc) \
(instr_is_cbr(scratch) && \
(*(ushort *)(pc) == X64_WINDOWS_ENCODED_ANNOTATION_HINT))
# endif
/* OPND_RETURN_VALUE: create the return value operand for `mov $return_value,%xax`. */
# ifdef X64
# define OPND_RETURN_VALUE(return_value) OPND_CREATE_INT64(return_value)
# else
# define OPND_RETURN_VALUE(return_value) OPND_CREATE_INT32(return_value)
# endif
/* FASTCALL_REGISTER_ARG_COUNT: Specifies the number of arguments passed in registers.
*/
# ifdef X64
# ifdef UNIX
# define FASTCALL_REGISTER_ARG_COUNT 6
# else /* WINDOWS x64 */
# define FASTCALL_REGISTER_ARG_COUNT 4
# endif
# else /* x86 (all) */
# define FASTCALL_REGISTER_ARG_COUNT 2
# endif
# define DYNAMORIO_ANNOTATE_RUNNING_ON_DYNAMORIO_NAME \
"dynamorio_annotate_running_on_dynamorio"
# define DYNAMORIO_ANNOTATE_LOG_NAME "dynamorio_annotate_log"
# define DYNAMORIO_ANNOTATE_LOG_ARG_COUNT 20
/* Facilitates timestamp substitution in `dynamorio_annotate_log()`. */
# define LOG_ANNOTATION_TIMESTAMP_TOKEN "${timestamp}"
# define LOG_ANNOTATION_TIMESTAMP_TOKEN_LENGTH 12
/* Constant factors of the Valgrind annotation, as defined in valgrind.h. */
enum {
VG_ROL_COUNT = 4,
};
typedef enum _annotation_type_t {
/* Indicates that the analyzed instruction turned out not to be an annotation head. */
ANNOTATION_TYPE_NONE,
/* To invoke an annotation as an expression, the target app calls the annotation as if
* it were a normal function. The annotation instruction sequence follows the preamble
* of each annotation function, and instrumentation replaces it with (1) a clean call
* to each registered handler for that annotation, or (2) a return value substitution,
* depending on the type of registration.
*/
ANNOTATION_TYPE_EXPRESSION,
/* To invoke an annotation as a statement, the target app calls a macro defined in
* the annotation header (via dr_annotations_asm.h), which places the annotation
* instruction sequence inline at the invocation site. The sequence includes a normal
* call to the annotation function, so instrumentation simply involves removing the
* surrounding components of the annotation to expose the call. The DR client's clean
* calls will be invoked within the annotation function itself (see above).
*/
ANNOTATION_TYPE_STATEMENT,
} annotation_type_t;
/* Specifies the exact byte position of the essential components of an annotation. */
typedef struct _annotation_layout_t {
app_pc start_pc;
annotation_type_t type;
/* Indicates whether the annotation function in the target app is of void type. */
bool is_void;
/* Points to the annotation name in the target app's data section. */
const char *name;
/* Specifies the translation of the annotation instrumentation (e.g., clean call). */
app_pc substitution_xl8;
/* Specifies the byte at which app decoding should resume following the annotation. */
app_pc resume_pc;
} annotation_layout_t;
# if !(defined(WINDOWS) && defined(X64))
typedef struct _vg_handlers_t {
dr_annotation_handler_t *handlers[DR_VG_ID__LAST];
} vg_handlers_t;
# endif
static strhash_table_t *handlers;
# if !(defined(WINDOWS) && defined(X64))
/* locked under the `handlers` table lock */
static vg_handlers_t *vg_handlers;
/* Dispatching function for Valgrind annotations (required because id of the Valgrind
* client request object cannot be determined statically).
*/
static dr_annotation_handler_t vg_router;
static dr_annotation_receiver_t vg_receiver; /* The sole receiver for `vg_router`. */
static opnd_t vg_router_arg; /* The sole argument for the clean call to `vg_router`. */
# endif
/* Required for passing the va_list in `dynamorio_annotate_log()` to the log function. */
extern ssize_t
do_file_write(file_t f, const char *fmt, va_list ap);
/*********************************************************
* INTERNAL ROUTINE DECLARATIONS
*/
# if !(defined(WINDOWS) && defined(X64))
/* Valgrind dispatcher, called by the instrumentation of the Valgrind annotations. */
static void
handle_vg_annotation(app_pc request_args);
/* Maps a Valgrind request id into a (sequential) `DR_VG_ID__*` constant. */
static dr_valgrind_request_id_t
lookup_valgrind_request(ptr_uint_t request);
static ptr_uint_t
valgrind_running_on_valgrind(dr_vg_client_request_t *request);
# endif
static bool
is_annotation_tag(dcontext_t *dcontext, IN OUT app_pc *start_pc, instr_t *scratch,
OUT const char **name);
static void
identify_annotation(dcontext_t *dcontext, IN OUT annotation_layout_t *layout,
instr_t *scratch);
/* Create argument operands for the instrumented clean call */
static void
create_arg_opnds(dr_annotation_handler_t *handler, uint num_args,
dr_annotation_calling_convention_t call_type);
# ifdef DEBUG
/* Implements `dynamorio_annotate_log()`, including substitution of the string literal
* token "${timestamp}" with the current system time.
*/
static ssize_t
annotation_printf(const char *format, ...);
# endif
/* Invoked during hashtable entry removal */
static void
free_annotation_handler(void *p);
/*********************************************************
* ANNOTATION INTEGRATION FUNCTIONS
*/
void
annotation_init()
{
handlers = strhash_hash_create(
GLOBAL_DCONTEXT, 8, 80, /* favor a small table */
HASHTABLE_ENTRY_SHARED | HASHTABLE_SHARED | HASHTABLE_RELAX_CLUSTER_CHECKS |
HASHTABLE_PERSISTENT,
free_annotation_handler _IF_DEBUG("annotation handler hashtable"));
# if !(defined(WINDOWS) && defined(X64))
vg_handlers =
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, vg_handlers_t, ACCT_OTHER, UNPROTECTED);
memset(vg_handlers, 0, sizeof(vg_handlers_t));
vg_router.type = DR_ANNOTATION_HANDLER_CALL;
/* The Valgrind client request object is passed in %xax. */
vg_router.num_args = 1;
vg_router_arg = opnd_create_reg(DR_REG_XAX);
vg_router.args = &vg_router_arg;
vg_router.symbol_name = NULL; /* No symbols in Valgrind annotations. */
vg_router.receiver_list = &vg_receiver;
vg_receiver.instrumentation.callback = (void *)(void (*)())handle_vg_annotation;
vg_receiver.save_fpstate = false;
vg_receiver.next = NULL;
# endif
dr_annotation_register_return(DYNAMORIO_ANNOTATE_RUNNING_ON_DYNAMORIO_NAME,
(void *)(ptr_uint_t) true);
# ifdef DEBUG
/* The logging annotation requires a debug build of DR. Arbitrarily allows up to
* 20 arguments, since the clean call must have a fixed number of them.
*/
dr_annotation_register_call(DYNAMORIO_ANNOTATE_LOG_NAME, (void *)annotation_printf,
false, DYNAMORIO_ANNOTATE_LOG_ARG_COUNT,
DR_ANNOTATION_CALL_TYPE_VARARG);
# endif
# if !(defined(WINDOWS) && defined(X64))
/* DR pretends to be Valgrind. */
dr_annotation_register_valgrind(DR_VG_ID__RUNNING_ON_VALGRIND,
valgrind_running_on_valgrind);
# endif
}
void
annotation_exit()
{
# if !(defined(WINDOWS) && defined(X64))
uint i;
for (i = 0; i < DR_VG_ID__LAST; i++) {
if (vg_handlers->handlers[i] != NULL)
free_annotation_handler(vg_handlers->handlers[i]);
}
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, vg_handlers, vg_handlers_t, ACCT_OTHER, UNPROTECTED);
# endif
strhash_hash_destroy(GLOBAL_DCONTEXT, handlers);
}
bool
instrument_annotation(dcontext_t *dcontext, IN OUT app_pc *start_pc,
OUT instr_t **substitution _IF_WINDOWS_X64(IN bool hint_is_safe))
{
annotation_layout_t layout = { 0 };
/* This instr_t is used for analytical decoding throughout the detection functions.
* It is passed on the stack and its contents are considered void on function entry.
*/
instr_t scratch;
# if defined(WINDOWS) && defined(X64)
app_pc hint_pc = *start_pc;
bool hint = true;
byte hint_byte;
# endif
/* We need to use the passed-in cxt for IR but we need a real one for TRY_EXCEPT. */
dcontext_t *my_dcontext;
if (dcontext == GLOBAL_DCONTEXT)
my_dcontext = get_thread_private_dcontext();
else
my_dcontext = dcontext;
# if defined(WINDOWS) && defined(X64)
if (hint_is_safe) {
hint_byte = *hint_pc;
} else {
if (!d_r_safe_read(hint_pc, 1, &hint_byte))
return false;
}
if (hint_byte != WINDOWS_X64_ANNOTATION_HINT_BYTE)
return false;
/* The hint is the first byte of the 2-byte instruction `int 2c`. Skip both bytes. */
layout.start_pc = hint_pc + INT_LENGTH;
layout.substitution_xl8 = layout.start_pc;
# else
layout.start_pc = *start_pc;
# endif
instr_init(dcontext, &scratch);
TRY_EXCEPT(my_dcontext, { identify_annotation(dcontext, &layout, &scratch); },
{ /* EXCEPT */
LOG(THREAD, LOG_ANNOTATIONS, 2,
"Failed to instrument annotation at " PFX "\n", *start_pc);
/* layout.type is already ANNOTATION_TYPE_NONE */
});
if (layout.type != ANNOTATION_TYPE_NONE) {
LOG(GLOBAL, LOG_ANNOTATIONS, 2,
"Decoded %s annotation %s. Next pc now " PFX ".\n",
(layout.type == ANNOTATION_TYPE_EXPRESSION) ? "expression" : "statement",
layout.name, layout.resume_pc);
/* Notify the caller where to resume decoding app instructions. */
*start_pc = layout.resume_pc;
if (layout.type == ANNOTATION_TYPE_EXPRESSION) {
dr_annotation_handler_t *handler;
TABLE_RWLOCK(handlers, write, lock);
handler = strhash_hash_lookup(GLOBAL_DCONTEXT, handlers, layout.name);
if (handler != NULL && handler->type == DR_ANNOTATION_HANDLER_CALL) {
/* Substitute the annotation with a label pointing to the handler. */
instr_t *call = INSTR_CREATE_label(dcontext);
dr_instr_label_data_t *label_data = instr_get_label_data_area(call);
SET_ANNOTATION_HANDLER(label_data, handler);
SET_ANNOTATION_APP_PC(label_data, layout.resume_pc);
instr_set_note(call, (void *)DR_NOTE_ANNOTATION);
instr_set_meta(call);
*substitution = call;
handler->is_void = layout.is_void;
if (!handler->is_void) {
/* Append `mov $0x0,%eax` to the annotation substitution, so that
* clients and tools recognize that %xax will be written here.
* The placeholder is "ok to mangle" because it (partially)
* implements the app's annotation. The placeholder will be
* removed post-client during mangling.
* We only support writing the return value and no other registers
* (otherwise we'd need drreg to further special-case
* DR_NOTE_ANNOTATION).
*/
instr_t *return_placeholder =
INSTR_XL8(INSTR_CREATE_mov_st(dcontext, opnd_create_reg(REG_XAX),
OPND_CREATE_INT32(0)),
layout.substitution_xl8);
instr_set_note(return_placeholder, (void *)DR_NOTE_ANNOTATION);
/* Append the placeholder manually, because the caller can call
* `instrlist_append()` with a "sublist" of instr_t.
*/
instr_set_next(*substitution, return_placeholder);
instr_set_prev(return_placeholder, *substitution);
}
} else { /* Substitute the annotation with `mov $return_value,%eax` */
void *return_value;
if (handler == NULL)
return_value = NULL; /* Return nothing if no handler is registered */
else
return_value = handler->receiver_list->instrumentation.return_value;
*substitution =
INSTR_XL8(INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX),
OPND_RETURN_VALUE(return_value)),
layout.substitution_xl8);
}
TABLE_RWLOCK(handlers, write, unlock);
}
/* else (layout.type == ANNOTATION_TYPE_STATEMENT), in which case the only
* instrumentation is to remove the jump-over-annotation such that the annotation
* function gets called like a normal function. Instrumentation of clean calls and
* return values will then occur within the annotation function (the case above).
*/
}
instr_free(dcontext, &scratch);
return (layout.type != ANNOTATION_TYPE_NONE);
}
# if !(defined(WINDOWS) && defined(X64))
void
instrument_valgrind_annotation(dcontext_t *dcontext, instrlist_t *bb, instr_t *xchg_instr,
app_pc xchg_pc, app_pc next_pc, uint bb_instr_count)
{
int i;
app_pc instrumentation_pc = NULL;
instr_t *return_placeholder;
instr_t *instr, *next_instr;
dr_instr_label_data_t *label_data;
LOG(THREAD, LOG_ANNOTATIONS, 2,
"Matched valgrind client request pattern at " PFX "\n",
instr_get_app_pc(xchg_instr));
/* We leave the argument gathering code as app instructions, because it writes to app
* registers (xref i#1423). Now delete the `xchg` instruction, and the `rol`
* instructions--unless a previous BB contains some of the `rol`, in which case they
* must be executed to avoid messing up %xdi (the 4 `rol` compose to form a nop).
* Note: in the case of a split `rol` sequence, we only instrument the second half.
*/
instr_destroy(dcontext, xchg_instr);
if (bb_instr_count > VG_ROL_COUNT) {
instr = instrlist_last(bb);
for (i = 0; i < VG_ROL_COUNT; i++) {
ASSERT(instr != NULL && instr_get_opcode(instr) == OP_rol);
next_instr = instr_get_prev(instr);
instrlist_remove(bb, instr);
instr_destroy(dcontext, instr);
instr = next_instr;
}
}
/* i#1613: if the client removes the app instruction prior to the annotation, we will
* skip the annotation instrumentation, so identify the pc of that instruction here.
*/
if (instrlist_last(bb) != NULL)
instrumentation_pc = instrlist_last(bb)->translation;
/* Substitute the annotation tail with a label pointing to the Valgrind handler. */
instr = INSTR_CREATE_label(dcontext);
instr_set_note(instr, (void *)DR_NOTE_ANNOTATION);
label_data = instr_get_label_data_area(instr);
SET_ANNOTATION_HANDLER(label_data, &vg_router);
SET_ANNOTATION_APP_PC(label_data, next_pc);
SET_ANNOTATION_INSTRUMENTATION_PC(label_data, instrumentation_pc);
instr_set_meta(instr);
instrlist_append(bb, instr);
/* Append `mov $0x0,%edx` so that clients and tools recognize that %xdx will be
* written here. The placeholder is "ok to mangle" because it (partially) implements
* the app's annotation. The placeholder will be removed post-client during mangling.
* We only support writing the return value and no other registers (otherwise
* we'd need drreg to further special-case DR_NOTE_ANNOTATION).
*/
return_placeholder = INSTR_XL8(
INSTR_CREATE_mov_st(dcontext, opnd_create_reg(REG_XDX), OPND_CREATE_INT32(0)),
xchg_pc);
instr_set_note(return_placeholder, (void *)DR_NOTE_ANNOTATION);
instrlist_append(bb, return_placeholder);
}
# endif
/*********************************************************
* ANNOTATION API FUNCTIONS
*/
bool
dr_annotation_register_call(const char *annotation_name, void *callee, bool save_fpstate,
uint num_args, dr_annotation_calling_convention_t call_type)
{
bool result = true;
dr_annotation_handler_t *handler;
dr_annotation_receiver_t *receiver;
TABLE_RWLOCK(handlers, write, lock);
handler = (dr_annotation_handler_t *)strhash_hash_lookup(GLOBAL_DCONTEXT, handlers,
annotation_name);
if (handler == NULL) { /* make a new handler if never registered yet */
handler = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dr_annotation_handler_t, ACCT_OTHER,
UNPROTECTED);
memset(handler, 0, sizeof(dr_annotation_handler_t));
handler->type = DR_ANNOTATION_HANDLER_CALL;
handler->symbol_name = dr_strdup(annotation_name HEAPACCT(ACCT_OTHER));
handler->num_args = num_args;
if (num_args == 0) {
handler->args = NULL;
} else {
handler->args = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, opnd_t, num_args,
ACCT_OTHER, UNPROTECTED);
create_arg_opnds(handler, num_args, call_type);
}
strhash_hash_add(GLOBAL_DCONTEXT, handlers, handler->symbol_name, handler);
}
if (handler->type == DR_ANNOTATION_HANDLER_CALL || handler->receiver_list == NULL) {
/* If the annotation previously had a return value registration, it can be changed
* to clean call instrumentation, provided the return value was unregistered.
*/
handler->type = DR_ANNOTATION_HANDLER_CALL;
receiver = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dr_annotation_receiver_t, ACCT_OTHER,
UNPROTECTED);
receiver->instrumentation.callback = callee;
receiver->save_fpstate = save_fpstate;
receiver->next = handler->receiver_list; /* push the new receiver onto the list */
handler->receiver_list = receiver;
} else {
result = false; /* A return value is registered, so no call can be added. */
}
TABLE_RWLOCK(handlers, write, unlock);
return result;
}
# if !(defined(WINDOWS) && defined(X64))
bool
dr_annotation_register_valgrind(
dr_valgrind_request_id_t request_id,
ptr_uint_t (*annotation_callback)(dr_vg_client_request_t *request))
{
dr_annotation_handler_t *handler;
dr_annotation_receiver_t *receiver;
if (request_id >= DR_VG_ID__LAST)
return false;
TABLE_RWLOCK(handlers, write, lock);
handler = vg_handlers->handlers[request_id];
if (handler == NULL) { /* make a new handler if never registered yet */
handler = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dr_annotation_handler_t, ACCT_OTHER,
UNPROTECTED);
memset(handler, 0, sizeof(dr_annotation_handler_t));
handler->type = DR_ANNOTATION_HANDLER_VALGRIND;
vg_handlers->handlers[request_id] = handler;
}
receiver = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dr_annotation_receiver_t, ACCT_OTHER,
UNPROTECTED);
receiver->instrumentation.vg_callback = annotation_callback;
receiver->save_fpstate = false;
receiver->next = handler->receiver_list; /* push the new receiver onto the list */
handler->receiver_list = receiver;
TABLE_RWLOCK(handlers, write, unlock);
return true;
}
# endif
bool
dr_annotation_register_return(const char *annotation_name, void *return_value)
{
bool result = true;
dr_annotation_handler_t *handler;
dr_annotation_receiver_t *receiver;
TABLE_RWLOCK(handlers, write, lock);
ASSERT_TABLE_SYNCHRONIZED(handlers, WRITE);
handler = (dr_annotation_handler_t *)strhash_hash_lookup(GLOBAL_DCONTEXT, handlers,
annotation_name);
if (handler == NULL) { /* make a new handler if never registered yet */
handler = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dr_annotation_handler_t, ACCT_OTHER,
UNPROTECTED);
memset(handler, 0, sizeof(dr_annotation_handler_t));
handler->type = DR_ANNOTATION_HANDLER_RETURN_VALUE;
handler->symbol_name = dr_strdup(annotation_name HEAPACCT(ACCT_OTHER));
strhash_hash_add(GLOBAL_DCONTEXT, handlers, handler->symbol_name, handler);
}
if (handler->receiver_list == NULL) {
/* If the annotation previously had clean call registration, it can be changed to
* return value instrumentation, provided the calls have been unregistered.
*/
handler->type = DR_ANNOTATION_HANDLER_RETURN_VALUE;
receiver = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dr_annotation_receiver_t, ACCT_OTHER,
UNPROTECTED);
receiver->instrumentation.return_value = return_value;
receiver->save_fpstate = false;
receiver->next = NULL; /* Return value can only have one implementation. */
handler->receiver_list = receiver;
} else {
result = false; /* Existing handler prevents the new return value. */
}
TABLE_RWLOCK(handlers, write, unlock);
return result;
}
bool
dr_annotation_pass_pc(const char *annotation_name)
{
bool result = true;
dr_annotation_handler_t *handler;
TABLE_RWLOCK(handlers, write, lock);
ASSERT_TABLE_SYNCHRONIZED(handlers, WRITE);
handler = (dr_annotation_handler_t *)strhash_hash_lookup(GLOBAL_DCONTEXT, handlers,
annotation_name);
if (handler == NULL) {
result = false;
} else {
handler->pass_pc_in_slot = true;
}
TABLE_RWLOCK(handlers, write, unlock);
return result;
}
bool
dr_annotation_unregister_call(const char *annotation_name, void *callee)
{
bool found = false;
dr_annotation_handler_t *handler;
TABLE_RWLOCK(handlers, write, lock);
handler = (dr_annotation_handler_t *)strhash_hash_lookup(GLOBAL_DCONTEXT, handlers,
annotation_name);
if (handler != NULL && handler->receiver_list != NULL) {
dr_annotation_receiver_t *receiver = handler->receiver_list;
if (receiver->instrumentation.callback == callee) { /* case 1: remove the head */
handler->receiver_list = receiver->next;
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, receiver, dr_annotation_receiver_t,
ACCT_OTHER, UNPROTECTED);
found = true;
} else { /* case 2: remove from within the list */
while (receiver->next != NULL) {
if (receiver->next->instrumentation.callback == callee) {
dr_annotation_receiver_t *removal = receiver->next;
receiver->next = removal->next;
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, removal, dr_annotation_receiver_t,
ACCT_OTHER, UNPROTECTED);
found = true;
break;
}
receiver = receiver->next;
}
} /* Leave the handler for the next registration (free it on exit) */
}
TABLE_RWLOCK(handlers, write, unlock);
return found;
}
bool
dr_annotation_unregister_return(const char *annotation_name)
{
bool found = false;
dr_annotation_handler_t *handler;
TABLE_RWLOCK(handlers, write, lock);
handler = (dr_annotation_handler_t *)strhash_hash_lookup(GLOBAL_DCONTEXT, handlers,
annotation_name);
if ((handler != NULL) && (handler->receiver_list != NULL)) {
ASSERT(handler->receiver_list->next == NULL);
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, handler->receiver_list, dr_annotation_receiver_t,
ACCT_OTHER, UNPROTECTED);
handler->receiver_list = NULL;
found = true;
} /* Leave the handler for the next registration (free it on exit) */
TABLE_RWLOCK(handlers, write, unlock);
return found;
}
# if !(defined(WINDOWS) && defined(X64))
bool
dr_annotation_unregister_valgrind(
dr_valgrind_request_id_t request_id,
ptr_uint_t (*annotation_callback)(dr_vg_client_request_t *request))
{
bool found = false;
dr_annotation_handler_t *handler;
TABLE_RWLOCK(handlers, write, lock);
handler = vg_handlers->handlers[request_id];
if ((handler != NULL) && (handler->receiver_list != NULL)) {
dr_annotation_receiver_t *receiver = handler->receiver_list;
if (receiver->instrumentation.vg_callback == annotation_callback) {
handler->receiver_list = receiver->next; /* case 1: remove the head */
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, receiver, dr_annotation_receiver_t,
ACCT_OTHER, UNPROTECTED);
found = true;
} else { /* case 2: remove from within the list */
while (receiver->next != NULL) {
if (receiver->next->instrumentation.vg_callback == annotation_callback) {
dr_annotation_receiver_t *removal = receiver->next;
receiver->next = removal->next;
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, removal, dr_annotation_receiver_t,
ACCT_OTHER, UNPROTECTED);
found = true;
break;
}
receiver = receiver->next;
}
} /* Leave the handler for the next registration (free it on exit) */
}
TABLE_RWLOCK(handlers, write, unlock);
return found;
}
/*********************************************************
* ANNOTATION IMPLEMENTATIONS
*/
static void
handle_vg_annotation(app_pc request_args)
{
dcontext_t *dcontext = get_thread_private_dcontext();
dr_valgrind_request_id_t request_id;
dr_annotation_receiver_t *receiver;
dr_vg_client_request_t request;
ptr_uint_t result;
if (!d_r_safe_read(request_args, sizeof(dr_vg_client_request_t), &request)) {
LOG(THREAD, LOG_ANNOTATIONS, 2,
"Failed to read Valgrind client request args at " PFX
". Skipping the annotation.\n",
request_args);
return;
}
result = request.default_result;
request_id = lookup_valgrind_request(request.request);
if (request_id < DR_VG_ID__LAST) {
TABLE_RWLOCK(handlers, read, lock);
if (vg_handlers->handlers[request_id] != NULL) {
receiver = vg_handlers->handlers[request_id]->receiver_list;
while (receiver != NULL) {
result = receiver->instrumentation.vg_callback(&request);
receiver = receiver->next;
}
}
TABLE_RWLOCK(handlers, read, unlock);
} else {
LOG(THREAD, LOG_ANNOTATIONS, 2,
"Skipping unrecognized Valgrind client request id %d\n", request.request);
return;
}
/* Put the result in %xdx where the target app expects to find it. */
if (dcontext->client_data->mcontext_in_dcontext) {
get_mcontext(dcontext)->xdx = result;
} else {
priv_mcontext_t *state = get_priv_mcontext_from_dstack(dcontext);
state->xdx = result;
}
}
static dr_valgrind_request_id_t
lookup_valgrind_request(ptr_uint_t request)
{
switch (request) {
case VG_USERREQ__RUNNING_ON_VALGRIND: return DR_VG_ID__RUNNING_ON_VALGRIND;
case VG_USERREQ__DO_LEAK_CHECK: return DR_VG_ID__DO_LEAK_CHECK;
case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
return DR_VG_ID__MAKE_MEM_DEFINED_IF_ADDRESSABLE;
case VG_USERREQ__DISCARD_TRANSLATIONS: return DR_VG_ID__DISCARD_TRANSLATIONS;
}
return DR_VG_ID__LAST;
}
static ptr_uint_t
valgrind_running_on_valgrind(dr_vg_client_request_t *request)
{
return 1; /* Pretend to be Valgrind. */
}
# endif
/*********************************************************
* INTERNAL ROUTINES
*/
/* If the app code at `*cur_pc` can be read as an encoded annotation label, then:
* (1) advance `*cur_pc` beyond the last label instruction,
* (2) point `**name` to the start of the label within the app image data section, and
* (3) return true.
* If there is no annotation label at `*cur_pc`, or failure occurs while trying to read
* it, then return false with undefined values for `*cur_pc` and `**name`.
*
* On Unix and Windows x86, the label has the form
* "<annotation-label>:<return-type>:<annotation-name>, e.g.
*
* "dynamorio-annotation:void:dynamorio_annotate_log"
* "dynamorio-annotation:const char *:some_custom_client_annotation"
*
* On Windows x64, the label has an additional token for the annotation type:
* "<annotation-label>:<annotation-type>:<return-type>:<annotation-name>, e.g.
*
* "dynamorio-annotation:statement:void:dynamorio_annotate_log"
* "dynamorio-annotation:expression:const char *:some_custom_client_annotation"
*
* The encoding of the pointer to the label varies by platform:
*
* Unix expression annotation label (two instructions form <GOT-base> + <GOT-offset>):
*
* mov $<GOT-base>,%xax
* bsr $<GOT-offset>,%xax
*
* Windows x86 expression annotation label (direct pointer to the <label>):
*
* mov $<label>,%eax
*
* Windows x64 expression annotation label (same, in 2 variations):
*
* mov $<label>,%rax Or prefetch $<label>
* prefetch %rax
*
* Decoding the label pointer proceeds as follows:
*
* (step 1) check `*cur_pc` for the opcode of the first label-encoding instruction
* (step 2) check the operand of that instruction for a label-encoding operand type
* Unix only--dereference the GOT entry:
* (step 3) check the next instruction for the label offset opcode (bsr or bsf)
* (step 4) check its operand for the offset-encoding operand type (base disp)
* (step 5) add the two operands and dereference as the label's GOT entry
* (step 6) attempt to read the decoded pointer and compare to "dynamorio-annotation"
* (note there is a special case for Windows x64, which is not inline asm)
* (step 7) if it matches, point `**name` to the character beyond the separator ':'
*
* See https://dynamorio.org/page_annotations.html for complete examples.
*/
static inline bool
is_annotation_tag(dcontext_t *dcontext, IN OUT app_pc *cur_pc, instr_t *scratch,
OUT const char **name)
{
app_pc start_pc = *cur_pc;
instr_reset(dcontext, scratch);
*cur_pc = decode(dcontext, *cur_pc, scratch);
if (IS_ANNOTATION_LABEL_INSTR(scratch)) { /* step 1 */
opnd_t src = instr_get_src(scratch, 0);
if (IS_ANNOTATION_LABEL_REFERENCE(src)) { /* step 2 */
char buf[DYNAMORIO_ANNOTATION_LABEL_LENGTH + 1 /*nul*/];
app_pc buf_ptr;
app_pc opnd_ptr = GET_ANNOTATION_LABEL_REFERENCE(src, start_pc);
# ifdef UNIX
app_pc got_ptr;
instr_reset(dcontext, scratch);
*cur_pc = decode(dcontext, *cur_pc, scratch);
if (!IS_ANNOTATION_LABEL_GOT_OFFSET_INSTR(scratch)) /* step 3 */
return false;
src = instr_get_src(scratch, 0);
if (!IS_ANNOTATION_LABEL_GOT_OFFSET_REFERENCE(src)) /* step 4 */
return false;
opnd_ptr += opnd_get_disp(src); /* step 5 */
if (!d_r_safe_read(opnd_ptr, sizeof(app_pc), &got_ptr))
return false;
opnd_ptr = got_ptr;
# endif
# if defined(WINDOWS) && defined(X64)
/* In Windows x64, if the `prefetch` instruction was found at
* `*cur_pc` with no intervening `mov` instruction, the label
* pointer must be an immediate operand to that `prefetch`.
*/
if (instr_get_opcode(scratch) == OP_prefetchw) { /* step 6 */
if (!d_r_safe_read(opnd_ptr, DYNAMORIO_ANNOTATION_LABEL_LENGTH, buf))
return false;
buf[DYNAMORIO_ANNOTATION_LABEL_LENGTH] = '\0';
if (strcmp(buf, DYNAMORIO_ANNOTATION_LABEL) == 0) {
*name = (const char *)(opnd_ptr + /* step 7 */
DYNAMORIO_ANNOTATION_LABEL_LENGTH +
1); /* skip the separator ':' */
return true;
}
} /* else the label pointer is the usual `mov` operand: */
# endif
if (!d_r_safe_read(opnd_ptr, sizeof(app_pc), &buf_ptr)) /* step 6 */
return false;
if (!d_r_safe_read(buf_ptr, DYNAMORIO_ANNOTATION_LABEL_LENGTH, buf))
return false;
buf[DYNAMORIO_ANNOTATION_LABEL_LENGTH] = '\0';
if (strcmp(buf, DYNAMORIO_ANNOTATION_LABEL) != 0)
return false;
*name = (const char *)(buf_ptr + /* step 7 */
DYNAMORIO_ANNOTATION_LABEL_LENGTH +
1); /* skip the separator ':' */
return true;
}
}
return false;
}
# if defined(WINDOWS) && defined(X64)
/* Identify the annotation at layout->start_pc, if any. On Windows x64, some flexibility
* is required to recognize the annotation sequence because it is compiled instead of
* explicitly inserted with inline asm (which is unavailable in MSVC for this platform).
* (step 1) check if the instruction at layout->start_pc encodes an annotation label.
* (step 2) decode arbitrary instructions up to a `prefetch`,
* following any direct branches decoded along the way.
* (step 3) skip the `int 3` following the `prefetch`.
* (step 4) set the substitution_xl8 to precede the resume_pc, so we'll resume in the
* right place if the client removes all instrs following the substitution.
* (step 5) set the resume pc, so execution resumes within the annotation body.
* (step 6) compare the next label token to determine the annotation type.
* Expression annotations only:
* (step 7) compare the return type to determine whether the annotation is void.
* (step 8) advance the label pointer to the name token
* (note that the Statement annotation concludes with a special case).
*/
static inline void
identify_annotation(dcontext_t *dcontext, IN OUT annotation_layout_t *layout,
instr_t *scratch)
{
app_pc cur_pc = layout->start_pc, last_call = NULL;
if (!is_annotation_tag(dcontext, &cur_pc, scratch, &layout->name)) /* step 1 */
return;
while (instr_get_opcode(scratch) != OP_prefetchw) { /* step 2 */
if (instr_is_ubr(scratch))
cur_pc = instr_get_branch_target_pc(scratch);
instr_reset(dcontext, scratch);
cur_pc = decode(dcontext, cur_pc, scratch);
}
ASSERT(*cur_pc == WINDOWS_X64_OPTIMIZATION_FENCE); /* step 3 */
layout->substitution_xl8 = cur_pc; /* step 4 */
cur_pc++;
layout->resume_pc = cur_pc; /* step 5 */
if (IS_ANNOTATION_STATEMENT_LABEL(layout->name)) { /* step 6 */
layout->type = ANNOTATION_TYPE_STATEMENT;
layout->name += (ANNOTATION_STATEMENT_LABEL_LENGTH + 1);
layout->name = strchr(layout->name, ':') + 1; /* step 8 */
/* If the target app contains an annotation whose argument is a function call
* that gets inlined, and that function contains the same annotation, the
* compiler will fuse the headers. See
* https://dynamorio.org/page_annotations.html for a sample of
* fused headers. This loop identifies and skips any fused headers.
*/
while (true) {
instr_reset(dcontext, scratch);
cur_pc = decode(dcontext, cur_pc, scratch);
if (IS_ANNOTATION_HEADER(scratch, cur_pc)) {
cur_pc += INT_LENGTH;
while (instr_get_opcode(scratch) != OP_prefetchw) {
if (instr_is_ubr(scratch))
cur_pc = instr_get_branch_target_pc(scratch);
instr_reset(dcontext, scratch);
cur_pc = decode(dcontext, cur_pc, scratch);
}
ASSERT(*cur_pc == WINDOWS_X64_OPTIMIZATION_FENCE);
cur_pc++;
layout->resume_pc = cur_pc;
} else if (instr_is_cti(scratch))
break;
}
} else {
layout->type = ANNOTATION_TYPE_EXPRESSION;
layout->name += (ANNOTATION_EXPRESSION_LABEL_LENGTH + 1);
layout->is_void = IS_ANNOTATION_VOID(layout->name); /* step 7 */
layout->name = strchr(layout->name, ':') + 1; /* step 8 */
}
}
# else /* Windows x86 and all Unix */
/* Identify the annotation at layout->start_pc, if any. In summary:
* (step 1) check if the instruction at layout->start_pc encodes an annotation label.
* (step 2) determine the annotation type based on instruction opcodes
* Expression annotations only:
* (step 3) compare the return type to determine whether the annotation is void.
* (step 4) adjust the substitution xl8 to the current pc.
* (step 5) decode past the jump over the annotation body.
* (step 6) set the resume pc to the current instruction, which is a jump over the
* native version of the annotation body (specified by the target app).
* (step 7) advance the label pointer to the name token.
*/
static inline void
identify_annotation(dcontext_t *dcontext, IN OUT annotation_layout_t *layout,
instr_t *scratch)
{
app_pc cur_pc = layout->start_pc;
if (is_annotation_tag(dcontext, &cur_pc, scratch, &layout->name)) { /* step 1 */
# ifdef WINDOWS
if (*(cur_pc++) == RAW_OPCODE_pop_eax) { /* step 2 */
# else
if (instr_get_opcode(scratch) == OP_bsf) { /* step 2 */
# endif
layout->type = ANNOTATION_TYPE_STATEMENT;
} else {
layout->type = ANNOTATION_TYPE_EXPRESSION;
layout->is_void = IS_ANNOTATION_VOID(layout->name); /* step 3 */
}
layout->substitution_xl8 = cur_pc; /* step 4 */
instr_reset(dcontext, scratch);
cur_pc = decode_cti(dcontext, cur_pc, scratch); /* step 5 */
ASSERT(instr_is_ubr(scratch));
layout->resume_pc = cur_pc; /* step 6 */
layout->name = strchr(layout->name, ':') + 1; /* step 7 */
}
}
# endif
# ifdef X64
# ifdef UNIX
static inline void /* UNIX x64 */
create_arg_opnds(dr_annotation_handler_t *handler, uint num_args,
dr_annotation_calling_convention_t call_type)
{
uint i, arg_stack_location;
ASSERT(call_type == DR_ANNOTATION_CALL_TYPE_FASTCALL); /* architecture constraint */
switch (num_args) { /* Create up to six register args */
default:
case 6: handler->args[5] = opnd_create_reg(DR_REG_R9);
case 5: handler->args[4] = opnd_create_reg(DR_REG_R8);
case 4: handler->args[3] = opnd_create_reg(DR_REG_XCX);
case 3: handler->args[2] = opnd_create_reg(DR_REG_XDX);
case 2: handler->args[1] = opnd_create_reg(DR_REG_XSI);
case 1: handler->args[0] = opnd_create_reg(DR_REG_XDI);
}
/* Create the remaining args on the stack */
for (i = FASTCALL_REGISTER_ARG_COUNT; i < num_args; i++) {
/* The clean call will appear at the top of the annotation function body, where
* the stack arguments follow the return address and the caller's saved stack
* pointer. Rewind `i` to 0 and add 2 to skip over these pointers.
*/
arg_stack_location = sizeof(ptr_uint_t) * (i - FASTCALL_REGISTER_ARG_COUNT + 2);
/* Use the stack pointer because the base pointer is a general register in x64 */
handler->args[i] = OPND_CREATE_MEMPTR(DR_REG_XSP, arg_stack_location);
}
}
# else /* WINDOWS x64 */
static inline void
create_arg_opnds(dr_annotation_handler_t *handler, uint num_args,
dr_annotation_calling_convention_t call_type)
{
uint i, arg_stack_location;
ASSERT(call_type == DR_ANNOTATION_CALL_TYPE_FASTCALL); /* architecture constraint */
switch (num_args) { /* Create up to four register args */
default:
case 4: handler->args[3] = opnd_create_reg(DR_REG_R9);
case 3: handler->args[2] = opnd_create_reg(DR_REG_R8);
case 2: handler->args[1] = opnd_create_reg(DR_REG_XDX);
case 1: handler->args[0] = opnd_create_reg(DR_REG_XCX);
}
/* Create the remaining args on the stack */
for (i = FASTCALL_REGISTER_ARG_COUNT; i < num_args; i++) {
/* The clean call will appear at the top of the annotation function body, where
* the stack arguments follow the return address and 32 bytes of empty space.
* Since `i` is already starting at 4, just add one more to reach the args.
*/
arg_stack_location = sizeof(ptr_uint_t) * (i + 1);
/* Use the stack pointer because the base pointer is a general register in x64 */
handler->args[i] = OPND_CREATE_MEMPTR(DR_REG_XSP, arg_stack_location);
}
}
# endif
# else /* x86 (all) */
static inline void
create_arg_opnds(dr_annotation_handler_t *handler, uint num_args,
dr_annotation_calling_convention_t call_type)
{
uint i, arg_stack_location;
if (call_type == DR_ANNOTATION_CALL_TYPE_FASTCALL) {
switch (num_args) { /* Create 1 or 2 register args */
default:
case 2: handler->args[1] = opnd_create_reg(DR_REG_XDX);
case 1: handler->args[0] = opnd_create_reg(DR_REG_XCX);
}
/* Create the remaining args on the stack */
for (i = FASTCALL_REGISTER_ARG_COUNT; i < num_args; i++) {
/* The clean call will appear at the top of the annotation function body,
* where the stack args follow the return address and the caller's saved base
* pointer. Since `i` already starts at 2, use it to skip those pointers.
*/
arg_stack_location = sizeof(ptr_uint_t) * i;
handler->args[i] = OPND_CREATE_MEMPTR(DR_REG_XBP, arg_stack_location);
}
} else { /* DR_ANNOTATION_CALL_TYPE_STDCALL: Create all args on the stack */
for (i = 0; i < num_args; i++) {
/* The clean call will appear at the top of the annotation function body,
* where the stack args follow the return address and the caller's saved base
* pointer. Since `i` starts at 0, add 2 to skip those pointers.
*/
arg_stack_location = sizeof(ptr_uint_t) * (i + 2);
handler->args[i] = OPND_CREATE_MEMPTR(DR_REG_XBP, arg_stack_location);
}
}
}
# endif
# ifdef DEBUG
static ssize_t
annotation_printf(const char *format, ...)
{
va_list ap;
ssize_t count;
const char *timestamp_token_start;
char *timestamped_format = NULL;
uint buffer_length = 0;
if (d_r_stats == NULL || d_r_stats->loglevel == 0)
return 0; /* No log is available for writing. */
if ((d_r_stats->logmask & LOG_VIA_ANNOTATIONS) == 0)
return 0; /* Filtered out by the user. */
/* Substitute the first instance of the timestamp token with a timestamp string.
* Additional timestamp tokens will be ignored, because it would be pointless.
*/
timestamp_token_start = strstr(format, LOG_ANNOTATION_TIMESTAMP_TOKEN);
if (timestamp_token_start != NULL) {
char timestamp_buffer[PRINT_TIMESTAMP_MAX_LENGTH];
buffer_length = (uint)(strlen(format) + PRINT_TIMESTAMP_MAX_LENGTH);
if (print_timestamp_to_buffer(timestamp_buffer, PRINT_TIMESTAMP_MAX_LENGTH) > 0) {
uint length_before_token =
(uint)((ptr_uint_t)timestamp_token_start - (ptr_uint_t)format);
/* print the timestamped format string into this heap buffer */
timestamped_format = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, char, buffer_length,
ACCT_OTHER, UNPROTECTED);
/* copy the original format string up to the timestamp token */
d_r_snprintf(timestamped_format, length_before_token, "%s", format);
/* copy the timestamp and the remainder of the original format string */
d_r_snprintf(timestamped_format + length_before_token,
(buffer_length - length_before_token), "%s%s", timestamp_buffer,
timestamp_token_start + LOG_ANNOTATION_TIMESTAMP_TOKEN_LENGTH);
/* use the timestamped format string */
format = (const char *)timestamped_format;
} else {
LOG(GLOBAL, LOG_ANNOTATIONS, 2,
"Failed to obtain a system timestamp for "
"substitution in annotation log statements '%s'\n",
format);
}
}
va_start(ap, format);
count = do_file_write(GLOBAL, format, ap);
va_end(ap);
if (timestamped_format != NULL) { /* free the timestamp heap buffer, if any */
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, format, char, buffer_length, ACCT_OTHER,
UNPROTECTED);
}
return count;
}
# endif
static void
free_annotation_handler(void *p)
{
dr_annotation_handler_t *handler = (dr_annotation_handler_t *)p;
dr_annotation_receiver_t *next, *receiver = handler->receiver_list;
while (receiver != NULL) {
next = receiver->next;
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, receiver, dr_annotation_receiver_t, ACCT_OTHER,
UNPROTECTED);
receiver = next;
}
if (handler->num_args > 0) {
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, handler->args, opnd_t, handler->num_args,
ACCT_OTHER, UNPROTECTED);
}
if ((handler->type == DR_ANNOTATION_HANDLER_CALL) ||
(handler->type == DR_ANNOTATION_HANDLER_RETURN_VALUE))
dr_strfree(handler->symbol_name HEAPACCT(ACCT_OTHER));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, p, dr_annotation_handler_t, ACCT_OTHER, UNPROTECTED);
}
#endif /* ANNOTATIONS */
| 1 | 25,873 | Update year range in Copyright notice, and elsewhere too. | DynamoRIO-dynamorio | c |
@@ -179,10 +179,10 @@ type ACMEIssuerDNS01ProviderCloudflare struct {
// ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53
// configuration for AWS
type ACMEIssuerDNS01ProviderRoute53 struct {
- AccessKeyID string `json:"accessKeyID"`
- SecretAccessKey SecretKeySelector `json:"secretAccessKeySecretRef"`
- HostedZoneID string `json:"hostedZoneID"`
- Region string `json:"region"`
+ AccessKeyIDRef SecretKeySelector `json:"accessKeyIDSecretRef"`
+ SecretAccessKeyRef SecretKeySelector `json:"secretAccessKeySecretRef"`
+ HostedZoneID string `json:"hostedZoneID"`
+ Region string `json:"region"`
}
// ACMEIssuerDNS01ProviderAzureDNS is a structure containing the | 1 | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
AltNamesAnnotationKey = "certmanager.k8s.io/alt-names"
CommonNameAnnotationKey = "certmanager.k8s.io/common-name"
IssuerNameAnnotationKey = "certmanager.k8s.io/issuer-name"
IssuerKindAnnotationKey = "certmanager.k8s.io/issuer-kind"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=clusterissuers
type ClusterIssuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterIssuerList is a list of Issuers
type ClusterIssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []ClusterIssuer `json:"items"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=issuers
type Issuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IssuerList is a list of Issuers
type IssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Issuer `json:"items"`
}
// IssuerSpec is the specification of an Issuer. This includes any
// configuration required for the issuer.
type IssuerSpec struct {
IssuerConfig `json:",inline"`
}
type IssuerConfig struct {
ACME *ACMEIssuer `json:"acme,omitempty"`
CA *CAIssuer `json:"ca,omitempty"`
Vault *VaultIssuer `json:"vault,omitempty"`
}
type VaultIssuer struct {
// Vault authentication
Auth VaultAuth `json:"auth"`
// Server is the vault connection address
Server string `json:"server"`
// Vault URL path to the certificate role
Path string `json:"path"`
}
// Vault authentication can be configured:
// - With a secret containing a token. Cert-manager uses this token as-is.
// - With a secret containing an AppRole. This AppRole is used to authenticate to
// Vault and retrieve a token.
type VaultAuth struct {
// This Secret contains the Vault token key
TokenSecretRef SecretKeySelector `json:"tokenSecretRef,omitempty"`
// This Secret contains an AppRole and Secret
AppRole VaultAppRole `json:"appRole,omitempty"`
}
type VaultAppRole struct {
RoleId string `json:"roleId"`
SecretRef SecretKeySelector `json:"secretRef"`
}
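// Illustrative sketch, not part of the upstream file: the two configuration
// modes described above for VaultAuth. The role name is hypothetical, and the
// SecretKeySelector values are left empty because its fields are defined in
// another file of this package.
func exampleVaultAuth(useAppRole bool) VaultAuth {
	if useAppRole {
		// AppRole mode: cert-manager authenticates with the role id plus the
		// referenced secret and obtains a token from Vault.
		return VaultAuth{
			AppRole: VaultAppRole{
				RoleId:    "cert-manager",
				SecretRef: SecretKeySelector{},
			},
		}
	}
	// Token mode: the referenced secret already contains a Vault token,
	// which is used as-is.
	return VaultAuth{TokenSecretRef: SecretKeySelector{}}
}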
type CAIssuer struct {
// SecretName is the name of the secret used to sign Certificates issued
// by this Issuer.
SecretName string `json:"secretName"`
}
// ACMEIssuer contains the specification for an ACME issuer
type ACMEIssuer struct {
// Email is the email for this account
Email string `json:"email"`
// Server is the ACME server URL
Server string `json:"server"`
// If true, skip verifying the ACME server TLS certificate
SkipTLSVerify bool `json:"skipTLSVerify,omitempty"`
// PrivateKey is the name of a secret containing the private key for this
// user account.
PrivateKey SecretKeySelector `json:"privateKeySecretRef"`
// HTTP01 config
HTTP01 *ACMEIssuerHTTP01Config `json:"http01,omitempty"`
// DNS-01 config
DNS01 *ACMEIssuerDNS01Config `json:"dns01,omitempty"`
}
type ACMEIssuerHTTP01Config struct {
}
// ACMEIssuerDNS01Config is a structure containing the ACME DNS configuration
// options
type ACMEIssuerDNS01Config struct {
Providers []ACMEIssuerDNS01Provider `json:"providers"`
}
type ACMEIssuerDNS01Provider struct {
Name string `json:"name"`
Akamai *ACMEIssuerDNS01ProviderAkamai `json:"akamai,omitempty"`
CloudDNS *ACMEIssuerDNS01ProviderCloudDNS `json:"clouddns,omitempty"`
Cloudflare *ACMEIssuerDNS01ProviderCloudflare `json:"cloudflare,omitempty"`
Route53 *ACMEIssuerDNS01ProviderRoute53 `json:"route53,omitempty"`
AzureDNS *ACMEIssuerDNS01ProviderAzureDNS `json:"azuredns,omitempty"`
}
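// Illustrative sketch, not part of the original file: a DNS01 configuration
// with a single named provider. The provider name and AWS details are
// assumptions for the example; the Name field appears to be what
// certificate-level DNS01 config (ACMECertificateDNS01Config below) uses to
// select a provider, but treat that linkage as an assumption here.
var exampleDNS01Providers = ACMEIssuerDNS01Config{
Providers: []ACMEIssuerDNS01Provider{
{
Name: "example-route53",
Route53: &ACMEIssuerDNS01ProviderRoute53{
AccessKeyID: "AKIAEXAMPLE",
SecretAccessKey: SecretKeySelector{
LocalObjectReference: LocalObjectReference{Name: "route53-credentials"},
Key: "secret-access-key",
},
Region: "us-east-1",
},
},
},
}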
// ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS
// configuration for Akamai DNS—Zone Record Management API
type ACMEIssuerDNS01ProviderAkamai struct {
ServiceConsumerDomain string `json:"serviceConsumerDomain"`
ClientToken SecretKeySelector `json:"clientTokenSecretRef"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
AccessToken SecretKeySelector `json:"accessTokenSecretRef"`
}
// ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS
// configuration for Google Cloud DNS
type ACMEIssuerDNS01ProviderCloudDNS struct {
ServiceAccount SecretKeySelector `json:"serviceAccountSecretRef"`
Project string `json:"project"`
}
// ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS
// configuration for Cloudflare
type ACMEIssuerDNS01ProviderCloudflare struct {
Email string `json:"email"`
APIKey SecretKeySelector `json:"apiKeySecretRef"`
}
// ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53
// configuration for AWS
type ACMEIssuerDNS01ProviderRoute53 struct {
AccessKeyID string `json:"accessKeyID"`
SecretAccessKey SecretKeySelector `json:"secretAccessKeySecretRef"`
HostedZoneID string `json:"hostedZoneID"`
Region string `json:"region"`
}
// ACMEIssuerDNS01ProviderAzureDNS is a structure containing the
// configuration for Azure DNS
type ACMEIssuerDNS01ProviderAzureDNS struct {
ClientID string `json:"clientID"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
SubscriptionID string `json:"subscriptionID"`
TenantID string `json:"tenantID"`
ResourceGroupName string `json:"resourceGroupName"`
// +optional
HostedZoneName string `json:"hostedZoneName"`
}
// IssuerStatus contains status information about an Issuer
type IssuerStatus struct {
Conditions []IssuerCondition `json:"conditions"`
ACME *ACMEIssuerStatus `json:"acme,omitempty"`
}
// IssuerCondition contains condition information for an Issuer.
type IssuerCondition struct {
// Type of the condition, currently ('Ready').
Type IssuerConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// IssuerConditionType represents an Issuer condition value.
type IssuerConditionType string
const (
// IssuerConditionReady represents the fact that a given Issuer condition
// is in ready state.
IssuerConditionReady IssuerConditionType = "Ready"
)
// ConditionStatus represents a condition's status.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in
// the condition; "ConditionFalse" means a resource is not in the condition;
// "ConditionUnknown" means kubernetes can't decide if a resource is in the
// condition or not. In the future, we could add other intermediate
// conditions, e.g. ConditionDegraded.
const (
// ConditionTrue represents the fact that a given condition is true
ConditionTrue ConditionStatus = "True"
// ConditionFalse represents the fact that a given condition is false
ConditionFalse ConditionStatus = "False"
// ConditionUnknown represents the fact that a given condition is unknown
ConditionUnknown ConditionStatus = "Unknown"
)
type ACMEIssuerStatus struct {
// URI is the unique account identifier, which can also be used to retrieve
// account details from the CA
URI string `json:"uri"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +resource:path=certificates
// Certificate is a type to represent a Certificate from ACME
type Certificate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CertificateSpec `json:"spec,omitempty"`
Status CertificateStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CertificateList is a list of Certificates
type CertificateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Certificate `json:"items"`
}
// CertificateSpec defines the desired state of Certificate
type CertificateSpec struct {
// CommonName is a common name to be used on the Certificate
CommonName string `json:"commonName"`
// DNSNames is a list of subject alt names to be used on the Certificate
DNSNames []string `json:"dnsNames"`
// SecretName is the name of the secret resource to store this secret in
SecretName string `json:"secretName"`
// IssuerRef is a reference to the issuer for this certificate. If the
// namespace field is not set, it is assumed to be in the same namespace
// as the certificate. If the namespace field is set to the empty value "",
// a ClusterIssuer of the given name will be used. Any other value is
// invalid.
IssuerRef ObjectReference `json:"issuerRef"`
ACME *ACMECertificateConfig `json:"acme,omitempty"`
}
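// Illustrative sketch, not part of the original file: a CertificateSpec whose
// IssuerRef selects a ClusterIssuer by kind, per the IssuerRef comment above.
// The names used here ("example.com", "letsencrypt-prod") are assumptions.
var exampleCertificateSpec = CertificateSpec{
CommonName: "example.com",
DNSNames: []string{"example.com", "www.example.com"},
SecretName: "example-com-tls",
IssuerRef: ObjectReference{
Name: "letsencrypt-prod",
Kind: ClusterIssuerKind,
},
}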
// ACMECertificateConfig contains the configuration for the ACME certificate provider
type ACMECertificateConfig struct {
Config []ACMECertificateDomainConfig `json:"config"`
}
type ACMECertificateDomainConfig struct {
Domains []string `json:"domains"`
ACMESolverConfig `json:",inline"`
}
type ACMESolverConfig struct {
HTTP01 *ACMECertificateHTTP01Config `json:"http01,omitempty"`
DNS01 *ACMECertificateDNS01Config `json:"dns01,omitempty"`
}
type ACMECertificateHTTP01Config struct {
Ingress string `json:"ingress"`
IngressClass *string `json:"ingressClass,omitempty"`
}
type ACMECertificateDNS01Config struct {
Provider string `json:"provider"`
}
// CertificateStatus defines the observed state of Certificate
type CertificateStatus struct {
Conditions []CertificateCondition `json:"conditions,omitempty"`
ACME *CertificateACMEStatus `json:"acme,omitempty"`
}
// CertificateCondition contains condition information for a Certificate.
type CertificateCondition struct {
// Type of the condition, currently ('Ready').
Type CertificateConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// CertificateConditionType represents a Certificate condition value.
type CertificateConditionType string
const (
// CertificateConditionReady represents the fact that a given Certificate condition
// is in ready state.
CertificateConditionReady CertificateConditionType = "Ready"
// CertificateConditionValidationFailed is used to indicate whether a
// validation for a Certificate has failed.
// This is currently used by the ACME issuer to track when the last
// validation was attempted.
CertificateConditionValidationFailed CertificateConditionType = "ValidateFailed"
)
// CertificateACMEStatus holds the status for an ACME issuer
type CertificateACMEStatus struct {
// Order contains details about the current in-progress ACME Order.
Order ACMEOrderStatus `json:"order,omitempty"`
}
type ACMEOrderStatus struct {
// The URL that can be used to get information about the ACME order.
URL string `json:"url"`
Challenges []ACMEOrderChallenge `json:"challenges,omitempty"`
}
type ACMEOrderChallenge struct {
// The URL that can be used to get information about the ACME challenge.
URL string `json:"url"`
// The URL that can be used to get information about the ACME authorization
// associated with the challenge.
AuthzURL string `json:"authzURL"`
// Type of ACME challenge
// Either http-01 or dns-01
Type string `json:"type"`
// Domain this challenge corresponds to
Domain string `json:"domain"`
// Challenge token for this challenge
Token string `json:"token"`
// Challenge key for this challenge
Key string `json:"key"`
// Set to true if this challenge is for a wildcard domain
Wildcard bool `json:"wildcard"`
// Configuration used to present this challenge
ACMESolverConfig `json:",inline"`
}
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// TODO: Add other useful fields. apiVersion, kind, uid?
Name string `json:"name,omitempty"`
}
// ObjectReference is a reference to an object. If the namespace field is set,
// it is assumed to be in a namespace
type ObjectReference struct {
Name string `json:"name"`
Kind string `json:"kind,omitempty"`
}
const (
ClusterIssuerKind = "ClusterIssuer"
IssuerKind = "Issuer"
)
type SecretKeySelector struct {
// The name of the secret in the pod's namespace to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// The key of the secret to select from. Must be a valid secret key.
Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
}
| 1 | 11,053 | Can we update this PR to not be a breaking change? i.e. if a user specifies `accessKeyID`, it is still used. But if a user specifies `accessKeyIDSecretRef`, it takes precedence? | jetstack-cert-manager | go |
@@ -36,7 +36,7 @@ class UsersController < ApplicationController
def save
@title = t "users.new.title"
- if params[:decline]
+ if params[:decline] || !params[:read_tou] || params[:read_tou] == "0"
if current_user
current_user.terms_seen = true
| 1 | class UsersController < ApplicationController
layout "site"
skip_before_action :verify_authenticity_token, :only => [:auth_success]
before_action :disable_terms_redirect, :only => [:terms, :save, :logout]
before_action :authorize_web
before_action :set_locale
before_action :check_database_readable
authorize_resource
before_action :require_self, :only => [:account]
before_action :check_database_writable, :only => [:new, :account, :confirm, :confirm_email, :lost_password, :reset_password, :go_public, :make_friend, :remove_friend]
before_action :require_cookies, :only => [:new, :login, :confirm]
before_action :lookup_user_by_name, :only => [:set_status, :delete]
before_action :allow_thirdparty_images, :only => [:show, :account]
def terms
@legale = params[:legale] || OSM.ip_to_country(request.remote_ip) || Settings.default_legale
@text = OSM.legal_text_for_country(@legale)
if request.xhr?
render :partial => "terms"
else
@title = t "users.terms.title"
if current_user&.terms_agreed?
# Already agreed to terms, so just show settings
redirect_to :action => :account, :display_name => current_user.display_name
elsif current_user.nil? && session[:new_user].nil?
redirect_to :action => :login, :referer => request.fullpath
end
end
end
def save
@title = t "users.new.title"
if params[:decline]
if current_user
current_user.terms_seen = true
flash[:notice] = t("users.new.terms declined", :url => t("users.new.terms declined url")).html_safe if current_user.save
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => :account, :display_name => current_user.display_name
end
else
redirect_to t("users.terms.declined")
end
elsif current_user
unless current_user.terms_agreed?
current_user.consider_pd = params[:user][:consider_pd]
current_user.terms_agreed = Time.now.getutc
current_user.terms_seen = true
flash[:notice] = t "users.new.terms accepted" if current_user.save
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => :account, :display_name => current_user.display_name
end
else
self.current_user = session.delete(:new_user)
if check_signup_allowed(current_user.email)
current_user.data_public = true
current_user.description = "" if current_user.description.nil?
current_user.creation_ip = request.remote_ip
current_user.languages = http_accept_language.user_preferred_languages
current_user.terms_agreed = Time.now.getutc
current_user.terms_seen = true
if current_user.auth_uid.blank?
current_user.auth_provider = nil
current_user.auth_uid = nil
end
if current_user.save
flash[:piwik_goal] = PIWIK["goals"]["signup"] if defined?(PIWIK)
referer = welcome_path
begin
uri = URI(session[:referer])
%r{map=(.*)/(.*)/(.*)}.match(uri.fragment) do |m|
editor = Rack::Utils.parse_query(uri.query).slice("editor")
referer = welcome_path({ "zoom" => m[1],
"lat" => m[2],
"lon" => m[3] }.merge(editor))
end
rescue StandardError
# Use default
end
if current_user.status == "active"
session[:referer] = referer
successful_login(current_user)
else
session[:token] = current_user.tokens.create.token
Notifier.signup_confirm(current_user, current_user.tokens.create(:referer => referer)).deliver_later
redirect_to :action => "confirm", :display_name => current_user.display_name
end
else
render :action => "new", :referer => params[:referer]
end
end
end
end
def account
@tokens = current_user.oauth_tokens.authorized
if params[:user] && params[:user][:display_name] && params[:user][:description]
if params[:user][:auth_provider].blank? ||
(params[:user][:auth_provider] == current_user.auth_provider &&
params[:user][:auth_uid] == current_user.auth_uid)
update_user(current_user, params)
else
session[:new_user_settings] = params
redirect_to auth_url(params[:user][:auth_provider], params[:user][:auth_uid])
end
elsif errors = session.delete(:user_errors)
errors.each do |attribute, error|
current_user.errors.add(attribute, error)
end
end
@title = t "users.account.title"
end
def go_public
current_user.data_public = true
current_user.save
flash[:notice] = t "users.go_public.flash success"
redirect_to :action => "account", :display_name => current_user.display_name
end
def lost_password
@title = t "users.lost_password.title"
if params[:user] && params[:user][:email]
user = User.visible.find_by(:email => params[:user][:email])
if user.nil?
users = User.visible.where("LOWER(email) = LOWER(?)", params[:user][:email])
user = users.first if users.count == 1
end
if user
token = user.tokens.create
Notifier.lost_password(user, token).deliver_later
flash[:notice] = t "users.lost_password.notice email on way"
redirect_to :action => "login"
else
flash.now[:error] = t "users.lost_password.notice email cannot find"
end
end
end
def reset_password
@title = t "users.reset_password.title"
if params[:token]
token = UserToken.find_by(:token => params[:token])
if token
self.current_user = token.user
if params[:user]
current_user.pass_crypt = params[:user][:pass_crypt]
current_user.pass_crypt_confirmation = params[:user][:pass_crypt_confirmation]
current_user.status = "active" if current_user.status == "pending"
current_user.email_valid = true
if current_user.save
token.destroy
flash[:notice] = t "users.reset_password.flash changed"
successful_login(current_user)
end
end
else
flash[:error] = t "users.reset_password.flash token bad"
redirect_to :action => "lost_password"
end
else
head :bad_request
end
end
def new
@title = t "users.new.title"
@referer = params[:referer] || session[:referer]
append_content_security_policy_directives(
:form_action => %w[accounts.google.com *.facebook.com login.live.com github.com meta.wikimedia.org]
)
if current_user
# The user is logged in already, so don't show them the signup
# page; instead, send them to the home page
if @referer
redirect_to @referer
else
redirect_to :controller => "site", :action => "index"
end
elsif params.key?(:auth_provider) && params.key?(:auth_uid)
self.current_user = User.new(:email => params[:email],
:email_confirmation => params[:email],
:display_name => params[:nickname],
:auth_provider => params[:auth_provider],
:auth_uid => params[:auth_uid])
flash.now[:notice] = render_to_string :partial => "auth_association"
else
check_signup_allowed
self.current_user = User.new
end
end
def create
self.current_user = User.new(user_params)
if check_signup_allowed(current_user.email)
session[:referer] = params[:referer]
current_user.status = "pending"
if current_user.auth_provider.present? && current_user.pass_crypt.empty?
# We are creating an account with external authentication and
# no password was specified so create a random one
current_user.pass_crypt = SecureRandom.base64(16)
current_user.pass_crypt_confirmation = current_user.pass_crypt
end
if current_user.invalid?
# Something is wrong with a new user, so rerender the form
render :action => "new"
elsif current_user.auth_provider.present?
# Verify external authenticator before moving on
session[:new_user] = current_user
redirect_to auth_url(current_user.auth_provider, current_user.auth_uid)
else
# Save the user record
session[:new_user] = current_user
redirect_to :action => :terms
end
end
end
def login
session[:referer] = params[:referer] if params[:referer]
if params[:username].present? && params[:password].present?
session[:remember_me] ||= params[:remember_me]
password_authentication(params[:username], params[:password])
end
end
def logout
@title = t "users.logout.title"
if params[:session] == session.id
if session[:token]
token = UserToken.find_by(:token => session[:token])
token&.destroy
session.delete(:token)
end
session.delete(:user)
session_expires_automatically
if params[:referer]
redirect_to params[:referer]
else
redirect_to :controller => "site", :action => "index"
end
end
end
def confirm
if request.post?
token = UserToken.find_by(:token => params[:confirm_string])
if token&.user&.active?
flash[:error] = t("users.confirm.already active")
redirect_to :action => "login"
elsif !token || token.expired?
flash[:error] = t("users.confirm.unknown token")
redirect_to :action => "confirm"
else
user = token.user
user.status = "active"
user.email_valid = true
flash[:notice] = gravatar_status_message(user) if gravatar_enable(user)
user.save!
referer = token.referer
token.destroy
if session[:token]
token = UserToken.find_by(:token => session[:token])
session.delete(:token)
else
token = nil
end
if token.nil? || token.user != user
flash[:notice] = t("users.confirm.success")
redirect_to :action => :login, :referer => referer
else
token.destroy
session[:user] = user.id
redirect_to referer || welcome_path
end
end
else
user = User.find_by(:display_name => params[:display_name])
redirect_to root_path if user.nil? || user.active?
end
end
def confirm_resend
user = User.find_by(:display_name => params[:display_name])
token = UserToken.find_by(:token => session[:token])
if user.nil? || token.nil? || token.user != user
flash[:error] = t "users.confirm_resend.failure", :name => params[:display_name]
else
Notifier.signup_confirm(user, user.tokens.create).deliver_later
flash[:notice] = t("users.confirm_resend.success", :email => user.email, :sender => Settings.support_email).html_safe
end
redirect_to :action => "login"
end
def confirm_email
if request.post?
token = UserToken.find_by(:token => params[:confirm_string])
if token&.user&.new_email?
self.current_user = token.user
current_user.email = current_user.new_email
current_user.new_email = nil
current_user.email_valid = true
gravatar_enabled = gravatar_enable(current_user)
if current_user.save
flash[:notice] = if gravatar_enabled
t("users.confirm_email.success") + " " + gravatar_status_message(current_user)
else
t("users.confirm_email.success")
end
else
flash[:errors] = current_user.errors
end
token.destroy
session[:user] = current_user.id
redirect_to :action => "account", :display_name => current_user.display_name
elsif token
flash[:error] = t "users.confirm_email.failure"
redirect_to :action => "account", :display_name => token.user.display_name
else
flash[:error] = t "users.confirm_email.unknown_token"
end
end
end
def show
@user = User.find_by(:display_name => params[:display_name])
if @user &&
(@user.visible? || (current_user&.administrator?))
@title = @user.display_name
else
render_unknown_user params[:display_name]
end
end
def make_friend
@new_friend = User.find_by(:display_name => params[:display_name])
if @new_friend
if request.post?
friend = Friend.new
friend.befriender = current_user
friend.befriendee = @new_friend
if current_user.is_friends_with?(@new_friend)
flash[:warning] = t "users.make_friend.already_a_friend", :name => @new_friend.display_name
elsif friend.save
flash[:notice] = t "users.make_friend.success", :name => @new_friend.display_name
Notifier.friend_notification(friend).deliver_later
else
friend.add_error(t("users.make_friend.failed", :name => @new_friend.display_name))
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => "show"
end
end
else
render_unknown_user params[:display_name]
end
end
def remove_friend
@friend = User.find_by(:display_name => params[:display_name])
if @friend
if request.post?
if current_user.is_friends_with?(@friend)
Friend.where(:user_id => current_user.id, :friend_user_id => @friend.id).delete_all
flash[:notice] = t "users.remove_friend.success", :name => @friend.display_name
else
flash[:error] = t "users.remove_friend.not_a_friend", :name => @friend.display_name
end
if params[:referer]
redirect_to params[:referer]
else
redirect_to :action => "show"
end
end
else
render_unknown_user params[:display_name]
end
end
##
# sets a user's status
def set_status
@user.status = params[:status]
@user.save
redirect_to user_path(:display_name => params[:display_name])
end
##
# delete a user, marking them as deleted and removing personal data
def delete
@user.delete
redirect_to user_path(:display_name => params[:display_name])
end
##
# display a list of users matching specified criteria
def index
if request.post?
ids = params[:user].keys.collect(&:to_i)
User.where(:id => ids).update_all(:status => "confirmed") if params[:confirm]
User.where(:id => ids).update_all(:status => "deleted") if params[:hide]
redirect_to url_for(:status => params[:status], :ip => params[:ip], :page => params[:page])
else
@params = params.permit(:status, :ip)
conditions = {}
conditions[:status] = @params[:status] if @params[:status]
conditions[:creation_ip] = @params[:ip] if @params[:ip]
@user_pages, @users = paginate(:users,
:conditions => conditions,
:order => :id,
:per_page => 50)
end
end
##
# omniauth success callback
def auth_success
auth_info = request.env["omniauth.auth"]
provider = auth_info[:provider]
uid = auth_info[:uid]
name = auth_info[:info][:name]
email = auth_info[:info][:email]
case provider
when "openid"
email_verified = uid.match(%r{https://www.google.com/accounts/o8/id?(.*)}) ||
uid.match(%r{https://me.yahoo.com/(.*)})
when "google", "facebook"
email_verified = true
else
email_verified = false
end
if settings = session.delete(:new_user_settings)
current_user.auth_provider = provider
current_user.auth_uid = uid
update_user(current_user, settings)
session[:user_errors] = current_user.errors.as_json
redirect_to :action => "account", :display_name => current_user.display_name
elsif session[:new_user]
session[:new_user].auth_provider = provider
session[:new_user].auth_uid = uid
session[:new_user].status = "active" if email_verified && email == session[:new_user].email
redirect_to :action => "terms"
else
user = User.find_by(:auth_provider => provider, :auth_uid => uid)
if user.nil? && provider == "google"
openid_url = auth_info[:extra][:id_info]["openid_id"]
user = User.find_by(:auth_provider => "openid", :auth_uid => openid_url) if openid_url
user&.update(:auth_provider => provider, :auth_uid => uid)
end
if user
case user.status
when "pending" then
unconfirmed_login(user)
when "active", "confirmed" then
successful_login(user, request.env["omniauth.params"]["referer"])
when "suspended" then
failed_login t("users.login.account is suspended", :webmaster => "mailto:#{Settings.support_email}").html_safe
else
failed_login t("users.login.auth failure")
end
else
redirect_to :action => "new", :nickname => name, :email => email,
:auth_provider => provider, :auth_uid => uid
end
end
end
##
# omniauth failure callback
def auth_failure
flash[:error] = t("users.auth_failure." + params[:message])
redirect_to params[:origin] || login_url
end
private
##
# handle password authentication
def password_authentication(username, password)
if user = User.authenticate(:username => username, :password => password)
successful_login(user)
elsif user = User.authenticate(:username => username, :password => password, :pending => true)
unconfirmed_login(user)
elsif User.authenticate(:username => username, :password => password, :suspended => true)
failed_login t("users.login.account is suspended", :webmaster => "mailto:#{Settings.support_email}").html_safe, username
else
failed_login t("users.login.auth failure"), username
end
end
##
# return the URL to use for authentication
def auth_url(provider, uid, referer = nil)
params = { :provider => provider }
params[:openid_url] = openid_expand_url(uid) if provider == "openid"
if referer.nil?
params[:origin] = request.path
else
params[:origin] = request.path + "?referer=" + CGI.escape(referer)
params[:referer] = referer
end
auth_path(params)
end
##
# special case some common OpenID providers by applying heuristics to
# try and come up with the correct URL based on what the user entered
def openid_expand_url(openid_url)
if openid_url.nil?
nil
elsif openid_url.match(%r{(.*)gmail.com(/?)$}) || openid_url.match(%r{(.*)googlemail.com(/?)$})
# Special case gmail.com as it is potentially a popular OpenID
# provider and, unlike yahoo.com, where it works automatically, Google
# have hidden their OpenID endpoint somewhere obscure, thus making it
# somewhat less user friendly.
"https://www.google.com/accounts/o8/id"
else
openid_url
end
end
##
# process a successful login
def successful_login(user, referer = nil)
session[:user] = user.id
session_expires_after 28.days if session[:remember_me]
target = referer || session[:referer] || url_for(:controller => :site, :action => :index)
# The user is logged in, so decide where to send them:
#
# - If they haven't seen the contributor terms, send them there.
# - If they have a block on them, show them that.
# - If they were referred to the login, send them back there.
# - Otherwise, send them to the home page.
if !user.terms_seen
redirect_to :action => :terms, :referer => target
elsif user.blocked_on_view
redirect_to user.blocked_on_view, :referer => target
else
redirect_to target
end
session.delete(:remember_me)
session.delete(:referer)
end
##
# process a failed login
def failed_login(message, username = nil)
flash[:error] = message
redirect_to :action => "login", :referer => session[:referer],
:username => username, :remember_me => session[:remember_me]
session.delete(:remember_me)
session.delete(:referer)
end
##
#
def unconfirmed_login(user)
session[:token] = user.tokens.create.token
redirect_to :action => "confirm", :display_name => user.display_name
session.delete(:remember_me)
session.delete(:referer)
end
##
# update a user's details
def update_user(user, params)
user.display_name = params[:user][:display_name]
user.new_email = params[:user][:new_email]
unless params[:user][:pass_crypt].empty? && params[:user][:pass_crypt_confirmation].empty?
user.pass_crypt = params[:user][:pass_crypt]
user.pass_crypt_confirmation = params[:user][:pass_crypt_confirmation]
end
if params[:user][:description] != user.description
user.description = params[:user][:description]
user.description_format = "markdown"
end
user.languages = params[:user][:languages].split(",")
case params[:image_action]
when "new" then
user.image = params[:user][:image]
user.image_use_gravatar = false
when "delete" then
user.image = nil
user.image_use_gravatar = false
when "gravatar" then
user.image = nil
user.image_use_gravatar = true
end
user.home_lat = params[:user][:home_lat]
user.home_lon = params[:user][:home_lon]
user.preferred_editor = if params[:user][:preferred_editor] == "default"
nil
else
params[:user][:preferred_editor]
end
if params[:user][:auth_provider].nil? || params[:user][:auth_provider].blank?
user.auth_provider = nil
user.auth_uid = nil
end
if user.save
set_locale(true)
if user.new_email.blank? || user.new_email == user.email
flash.now[:notice] = t "users.account.flash update success"
else
user.email = user.new_email
if user.valid?
flash.now[:notice] = t "users.account.flash update success confirm needed"
begin
Notifier.email_confirm(user, user.tokens.create).deliver_later
rescue StandardError
# Ignore errors sending email
end
else
current_user.errors.add(:new_email, current_user.errors[:email])
current_user.errors.add(:email, [])
end
user.restore_email!
end
end
end
##
# require that the user in the URL is the logged in user
def require_self
head :forbidden if params[:display_name] != current_user.display_name
end
##
# ensure that there is a "user" instance variable
def lookup_user_by_name
@user = User.find_by(:display_name => params[:display_name])
rescue ActiveRecord::RecordNotFound
redirect_to :action => "view", :display_name => params[:display_name] unless @user
end
##
#
def disable_terms_redirect
# this is necessary because otherwise going to the user terms page when
# the terms have not yet been agreed would cause an infinite redirect loop.
# it's .now so that this doesn't propagate to other pages.
flash.now[:skip_terms] = true
end
##
# return permitted user parameters
def user_params
params.require(:user).permit(:email, :email_confirmation, :display_name,
:auth_provider, :auth_uid,
:pass_crypt, :pass_crypt_confirmation)
end
##
# check signup acls
def check_signup_allowed(email = nil)
domain = if email.nil?
nil
else
email.split("@").last
end
if blocked = Acl.no_account_creation(request.remote_ip, domain)
logger.info "Blocked signup from #{request.remote_ip} for #{email}"
render :action => "blocked"
end
!blocked
end
##
# check if this user has a gravatar and set the user pref accordingly
def gravatar_enable(user)
# code from example https://en.gravatar.com/site/implement/images/ruby/
return false if user.image.present?
hash = Digest::MD5.hexdigest(user.email.downcase)
url = "https://www.gravatar.com/avatar/#{hash}?d=404" # without d=404 we will always get an image back
response = OSM.http_client.get(URI.parse(url))
oldsetting = user.image_use_gravatar
user.image_use_gravatar = response.success?
oldsetting != user.image_use_gravatar
end
##
# display a message about the current status of the gravatar setting
def gravatar_status_message(user)
if user.image_use_gravatar
t "users.account.gravatar.enabled"
else
t "users.account.gravatar.disabled"
end
end
end
| 1 | 11,607 | You don't actually need both tests here as "truthiness" means that `"0"` is false and hence the first test will be true... | openstreetmap-openstreetmap-website | rb |
@@ -39,7 +39,15 @@ feature 'User downgrades subscription', js: true do
click_button I18n.t('subscriptions.confirm_cancel')
- expect(page).to have_content "Scheduled for cancellation on February 19, 2013"
expect(page).to have_content I18n.t('subscriptions.flashes.cancel.success')
+
+ click_link I18n.t('subscriptions.reject_refund')
+
+ expect(page).to have_content(
+ I18n.t(
+ 'subscriptions.cancellation_scheduled_on',
+ date: 'February 19, 2013'
+ )
+ )
end
end | 1 | require 'spec_helper'
feature 'User downgrades subscription', js: true do
scenario 'successfully downgrades and then cancels' do
create(:plan, sku: 'prime', name: 'Prime')
basic_plan = create(:basic_plan)
workshop = create(:workshop)
sign_in_as_user_with_subscription
@current_user.should have_active_subscription
visit products_path
expect(find('.header-container')).not_to have_content('Prime Membership')
expect(page).not_to have_link('Subscribe to Prime')
ActionMailer::Base.deliveries.clear
visit my_account_path
click_link I18n.t('subscriptions.cancel')
click_link 'Change to'
expect(page).to have_link I18n.t('subscriptions.cancel')
expect(page).to have_no_content "Scheduled for cancellation"
@current_user.reload
expect(@current_user.subscription.plan).to eq basic_plan
visit workshop_path(workshop)
expect(page).not_to have_css('.free-with-prime')
visit products_path
expect(page).not_to have_css('section.mentor h3', text: 'Your Mentor')
visit my_account_path
click_link I18n.t('subscriptions.cancel')
expect(page).not_to have_content 'deal'
expect(page).not_to have_content 'Change to'
click_button I18n.t('subscriptions.confirm_cancel')
expect(page).to have_content "Scheduled for cancellation on February 19, 2013"
expect(page).to have_content I18n.t('subscriptions.flashes.cancel.success')
end
end
| 1 | 9,295 | Everything else in here is using `I18n`. Should we do that here to be consistent? | thoughtbot-upcase | rb |
@@ -242,8 +242,8 @@ type Config struct {
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
- FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:5473,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
- FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:5473,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
+ FailsafeInboundHostPorts []ProtoPort `config:"port-list;0.0.0.0/0:tcp:22,0.0.0.0/0:udp:68,0.0.0.0/0:tcp:179,0.0.0.0/0:tcp:2379,0.0.0.0/0:tcp:2380,0.0.0.0/0:tcp:5473,0.0.0.0/0:tcp:6443,0.0.0.0/0:tcp:6666,0.0.0.0/0:tcp:6667;die-on-fail"`
+ FailsafeOutboundHostPorts []ProtoPort `config:"port-list;0.0.0.0/0:udp:53,0.0.0.0/0:udp:67,0.0.0.0/0:tcp:179,0.0.0.0/0:tcp:2379,0.0.0.0/0:tcp:2380,0.0.0.0/0:tcp:5473,0.0.0.0/0:tcp:6443,0.0.0.0/0:tcp:6666,0.0.0.0/0:tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"` | 1 | // Copyright (c) 2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"net"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
"github.com/projectcalico/libcalico-go/lib/names"
"github.com/projectcalico/libcalico-go/lib/numorstring"
"github.com/projectcalico/felix/idalloc"
"github.com/projectcalico/typha/pkg/discovery"
)
var (
// RegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value represents a regular expression and is marked by '/' at the start
// and end and cannot have spaces
RegexpIfaceElemRegexp = regexp.MustCompile(`^\/[^\s]+\/$`)
// NonRegexpIfaceElemRegexp matches an individual element in the overall interface list;
// assumes the value is between 1-15 chars long and contains only alphanumerics, '-' or '_'
NonRegexpIfaceElemRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}$`)
IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`)
AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`)
HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`)
StringRegexp = regexp.MustCompile(`^.*$`)
IfaceParamRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,15}$`)
// Host addresses have to be valid IPv4 or IPv6 addresses, or strings up to 64 characters.
HostAddressRegexp = regexp.MustCompile(`^[a-zA-Z0-9:._+-]{1,64}$`)
)
const (
maxUint = ^uint(0)
maxInt = int(maxUint >> 1)
minInt = -maxInt - 1
)
// Source of a config value. Values from higher-numbered sources override
// those from lower-numbered sources. Note: some parameters (such as those
// needed to connect to the datastore) can only be set from a local source.
type Source uint8
const (
Default = iota
DatastoreGlobal
DatastorePerHost
ConfigFile
EnvironmentVariable
InternalOverride
)
var SourcesInDescendingOrder = []Source{InternalOverride, EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal}
func (source Source) String() string {
switch source {
case Default:
return "<default>"
case DatastoreGlobal:
return "datastore (global)"
case DatastorePerHost:
return "datastore (per-host)"
case ConfigFile:
return "config file"
case EnvironmentVariable:
return "environment variable"
case InternalOverride:
return "internal override"
}
return fmt.Sprintf("<unknown(%v)>", uint8(source))
}
func (source Source) Local() bool {
switch source {
case Default, ConfigFile, EnvironmentVariable, InternalOverride:
return true
default:
return false
}
}
// Config contains the best, parsed config values loaded from the various sources.
// We use tags to control the parsing and validation.
type Config struct {
// Configuration parameters.
UseInternalDataplaneDriver bool `config:"bool;true"`
DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"`
// Wireguard configuration
WireguardEnabled bool `config:"bool;false"`
WireguardListeningPort int `config:"int;51820"`
WireguardRoutingRulePriority int `config:"int;99"`
WireguardInterfaceName string `config:"iface-param;wireguard.cali;non-zero"`
WireguardMTU int `config:"int;0"`
BPFEnabled bool `config:"bool;false"`
BPFDisableUnprivileged bool `config:"bool;true"`
BPFLogLevel string `config:"oneof(off,info,debug);off;non-zero"`
BPFDataIfacePattern *regexp.Regexp `config:"regexp;^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*|tunl0$|wireguard.cali$)"`
BPFConnectTimeLoadBalancingEnabled bool `config:"bool;true"`
BPFExternalServiceMode string `config:"oneof(tunnel,dsr);tunnel;non-zero"`
BPFKubeProxyIptablesCleanupEnabled bool `config:"bool;true"`
BPFKubeProxyMinSyncPeriod time.Duration `config:"seconds;1"`
BPFKubeProxyEndpointSlicesEnabled bool `config:"bool;false"`
// DebugBPFCgroupV2 controls the cgroup v2 path that we apply the connect-time load balancer to. Most distros
// are configured for cgroup v1, which prevents all but the root cgroup v2 from working, so this is only useful
// for development right now.
DebugBPFCgroupV2 string `config:"string;;local"`
// DebugBPFMapRepinEnabled can be used to prevent Felix from repinning its BPF maps at startup. This is useful for
// testing with multiple Felix instances running on one host.
DebugBPFMapRepinEnabled bool `config:"bool;true;local"`
DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"`
FelixHostname string `config:"hostname;;local,non-zero"`
EtcdAddr string `config:"authority;127.0.0.1:2379;local"`
EtcdScheme string `config:"oneof(http,https);http;local"`
EtcdKeyFile string `config:"file(must-exist);;local"`
EtcdCertFile string `config:"file(must-exist);;local"`
EtcdCaFile string `config:"file(must-exist);;local"`
EtcdEndpoints []string `config:"endpoint-list;;local"`
TyphaAddr string `config:"authority;;local"`
TyphaK8sServiceName string `config:"string;;local"`
TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"`
TyphaReadTimeout time.Duration `config:"seconds;30;local"`
TyphaWriteTimeout time.Duration `config:"seconds;10;local"`
// Client-side TLS config for Felix's communication with Typha. If any of these are
// specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left
// unset. Felix will then initiate a secure (TLS) connection to Typha. Typha must present
// a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN
// matching TyphaURISAN.
TyphaKeyFile string `config:"file(must-exist);;local"`
TyphaCertFile string `config:"file(must-exist);;local"`
TyphaCAFile string `config:"file(must-exist);;local"`
TyphaCN string `config:"string;;local"`
TyphaURISAN string `config:"string;;local"`
Ipv6Support bool `config:"bool;true"`
IptablesBackend string `config:"oneof(legacy,nft,auto);auto"`
RouteRefreshInterval time.Duration `config:"seconds;90"`
InterfaceRefreshInterval time.Duration `config:"seconds;90"`
DeviceRouteSourceAddress net.IP `config:"ipv4;"`
DeviceRouteProtocol int `config:"int;3"`
RemoveExternalRoutes bool `config:"bool;true"`
IptablesRefreshInterval time.Duration `config:"seconds;90"`
IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"`
IptablesLockFilePath string `config:"file;/run/xtables.lock"`
IptablesLockTimeoutSecs time.Duration `config:"seconds;0"`
IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"`
FeatureDetectOverride map[string]string `config:"keyvaluelist;;"`
IpsetsRefreshInterval time.Duration `config:"seconds;10"`
MaxIpsetSize int `config:"int;1048576;non-zero"`
XDPRefreshInterval time.Duration `config:"seconds;90"`
PolicySyncPathPrefix string `config:"file;;"`
NetlinkTimeoutSecs time.Duration `config:"seconds;10"`
MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"`
MetadataPort int `config:"int(0,65535);8775;die-on-fail"`
OpenstackRegion string `config:"region;;die-on-fail"`
InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"`
InterfaceExclude []*regexp.Regexp `config:"iface-list-regexp;kube-ipvs0"`
ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"`
DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"`
IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"`
LogPrefix string `config:"string;calico-packet"`
LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"`
LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"`
VXLANEnabled bool `config:"bool;false"`
VXLANPort int `config:"int;4789"`
VXLANVNI int `config:"int;4096"`
VXLANMTU int `config:"int;0"`
IPv4VXLANTunnelAddr net.IP `config:"ipv4;"`
VXLANTunnelMACAddr string `config:"string;"`
IpInIpEnabled bool `config:"bool;false"`
IpInIpMtu int `config:"int;0"`
IpInIpTunnelAddr net.IP `config:"ipv4;"`
// Knobs provided to explicitly control whether we add rules to drop encap traffic
// from workloads. We always add them unless explicitly requested not to add them.
AllowVXLANPacketsFromWorkloads bool `config:"bool;false"`
AllowIPIPPacketsFromWorkloads bool `config:"bool;false"`
AWSSrcDstCheck string `config:"oneof(DoNothing,Enable,Disable);DoNothing;non-zero"`
ServiceLoopPrevention string `config:"oneof(Drop,Reject,Disabled);Drop"`
ReportingIntervalSecs time.Duration `config:"seconds;30"`
ReportingTTLSecs time.Duration `config:"seconds;90"`
EndpointReportingEnabled bool `config:"bool;false"`
EndpointReportingDelaySecs time.Duration `config:"seconds;1"`
IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"`
DisableConntrackInvalidCheck bool `config:"bool;false"`
HealthEnabled bool `config:"bool;false"`
HealthPort int `config:"int(0,65535);9099"`
HealthHost string `config:"host-address;localhost"`
PrometheusMetricsEnabled bool `config:"bool;false"`
PrometheusMetricsHost string `config:"host-address;"`
PrometheusMetricsPort int `config:"int(0,65535);9091"`
PrometheusGoMetricsEnabled bool `config:"bool;true"`
PrometheusProcessMetricsEnabled bool `config:"bool;true"`
FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:5473,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:5473,tcp:6443,tcp:6666,tcp:6667;die-on-fail"`
KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"`
NATPortRange numorstring.Port `config:"portrange;"`
NATOutgoingAddress net.IP `config:"ipv4;"`
UsageReportingEnabled bool `config:"bool;true"`
UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
UsageReportingIntervalSecs time.Duration `config:"seconds;86400"`
ClusterGUID string `config:"string;baddecaf"`
ClusterType string `config:"string;"`
CalicoVersion string `config:"string;"`
ExternalNodesCIDRList []string `config:"cidr-list;;die-on-fail"`
DebugMemoryProfilePath string `config:"file;;"`
DebugCPUProfilePath string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"`
DebugDisableLogDropping bool `config:"bool;false"`
DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"`
DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"`
DebugPanicAfter time.Duration `config:"seconds;0"`
DebugSimulateDataRace bool `config:"bool;false"`
// Configure where Felix gets its routing information.
// - workloadIPs: use workload endpoints to construct routes.
// - calicoIPAM: use IPAM data to construct routes.
RouteSource string `config:"oneof(WorkloadIPs,CalicoIPAM);CalicoIPAM"`
RouteTableRange idalloc.IndexRange `config:"route-table-range;1-250;die-on-fail"`
IptablesNATOutgoingInterfaceFilter string `config:"iface-param;"`
SidecarAccelerationEnabled bool `config:"bool;false"`
XDPEnabled bool `config:"bool;true"`
GenericXDPEnabled bool `config:"bool;false"`
Variant string `config:"string;Calico"`
// Configures MTU auto-detection.
MTUIfacePattern *regexp.Regexp `config:"regexp;^((en|wl|ww|sl|ib)[opsx].*|(eth|wlan|wwan).*)"`
// State tracking.
// internalOverrides contains our highest priority config source, generated from internal constraints
// such as kernel version support.
internalOverrides map[string]string
// sourceToRawConfig maps each source to the set of config that was given to us via UpdateFrom.
sourceToRawConfig map[Source]map[string]string
// rawValues maps keys to the current highest-priority raw value.
rawValues map[string]string
// Err holds the most recent error from a config update.
Err error
loadClientConfigFromEnvironment func() (*apiconfig.CalicoAPIConfig, error)
useNodeResourceUpdates bool
}
// Copy makes a copy of the object. Internal state is deep copied but config parameters are only shallow copied.
// This saves work since updates to the copy will trigger the config params to be recalculated.
func (config *Config) Copy() *Config {
// Start by shallow-copying the object.
cp := *config
// Copy the internal state over as a deep copy.
cp.internalOverrides = map[string]string{}
for k, v := range config.internalOverrides {
cp.internalOverrides[k] = v
}
cp.sourceToRawConfig = map[Source]map[string]string{}
for k, v := range config.sourceToRawConfig {
cp.sourceToRawConfig[k] = map[string]string{}
for k2, v2 := range v {
cp.sourceToRawConfig[k][k2] = v2
}
}
cp.rawValues = map[string]string{}
for k, v := range config.rawValues {
cp.rawValues[k] = v
}
return &cp
}
type ProtoPort struct {
Protocol string
Port uint16
}
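// Illustrative sketch, not part of the original file: entries from a
// "port-list" value such as "tcp:22,udp:68" end up as ProtoPort values along
// these lines (the actual parsing lives in the port-list parameter type).
var exampleFailsafeInbound = []ProtoPort{
{Protocol: "tcp", Port: 22},
{Protocol: "udp", Port: 68},
}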
// UpdateFrom parses and merges the rawData from one particular source into this config object.
// If there is a config value already loaded from a higher-priority source, then
// the new value will be ignored (after validation).
func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) {
log.Infof("Merging in config from %v: %v", source, rawData)
// Defensively take a copy of the raw data, in case we've been handed
// a mutable map by mistake.
rawDataCopy := make(map[string]string)
for k, v := range rawData {
if v == "" {
log.WithFields(log.Fields{
"name": k,
"source": source,
}).Info("Ignoring empty configuration parameter. Use value 'none' if " +
"your intention is to explicitly disable the default value.")
continue
}
rawDataCopy[k] = v
}
config.sourceToRawConfig[source] = rawDataCopy
changed, err = config.resolve()
return
}
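// exampleUpdatePrecedence is an illustrative sketch, not part of the original
// file. It calls loadParams() explicitly so the sketch is self-contained (the
// package itself may already do this during initialisation), then shows a
// value from a higher-priority source (environment variable) overriding one
// from a lower-priority source (global datastore).
func exampleUpdatePrecedence() {
loadParams()
cfg := &Config{
sourceToRawConfig: map[Source]map[string]string{},
rawValues: map[string]string{},
internalOverrides: map[string]string{},
}
_, _ = cfg.UpdateFrom(map[string]string{"LogSeverityScreen": "INFO"}, DatastoreGlobal)
_, _ = cfg.UpdateFrom(map[string]string{"LogSeverityScreen": "DEBUG"}, EnvironmentVariable)
// The environment-variable value wins, so this logs "DEBUG".
log.Info("LogSeverityScreen resolved to ", cfg.LogSeverityScreen)
}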
func (config *Config) IsLeader() bool {
return config.Variant == "Calico"
}
func (config *Config) InterfacePrefixes() []string {
return strings.Split(config.InterfacePrefix, ",")
}
func (config *Config) OpenstackActive() bool {
if strings.Contains(strings.ToLower(config.ClusterType), "openstack") {
// OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin
// set this flag.
log.Debug("Cluster type contains OpenStack")
return true
}
// If we get here, either OpenStack isn't present or we're running against an old version
// of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the
// presence of the OpenStack-related parameters.
if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" {
log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active")
return true
}
if config.MetadataPort != 0 && config.MetadataPort != 8775 {
log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active")
return true
}
for _, prefix := range config.InterfacePrefixes() {
if prefix == "tap" {
log.Debug("Interface prefix list contains 'tap', assuming OpenStack")
return true
}
}
log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases")
return false
}
func (config *Config) resolve() (changed bool, err error) {
newRawValues := make(map[string]string)
// Map from lower-case version of name to the highest-priority source found so far.
// We use the lower-case version of the name since we can calculate it both for
// expected and "raw" parameters, which may be used by plugins.
nameToSource := make(map[string]Source)
for _, source := range SourcesInDescendingOrder {
valueLoop:
for rawName, rawValue := range config.sourceToRawConfig[source] {
lowerCaseName := strings.ToLower(rawName)
currentSource := nameToSource[lowerCaseName]
param, ok := knownParams[lowerCaseName]
if !ok {
if source >= currentSource {
// Stash the raw value in case it's useful for an external
// dataplane driver. Use the raw name since the driver may
// want it.
newRawValues[rawName] = rawValue
nameToSource[lowerCaseName] = source
}
log.WithField("raw name", rawName).Info(
"Ignoring unknown config param.")
continue valueLoop
}
metadata := param.GetMetadata()
name := metadata.Name
if metadata.Local && !source.Local() {
log.Warningf("Ignoring local-only configuration for %v from %v",
name, source)
continue valueLoop
}
log.Infof("Parsing value for %v: %v (from %v)",
name, rawValue, source)
var value interface{}
if strings.ToLower(rawValue) == "none" {
// Special case: we allow a value of "none" to force the value to
// the zero value for a field. The zero value often differs from
// the default value. Typically, the zero value means "turn off
// the feature".
if metadata.NonZero {
err = errors.New("non-zero field cannot be set to none")
log.Errorf(
"Failed to parse value for %v: %v from source %v. %v",
name, rawValue, source, err)
config.Err = err
return
}
value = metadata.ZeroValue
log.Infof("Value set to 'none', replacing with zero-value: %#v.",
value)
} else {
value, err = param.Parse(rawValue)
if err != nil {
logCxt := log.WithError(err).WithField("source", source)
if metadata.DieOnParseFailure {
logCxt.Error("Invalid (required) config value.")
config.Err = err
return
} else {
logCxt.WithField("default", metadata.Default).Warn(
"Replacing invalid value with default")
value = metadata.Default
err = nil
}
}
}
log.Infof("Parsed value for %v: %v (from %v)",
name, value, source)
if source < currentSource {
log.Infof("Skipping config value for %v from %v; "+
"already have a value from %v", name,
source, currentSource)
continue
}
field := reflect.ValueOf(config).Elem().FieldByName(name)
field.Set(reflect.ValueOf(value))
newRawValues[name] = rawValue
nameToSource[lowerCaseName] = source
}
}
changed = !reflect.DeepEqual(newRawValues, config.rawValues)
config.rawValues = newRawValues
return
}
func (config *Config) setBy(name string, source Source) bool {
_, set := config.sourceToRawConfig[source][name]
return set
}
func (config *Config) setByConfigFileOrEnvironment(name string) bool {
return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable)
}
func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig {
// We want Felix's datastore connection to be fully configurable using the same
// CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go
// client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a
// long time supported FELIX_XXXYYY environment variables, and we want those to keep working
// too.
// To achieve that, first build a CalicoAPIConfig using libcalico-go's
// LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY
// and XXX_YYY variables.
cfg, err := config.loadClientConfigFromEnvironment()
if err != nil {
log.WithError(err).Panic("Failed to create datastore config")
}
// Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the
// etcd case. Note that the etcd options are set even if the DatastoreType isn't etcdv3.
// This allows the user to rely on the default DatastoreType being etcdv3 and still being able
// to configure the other etcdv3 options. As of the time of this code change, the etcd options
// have no effect if the DatastoreType is not etcdv3.
// Datastore type, either etcdv3 or kubernetes
if config.setByConfigFileOrEnvironment("DatastoreType") {
log.Infof("Overriding DatastoreType from felix config to %s", config.DatastoreType)
if config.DatastoreType == string(apiconfig.EtcdV3) {
cfg.Spec.DatastoreType = apiconfig.EtcdV3
} else if config.DatastoreType == string(apiconfig.Kubernetes) {
cfg.Spec.DatastoreType = apiconfig.Kubernetes
}
}
// Endpoints.
if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 {
log.Infof("Overriding EtcdEndpoints from felix config to %s", config.EtcdEndpoints)
cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",")
cfg.Spec.DatastoreType = apiconfig.EtcdV3
} else if config.setByConfigFileOrEnvironment("EtcdAddr") {
etcdEndpoints := config.EtcdScheme + "://" + config.EtcdAddr
log.Infof("Overriding EtcdEndpoints from felix config to %s", etcdEndpoints)
cfg.Spec.EtcdEndpoints = etcdEndpoints
cfg.Spec.DatastoreType = apiconfig.EtcdV3
}
// TLS.
if config.setByConfigFileOrEnvironment("EtcdKeyFile") {
log.Infof("Overriding EtcdKeyFile from felix config to %s", config.EtcdKeyFile)
cfg.Spec.EtcdKeyFile = config.EtcdKeyFile
}
if config.setByConfigFileOrEnvironment("EtcdCertFile") {
log.Infof("Overriding EtcdCertFile from felix config to %s", config.EtcdCertFile)
cfg.Spec.EtcdCertFile = config.EtcdCertFile
}
if config.setByConfigFileOrEnvironment("EtcdCaFile") {
log.Infof("Overriding EtcdCaFile from felix config to %s", config.EtcdCaFile)
cfg.Spec.EtcdCACertFile = config.EtcdCaFile
}
if !(config.IpInIpEnabled || config.VXLANEnabled || config.BPFEnabled) {
// Polling k8s for node updates is expensive (because we get many superfluous
// updates) so disable if we don't need it.
log.Info("Encap disabled, disabling node poll (if KDD is in use).")
cfg.Spec.K8sDisableNodePoll = true
}
return *cfg
}
// Validate() performs cross-field validation.
func (config *Config) Validate() (err error) {
if config.FelixHostname == "" {
err = errors.New("Failed to determine hostname")
}
if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 {
if config.EtcdScheme == "" {
err = errors.New("EtcdEndpoints and EtcdScheme both missing")
}
if config.EtcdAddr == "" {
err = errors.New("EtcdEndpoints and EtcdAddr both missing")
}
}
// If any client-side TLS config parameters are specified, they _all_ must be - except that
// either TyphaCN or TyphaURISAN may be left unset.
if config.TyphaCAFile != "" ||
config.TyphaCertFile != "" ||
config.TyphaKeyFile != "" ||
config.TyphaCN != "" ||
config.TyphaURISAN != "" {
// Some TLS config specified.
if config.TyphaKeyFile == "" ||
config.TyphaCertFile == "" ||
config.TyphaCAFile == "" ||
(config.TyphaCN == "" && config.TyphaURISAN == "") {
err = errors.New("If any Felix-Typha TLS config parameters are specified," +
" they _all_ must be" +
" - except that either TyphaCN or TyphaURISAN may be left unset.")
}
}
if err != nil {
config.Err = err
}
return
}
var knownParams map[string]param
func loadParams() {
knownParams = make(map[string]param)
config := Config{}
kind := reflect.TypeOf(config)
metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` +
`([^;]*)(?:;` +
`([^;]*))?$`)
for ii := 0; ii < kind.NumField(); ii++ {
field := kind.Field(ii)
tag := field.Tag.Get("config")
if tag == "" {
continue
}
captures := metaRegexp.FindStringSubmatch(tag)
if len(captures) == 0 {
log.Panicf("Failed to parse metadata for config param %v", field.Name)
}
log.Debugf("%v: metadata captures: %#v", field.Name, captures)
kind := captures[1] // Type: "int|oneof|bool|port-list|..."
kindParams := captures[2] // Parameters for the type: e.g. for oneof "http,https"
defaultStr := captures[3] // Default value e.g "1.0"
flags := captures[4]
var param param
var err error
switch kind {
case "bool":
param = &BoolParam{}
case "int":
min := minInt
max := maxInt
if kindParams != "" {
minAndMax := strings.Split(kindParams, ",")
min, err = strconv.Atoi(minAndMax[0])
if err != nil {
log.Panicf("Failed to parse min value for %v", field.Name)
}
max, err = strconv.Atoi(minAndMax[1])
if err != nil {
log.Panicf("Failed to parse max value for %v", field.Name)
}
}
param = &IntParam{Min: min, Max: max}
case "int32":
param = &Int32Param{}
case "mark-bitmask":
param = &MarkBitmaskParam{}
case "float":
param = &FloatParam{}
case "seconds":
param = &SecondsParam{}
case "millis":
param = &MillisParam{}
case "iface-list":
param = &RegexpParam{Regexp: IfaceListRegexp,
Msg: "invalid Linux interface name"}
case "iface-list-regexp":
param = &RegexpPatternListParam{
NonRegexpElemRegexp: NonRegexpIfaceElemRegexp,
RegexpElemRegexp: RegexpIfaceElemRegexp,
Delimiter: ",",
Msg: "list contains invalid Linux interface name or regex pattern",
}
case "regexp":
param = &RegexpPatternParam{}
case "iface-param":
param = &RegexpParam{Regexp: IfaceParamRegexp,
Msg: "invalid Linux interface parameter"}
case "file":
param = &FileParam{
MustExist: strings.Contains(kindParams, "must-exist"),
Executable: strings.Contains(kindParams, "executable"),
}
case "authority":
param = &RegexpParam{Regexp: AuthorityRegexp,
Msg: "invalid URL authority"}
case "ipv4":
param = &Ipv4Param{}
case "endpoint-list":
param = &EndpointListParam{}
case "port-list":
param = &PortListParam{}
case "portrange":
param = &PortRangeParam{}
case "portrange-list":
param = &PortRangeListParam{}
case "hostname":
param = &RegexpParam{Regexp: HostnameRegexp,
Msg: "invalid hostname"}
case "host-address":
param = &RegexpParam{Regexp: HostAddressRegexp,
Msg: "invalid host address"}
case "region":
param = &RegionParam{}
case "oneof":
options := strings.Split(kindParams, ",")
lowerCaseToCanon := make(map[string]string)
for _, option := range options {
lowerCaseToCanon[strings.ToLower(option)] = option
}
param = &OneofListParam{
lowerCaseOptionsToCanonical: lowerCaseToCanon}
case "string":
param = &RegexpParam{Regexp: StringRegexp,
Msg: "invalid string"}
case "cidr-list":
param = &CIDRListParam{}
case "route-table-range":
param = &RouteTableRangeParam{}
case "keyvaluelist":
param = &KeyValueListParam{}
default:
log.Panicf("Unknown type of parameter: %v", kind)
}
metadata := param.GetMetadata()
metadata.Name = field.Name
metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface()
if strings.Contains(flags, "non-zero") {
metadata.NonZero = true
}
if strings.Contains(flags, "die-on-fail") {
metadata.DieOnParseFailure = true
}
if strings.Contains(flags, "local") {
metadata.Local = true
}
if defaultStr != "" {
if strings.Contains(flags, "skip-default-validation") {
metadata.Default = defaultStr
} else {
// Parse the default value and save it in the metadata. Doing
// that here ensures that we syntax-check the defaults now.
defaultVal, err := param.Parse(defaultStr)
if err != nil {
log.Panicf("Invalid default value: %v", err)
}
metadata.Default = defaultVal
}
} else {
metadata.Default = metadata.ZeroValue
}
knownParams[strings.ToLower(field.Name)] = param
}
}
func (config *Config) SetUseNodeResourceUpdates(b bool) {
config.useNodeResourceUpdates = b
}
func (config *Config) UseNodeResourceUpdates() bool {
return config.useNodeResourceUpdates
}
func (config *Config) RawValues() map[string]string {
return config.rawValues
}
func (config *Config) SetLoadClientConfigFromEnvironmentFunction(fnc func() (*apiconfig.CalicoAPIConfig, error)) {
config.loadClientConfigFromEnvironment = fnc
}
// OverrideParam installs a maximum priority parameter override for the given parameter. This is useful for
// disabling features that are found to be unsupported, for example. By using an extra priority class, the
// override will persist even if the host/global config is updated.
func (config *Config) OverrideParam(name, value string) (bool, error) {
config.internalOverrides[name] = value
return config.UpdateFrom(config.internalOverrides, InternalOverride)
}
func (config *Config) TyphaDiscoveryOpts() []discovery.Option {
return []discovery.Option{
discovery.WithAddrOverride(config.TyphaAddr),
discovery.WithKubeService(config.TyphaK8sNamespace, config.TyphaK8sServiceName),
}
}
func New() *Config {
if knownParams == nil {
loadParams()
}
p := &Config{
rawValues: map[string]string{},
sourceToRawConfig: map[Source]map[string]string{},
internalOverrides: map[string]string{},
}
for _, param := range knownParams {
param.setDefault(p)
}
hostname, err := names.Hostname()
if err != nil {
log.Warningf("Failed to get hostname from kernel, "+
"trying HOSTNAME variable: %v", err)
hostname = strings.ToLower(os.Getenv("HOSTNAME"))
}
p.FelixHostname = hostname
p.loadClientConfigFromEnvironment = apiconfig.LoadClientConfigFromEnvironment
return p
}
type param interface {
GetMetadata() *Metadata
Parse(raw string) (result interface{}, err error)
setDefault(*Config)
}
| 1 | 18,603 | What do you think about changing the ordering to be `<protocol>:<cidr>:<port>`? I think it most closely matches what we previously had, so it doesn't change too much for our users. | projectcalico-felix | go |
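For context on the `loadParams` machinery dumped above: each `Config` field carries a `config:"..."` struct tag of the form `type(typeParams);default;flags`, and `metaRegexp` splits it into those four captures before the matching `param` validator is built. The snippet below is a minimal, self-contained Go sketch of that tag-parsing pattern; the struct, field names and tag values are invented for illustration and are not felix code.

```go
package main

import (
	"fmt"
	"reflect"
	"regexp"
)

// Hypothetical config struct using a felix-style tag layout:
// "type(typeParams);default;flags" (the parenthesised params and the
// trailing flags segment are both optional).
type ExampleConfig struct {
	LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING);INFO;non-zero"`
	ReportingInterval int    `config:"int(0,86400);30"`
}

// Same capture layout as the metaRegexp in the dump:
// [1]=kind, [2]=kind parameters, [3]=default, [4]=flags.
var tagRegexp = regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;([^;]*)(?:;([^;]*))?$`)

func main() {
	t := reflect.TypeOf(ExampleConfig{})
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		tag := field.Tag.Get("config")
		if tag == "" {
			continue
		}
		captures := tagRegexp.FindStringSubmatch(tag)
		if captures == nil {
			panic(fmt.Sprintf("bad metadata for %s", field.Name))
		}
		fmt.Printf("%s: kind=%q params=%q default=%q flags=%q\n",
			field.Name, captures[1], captures[2], captures[3], captures[4])
	}
}
```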
@@ -442,9 +442,9 @@ std::string MolToSmiles(const ROMol &mol, bool doIsomericSmiles, bool doKekule,
if (rootedAtAtom == -1) {
rootedAtAtom = std::rand() % mol.getNumAtoms();
// need to find an atom id between 0 and mol.getNumAtoms() exclusively
- PRECONDITION(rootedAtAtom < 0 || static_cast<unsigned int>(
- rootedAtAtom) < mol.getNumAtoms(),
- "rootedAtomAtom must be less than the number of atoms");
+ PRECONDITION(
+ static_cast<unsigned int>(rootedAtAtom) < mol.getNumAtoms(),
+ "rootedAtomAtom must be less than the number of atoms");
}
}
| 1 | //
// Copyright (C) 2002-2019 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include "SmilesWrite.h"
#include <GraphMol/RDKitBase.h>
#include <RDGeneral/types.h>
#include <GraphMol/Canon.h>
#include <GraphMol/new_canon.h>
#include <RDGeneral/BoostStartInclude.h>
#include <boost/lexical_cast.hpp>
#include <boost/foreach.hpp>
#include <boost/dynamic_bitset.hpp>
#include <RDGeneral/BoostEndInclude.h>
#include <sstream>
#include <map>
#include <list>
//#define VERBOSE_CANON 1
namespace RDKit {
namespace SmilesWrite {
const int atomicSmiles[] = {0, 5, 6, 7, 8, 9, 15, 16, 17, 35, 53, -1};
bool inOrganicSubset(int atomicNumber) {
unsigned int idx = 0;
while (atomicSmiles[idx] < atomicNumber && atomicSmiles[idx] != -1) {
++idx;
}
if (atomicSmiles[idx] == atomicNumber) {
return true;
}
return false;
}
std::string GetAtomSmiles(const Atom *atom, bool doKekule, const Bond *bondIn,
bool allHsExplicit, bool isomericSmiles) {
RDUNUSED_PARAM(bondIn);
PRECONDITION(atom, "bad atom");
INT_VECT atomicSmilesVect(
atomicSmiles,
atomicSmiles + (sizeof(atomicSmiles) - 1) / sizeof(atomicSmiles[0]));
std::string res;
int fc = atom->getFormalCharge();
int num = atom->getAtomicNum();
int isotope = atom->getIsotope();
bool needsBracket = false;
std::string symb;
bool hasCustomSymbol =
atom->getPropIfPresent(common_properties::smilesSymbol, symb);
if (!hasCustomSymbol) {
symb = PeriodicTable::getTable()->getElementSymbol(num);
}
// check for atomic stereochemistry
std::string atString = "";
if (isomericSmiles ||
(atom->hasOwningMol() &&
atom->getOwningMol().hasProp(common_properties::_doIsoSmiles))) {
if (atom->getChiralTag() != Atom::CHI_UNSPECIFIED &&
!atom->hasProp(common_properties::_brokenChirality)) {
switch (atom->getChiralTag()) {
case Atom::CHI_TETRAHEDRAL_CW:
atString = "@@";
break;
case Atom::CHI_TETRAHEDRAL_CCW:
atString = "@";
break;
default:
break;
}
}
}
if (!allHsExplicit && inOrganicSubset(num)) {
// it's a member of the organic subset
// -----
// figure out if we need to put a bracket around the atom,
// the conditions for this are:
// - formal charge specified
// - the atom has a nonstandard valence
// - chirality present and writing isomeric smiles
// - non-default isotope and writing isomeric smiles
// - atom-map information present
const INT_VECT &defaultVs = PeriodicTable::getTable()->getValenceList(num);
int totalValence = atom->getTotalValence();
bool nonStandard = false;
if (hasCustomSymbol || atom->getNumRadicalElectrons()) {
nonStandard = true;
} else if ((num == 7 || num == 15) && atom->getIsAromatic() &&
atom->getNumExplicitHs()) {
// another type of "nonstandard" valence is an aromatic N or P with
// explicit Hs indicated:
nonStandard = true;
} else {
nonStandard =
(totalValence != defaultVs.front() && atom->getTotalNumHs());
}
if (fc || nonStandard ||
atom->hasProp(common_properties::molAtomMapNumber)) {
needsBracket = true;
} else if ((isomericSmiles || (atom->hasOwningMol() &&
atom->getOwningMol().hasProp(
common_properties::_doIsoSmiles))) &&
(isotope || atString != "")) {
needsBracket = true;
}
} else {
needsBracket = true;
}
if (needsBracket) res += "[";
if (isotope && (isomericSmiles || (atom->hasOwningMol() &&
atom->getOwningMol().hasProp(
common_properties::_doIsoSmiles)))) {
res += std::to_string(isotope);
}
// this was originally only done for the organic subset,
// applying it to other atom-types is a fix for Issue 3152751:
if (!doKekule && atom->getIsAromatic() && symb[0] >= 'A' && symb[0] <= 'Z') {
symb[0] -= ('A' - 'a');
}
res += symb;
res += atString;
if (needsBracket) {
unsigned int totNumHs = atom->getTotalNumHs();
if (totNumHs > 0) {
res += "H";
if (totNumHs > 1) res += std::to_string(totNumHs);
}
if (fc > 0) {
res += "+";
if (fc > 1) res += std::to_string(fc);
} else if (fc < 0) {
if (fc < -1)
res += std::to_string(fc);
else
res += "-";
}
int mapNum;
if (atom->getPropIfPresent(common_properties::molAtomMapNumber, mapNum)) {
res += ":";
res += std::to_string(mapNum);
}
res += "]";
}
// If the atom has this property, the contained string will
// be inserted directly in the SMILES:
std::string label;
if (atom->getPropIfPresent(common_properties::_supplementalSmilesLabel,
label)) {
res += label;
}
return res;
}
std::string GetBondSmiles(const Bond *bond, int atomToLeftIdx, bool doKekule,
bool allBondsExplicit) {
PRECONDITION(bond, "bad bond");
if (atomToLeftIdx < 0) atomToLeftIdx = bond->getBeginAtomIdx();
std::string res = "";
bool aromatic = false;
if (!doKekule && (bond->getBondType() == Bond::SINGLE ||
bond->getBondType() == Bond::DOUBLE ||
bond->getBondType() == Bond::AROMATIC)) {
if (bond->hasOwningMol()) {
auto a1 = bond->getOwningMol().getAtomWithIdx(atomToLeftIdx);
auto a2 = bond->getOwningMol().getAtomWithIdx(
bond->getOtherAtomIdx(atomToLeftIdx));
if ((a1->getIsAromatic() && a2->getIsAromatic()) &&
(a1->getAtomicNum() || a2->getAtomicNum()))
aromatic = true;
} else {
aromatic = false;
}
}
Bond::BondDir dir = bond->getBondDir();
if (bond->hasProp(common_properties::_TraversalRingClosureBond)) {
// std::cerr<<"FLIP: "<<bond->getIdx()<<"
// "<<bond->getBeginAtomIdx()<<"-"<<bond->getEndAtomIdx()<<std::endl;
// if(dir==Bond::ENDDOWNRIGHT) dir=Bond::ENDUPRIGHT;
// else if(dir==Bond::ENDUPRIGHT) dir=Bond::ENDDOWNRIGHT;
bond->clearProp(common_properties::_TraversalRingClosureBond);
}
switch (bond->getBondType()) {
case Bond::SINGLE:
if (dir != Bond::NONE && dir != Bond::UNKNOWN) {
switch (dir) {
case Bond::ENDDOWNRIGHT:
if (allBondsExplicit ||
(bond->hasOwningMol() &&
bond->getOwningMol().hasProp(common_properties::_doIsoSmiles)))
res = "\\";
break;
case Bond::ENDUPRIGHT:
if (allBondsExplicit ||
(bond->hasOwningMol() &&
bond->getOwningMol().hasProp(common_properties::_doIsoSmiles)))
res = "/";
break;
default:
if (allBondsExplicit) res = "-";
break;
}
} else {
// if the bond is marked as aromatic and the two atoms
// are aromatic, we need no marker (this arises in kekulized
// molecules).
// FIX: we should be able to dump kekulized smiles
// currently this is possible by removing all
// isAromatic flags, but there should maybe be another way
if (allBondsExplicit)
res = "-";
else if (aromatic && !bond->getIsAromatic())
res = "-";
}
break;
case Bond::DOUBLE:
// see note above
if (!aromatic || !bond->getIsAromatic() || allBondsExplicit) res = "=";
break;
case Bond::TRIPLE:
res = "#";
break;
case Bond::AROMATIC:
if (dir != Bond::NONE && dir != Bond::UNKNOWN) {
switch (dir) {
case Bond::ENDDOWNRIGHT:
if (allBondsExplicit ||
(bond->hasOwningMol() &&
bond->getOwningMol().hasProp(common_properties::_doIsoSmiles)))
res = "\\";
break;
case Bond::ENDUPRIGHT:
if (allBondsExplicit ||
(bond->hasOwningMol() &&
bond->getOwningMol().hasProp(common_properties::_doIsoSmiles)))
res = "/";
break;
default:
if (allBondsExplicit || !aromatic) res = ":";
break;
}
} else if (allBondsExplicit || !aromatic) {
res = ":";
}
break;
case Bond::DATIVE:
if (atomToLeftIdx >= 0 &&
bond->getBeginAtomIdx() == static_cast<unsigned int>(atomToLeftIdx))
res = "->";
else
res = "<-";
break;
default:
res = "~";
}
return res;
}
std::string FragmentSmilesConstruct(
ROMol &mol, int atomIdx, std::vector<Canon::AtomColors> &colors,
const UINT_VECT &ranks, bool doKekule, bool canonical,
bool doIsomericSmiles, bool allBondsExplicit, bool allHsExplicit,
bool doRandom, std::vector<unsigned int> &atomOrdering,
const boost::dynamic_bitset<> *bondsInPlay = nullptr,
const std::vector<std::string> *atomSymbols = nullptr,
const std::vector<std::string> *bondSymbols = nullptr) {
PRECONDITION(!bondsInPlay || bondsInPlay->size() >= mol.getNumBonds(),
"bad bondsInPlay");
PRECONDITION(!atomSymbols || atomSymbols->size() >= mol.getNumAtoms(),
"bad atomSymbols");
PRECONDITION(!bondSymbols || bondSymbols->size() >= mol.getNumBonds(),
"bad bondSymbols");
Canon::MolStack molStack;
// try to prevent excessive reallocation
molStack.reserve(mol.getNumAtoms() + mol.getNumBonds());
std::stringstream res;
std::map<int, int> ringClosureMap;
int ringIdx, closureVal;
if (!canonical) mol.setProp(common_properties::_StereochemDone, 1);
std::list<unsigned int> ringClosuresToErase;
Canon::canonicalizeFragment(mol, atomIdx, colors, ranks, molStack,
bondsInPlay, bondSymbols, doIsomericSmiles,
doRandom);
Bond *bond = nullptr;
BOOST_FOREACH (Canon::MolStackElem mSE, molStack) {
switch (mSE.type) {
case Canon::MOL_STACK_ATOM:
if (!ringClosuresToErase.empty()) {
BOOST_FOREACH (unsigned int rclosure, ringClosuresToErase) {
ringClosureMap.erase(rclosure);
}
ringClosuresToErase.clear();
}
// std::cout<<"\t\tAtom: "<<mSE.obj.atom->getIdx()<<std::endl;
if (!atomSymbols) {
res << GetAtomSmiles(mSE.obj.atom, doKekule, bond, allHsExplicit,
doIsomericSmiles);
} else {
res << (*atomSymbols)[mSE.obj.atom->getIdx()];
}
atomOrdering.push_back(mSE.obj.atom->getIdx());
break;
case Canon::MOL_STACK_BOND:
bond = mSE.obj.bond;
// std::cout<<"\t\tBond: "<<bond->getIdx()<<std::endl;
if (!bondSymbols) {
res << GetBondSmiles(bond, mSE.number, doKekule, allBondsExplicit);
} else {
res << (*bondSymbols)[bond->getIdx()];
}
break;
case Canon::MOL_STACK_RING:
ringIdx = mSE.number;
// std::cout<<"\t\tRing: "<<ringIdx;
if (ringClosureMap.count(ringIdx)) {
// the index is already in the map ->
// we're closing a ring, so grab
// the index and then delete the value:
closureVal = ringClosureMap[ringIdx];
// ringClosureMap.erase(ringIdx);
ringClosuresToErase.push_back(ringIdx);
} else {
// we're opening a new ring, find the index for it:
closureVal = 1;
bool done = false;
// EFF: there's got to be a more efficient way to do this
while (!done) {
std::map<int, int>::iterator mapIt;
for (mapIt = ringClosureMap.begin(); mapIt != ringClosureMap.end();
mapIt++) {
if (mapIt->second == closureVal) break;
}
if (mapIt == ringClosureMap.end()) {
done = true;
} else {
closureVal += 1;
}
}
ringClosureMap[ringIdx] = closureVal;
}
if (closureVal < 10)
res << (char)(closureVal + '0');
else if (closureVal < 100)
res << '%' << closureVal;
else // use extension to OpenSMILES
res << "%(" << closureVal << ')';
break;
case Canon::MOL_STACK_BRANCH_OPEN:
res << "(";
break;
case Canon::MOL_STACK_BRANCH_CLOSE:
res << ")";
break;
default:
break;
}
}
return res.str();
}
} // end of namespace SmilesWrite
static bool SortBasedOnFirstElement(
const std::pair<std::string, std::vector<unsigned int>> &a,
const std::pair<std::string, std::vector<unsigned int>> &b) {
return a.first < b.first;
}
std::string MolToSmiles(const ROMol &mol, bool doIsomericSmiles, bool doKekule,
int rootedAtAtom, bool canonical, bool allBondsExplicit,
bool allHsExplicit, bool doRandom) {
if (!mol.getNumAtoms()) return "";
PRECONDITION(rootedAtAtom < 0 ||
static_cast<unsigned int>(rootedAtAtom) < mol.getNumAtoms(),
"rootedAtomAtom must be less than the number of atoms");
std::vector<std::vector<int>> fragsMolAtomMapping;
std::vector<ROMOL_SPTR> mols =
MolOps::getMolFrags(mol, false, nullptr, &fragsMolAtomMapping, false);
std::vector<std::string> vfragsmi;
// for(unsigned i=0; i<fragsMolAtomMapping.size(); i++){
// std::cout << i << ": ";
// for(unsigned j=0; j<fragsMolAtomMapping[i].size(); j++){
// std::cout << j <<"("<<fragsMolAtomMapping[i][j]<<") ";
// }
// std::cout << std::endl;
// }
std::vector<std::vector<RDKit::UINT>> allAtomOrdering;
for (unsigned i = 0; i < mols.size(); i++) {
ROMol *tmol = mols[i].get();
// update property cache
for (ROMol::AtomIterator atomIt = tmol->beginAtoms();
atomIt != tmol->endAtoms(); ++atomIt) {
(*atomIt)->updatePropertyCache(false);
}
// clean up the chirality on any atom that is marked as chiral,
// but that should not be:
if (doIsomericSmiles) {
tmol->setProp(common_properties::_doIsoSmiles, 1);
if (!mol.hasProp(common_properties::_StereochemDone)) {
MolOps::assignStereochemistry(*tmol, true);
}
}
#if 0
std::cout << "----------------------------" << std::endl;
std::cout << "MolToSmiles:"<< std::endl;
tmol->debugMol(std::cout);
std::cout << "----------------------------" << std::endl;
#endif
// adding randomness without setting the rootedAtAtom
if (doRandom) {
if (rootedAtAtom == -1) {
rootedAtAtom = std::rand() % mol.getNumAtoms();
// need to find an atom id between 0 and mol.getNumAtoms() exclusively
PRECONDITION(rootedAtAtom < 0 || static_cast<unsigned int>(
rootedAtAtom) < mol.getNumAtoms(),
"rootedAtomAtom must be less than the number of atoms");
}
}
std::string res;
unsigned int nAtoms = tmol->getNumAtoms();
UINT_VECT ranks(nAtoms);
std::vector<unsigned int> atomOrdering;
if (canonical) {
if (tmol->hasProp("_canonicalRankingNumbers")) {
for (unsigned int i = 0; i < tmol->getNumAtoms(); ++i) {
unsigned int rankNum = 0;
tmol->getAtomWithIdx(i)->getPropIfPresent("_canonicalRankingNumber",
rankNum);
ranks[i] = rankNum;
}
} else {
Canon::rankMolAtoms(*tmol, ranks, true, doIsomericSmiles,
doIsomericSmiles);
}
} else {
for (unsigned int i = 0; i < tmol->getNumAtoms(); ++i) ranks[i] = i;
}
#ifdef VERBOSE_CANON
for (unsigned int tmpI = 0; tmpI < ranks.size(); tmpI++) {
std::cout << tmpI << " " << ranks[tmpI] << " "
<< *(tmol->getAtomWithIdx(tmpI)) << std::endl;
}
#endif
std::vector<Canon::AtomColors> colors(nAtoms, Canon::WHITE_NODE);
std::vector<Canon::AtomColors>::iterator colorIt;
colorIt = colors.begin();
// loop to deal with the possibility that there might be disconnected
// fragments
while (colorIt != colors.end()) {
int nextAtomIdx = -1;
std::string subSmi;
// find the next atom for a traverse
if (rootedAtAtom >= 0) {
nextAtomIdx = rootedAtAtom;
rootedAtAtom = -1;
} else {
unsigned int nextRank = nAtoms + 1;
for (unsigned int i = 0; i < nAtoms; i++) {
if (colors[i] == Canon::WHITE_NODE && ranks[i] < nextRank) {
nextRank = ranks[i];
nextAtomIdx = i;
}
}
}
CHECK_INVARIANT(nextAtomIdx >= 0, "no start atom found");
subSmi = SmilesWrite::FragmentSmilesConstruct(
*tmol, nextAtomIdx, colors, ranks, doKekule, canonical,
doIsomericSmiles, allBondsExplicit, allHsExplicit, doRandom,
atomOrdering);
res += subSmi;
colorIt = std::find(colors.begin(), colors.end(), Canon::WHITE_NODE);
if (colorIt != colors.end()) {
res += ".";
}
}
vfragsmi.push_back(res);
for (unsigned int &vit : atomOrdering) {
vit = fragsMolAtomMapping[i][vit]; // Lookup the Id in the original
// molecule
}
allAtomOrdering.push_back(atomOrdering);
}
std::string result;
std::vector<unsigned int> flattenedAtomOrdering;
if (canonical) {
// Sort the vfragsmi, but also sort the atom order vectors into the same
// order
typedef std::pair<std::string, std::vector<unsigned int>> PairStrAndVec;
std::vector<PairStrAndVec> tmp(vfragsmi.size());
for (unsigned int ti = 0; ti < vfragsmi.size(); ++ti)
tmp[ti] = PairStrAndVec(vfragsmi[ti], allAtomOrdering[ti]);
std::sort(tmp.begin(), tmp.end(), SortBasedOnFirstElement);
for (unsigned int ti = 0; ti < vfragsmi.size(); ++ti) {
result += tmp[ti].first;
if (ti < vfragsmi.size() - 1) result += ".";
flattenedAtomOrdering.insert(flattenedAtomOrdering.end(),
tmp[ti].second.begin(),
tmp[ti].second.end());
}
} else { // Not canonical
for (auto &i : allAtomOrdering)
flattenedAtomOrdering.insert(flattenedAtomOrdering.end(), i.begin(),
i.end());
for (unsigned i = 0; i < vfragsmi.size(); ++i) {
result += vfragsmi[i];
if (i < vfragsmi.size() - 1) {
result += ".";
}
}
}
mol.setProp(common_properties::_smilesAtomOutputOrder, flattenedAtomOrdering,
true);
return result;
} // end of MolToSmiles()
std::string MolToCXSmiles(const ROMol &mol, bool doIsomericSmiles,
bool doKekule, int rootedAtAtom, bool canonical,
bool allBondsExplicit, bool allHsExplicit,
bool doRandom) {
auto res = MolToSmiles(mol, doIsomericSmiles, doKekule, rootedAtAtom,
canonical, allBondsExplicit, allHsExplicit, doRandom);
if (!res.empty()) {
auto cxext = SmilesWrite::getCXExtensions(mol);
if (cxext.length()) {
res += " " + cxext;
}
}
return res;
}
std::string MolFragmentToSmiles(const ROMol &mol,
const std::vector<int> &atomsToUse,
const std::vector<int> *bondsToUse,
const std::vector<std::string> *atomSymbols,
const std::vector<std::string> *bondSymbols,
bool doIsomericSmiles, bool doKekule,
int rootedAtAtom, bool canonical,
bool allBondsExplicit, bool allHsExplicit) {
PRECONDITION(atomsToUse.size(), "no atoms provided");
PRECONDITION(rootedAtAtom < 0 ||
static_cast<unsigned int>(rootedAtAtom) < mol.getNumAtoms(),
"rootedAtomAtom must be less than the number of atoms");
PRECONDITION(
rootedAtAtom < 0 || std::find(atomsToUse.begin(), atomsToUse.end(),
rootedAtAtom) != atomsToUse.end(),
"rootedAtomAtom not found in atomsToUse");
PRECONDITION(!atomSymbols || atomSymbols->size() >= mol.getNumAtoms(),
"bad atomSymbols vector");
PRECONDITION(!bondSymbols || bondSymbols->size() >= mol.getNumBonds(),
"bad bondSymbols vector");
if (!mol.getNumAtoms()) return "";
ROMol tmol(mol, true);
if (doIsomericSmiles) {
tmol.setProp(common_properties::_doIsoSmiles, 1);
}
std::string res;
boost::dynamic_bitset<> atomsInPlay(mol.getNumAtoms(), 0);
BOOST_FOREACH (int aidx, atomsToUse) { atomsInPlay.set(aidx); }
// figure out which bonds are actually in play:
boost::dynamic_bitset<> bondsInPlay(mol.getNumBonds(), 0);
if (bondsToUse) {
BOOST_FOREACH (int bidx, *bondsToUse) { bondsInPlay.set(bidx); }
} else {
BOOST_FOREACH (int aidx, atomsToUse) {
ROMol::OEDGE_ITER beg, end;
boost::tie(beg, end) = mol.getAtomBonds(mol.getAtomWithIdx(aidx));
while (beg != end) {
const Bond *bond = mol[*beg];
if (atomsInPlay[bond->getOtherAtomIdx(aidx)])
bondsInPlay.set(bond->getIdx());
++beg;
}
}
}
// copy over the rings that only involve atoms/bonds in this fragment:
if (mol.getRingInfo()->isInitialized()) {
tmol.getRingInfo()->reset();
tmol.getRingInfo()->initialize();
for (unsigned int ridx = 0; ridx < mol.getRingInfo()->numRings(); ++ridx) {
      const INT_VECT &aring = mol.getRingInfo()->atomRings()[ridx];
const INT_VECT &bring = mol.getRingInfo()->bondRings()[ridx];
bool keepIt = true;
BOOST_FOREACH (int aidx, aring) {
if (!atomsInPlay[aidx]) {
keepIt = false;
break;
}
}
if (keepIt) {
BOOST_FOREACH (int bidx, bring) {
if (!bondsInPlay[bidx]) {
keepIt = false;
break;
}
}
}
if (keepIt) {
tmol.getRingInfo()->addRing(aring, bring);
}
}
}
if (tmol.needsUpdatePropertyCache()) {
for (ROMol::AtomIterator atIt = tmol.beginAtoms(); atIt != tmol.endAtoms();
atIt++) {
(*atIt)->updatePropertyCache(false);
}
}
UINT_VECT ranks(tmol.getNumAtoms());
std::vector<unsigned int> atomOrdering;
// clean up the chirality on any atom that is marked as chiral,
// but that should not be:
if (doIsomericSmiles) {
if (!mol.hasProp(common_properties::_StereochemDone)) {
MolOps::assignStereochemistry(tmol, true);
} else {
tmol.setProp(common_properties::_StereochemDone, 1);
// we need the CIP codes:
BOOST_FOREACH (int aidx, atomsToUse) {
const Atom *oAt = mol.getAtomWithIdx(aidx);
std::string cipCode;
if (oAt->getPropIfPresent(common_properties::_CIPCode, cipCode)) {
tmol.getAtomWithIdx(aidx)->setProp(common_properties::_CIPCode,
cipCode);
}
}
}
}
if (canonical) {
Canon::rankFragmentAtoms(tmol, ranks, atomsInPlay, bondsInPlay, atomSymbols,
true, doIsomericSmiles, doIsomericSmiles);
// std::cerr << "RANKS: ";
// std::copy(ranks.begin(), ranks.end(),
// std::ostream_iterator<int>(std::cerr, " "));
// std::cerr << std::endl;
// MolOps::rankAtomsInFragment(tmol,ranks,atomsInPlay,bondsInPlay,atomSymbols,bondSymbols);
} else {
for (unsigned int i = 0; i < tmol.getNumAtoms(); ++i) ranks[i] = i;
}
#ifdef VERBOSE_CANON
for (unsigned int tmpI = 0; tmpI < ranks.size(); tmpI++) {
std::cout << tmpI << " " << ranks[tmpI] << " "
<< *(tmol.getAtomWithIdx(tmpI)) << std::endl;
}
#endif
std::vector<Canon::AtomColors> colors(tmol.getNumAtoms(), Canon::BLACK_NODE);
BOOST_FOREACH (int aidx, atomsToUse) { colors[aidx] = Canon::WHITE_NODE; }
std::vector<Canon::AtomColors>::iterator colorIt;
colorIt = colors.begin();
// loop to deal with the possibility that there might be disconnected
// fragments
while (colorIt != colors.end()) {
int nextAtomIdx = -1;
std::string subSmi;
// find the next atom for a traverse
if (rootedAtAtom >= 0) {
nextAtomIdx = rootedAtAtom;
rootedAtAtom = -1;
} else {
unsigned int nextRank = rdcast<unsigned int>(tmol.getNumAtoms()) + 1;
BOOST_FOREACH (int i, atomsToUse) {
if (colors[i] == Canon::WHITE_NODE && ranks[i] < nextRank) {
nextRank = ranks[i];
nextAtomIdx = i;
}
}
}
CHECK_INVARIANT(nextAtomIdx >= 0, "no start atom found");
subSmi = SmilesWrite::FragmentSmilesConstruct(
tmol, nextAtomIdx, colors, ranks, doKekule, canonical, doIsomericSmiles,
allBondsExplicit, allHsExplicit, false, atomOrdering, &bondsInPlay,
atomSymbols, bondSymbols);
res += subSmi;
colorIt = std::find(colors.begin(), colors.end(), Canon::WHITE_NODE);
if (colorIt != colors.end()) {
res += ".";
}
}
mol.setProp(common_properties::_smilesAtomOutputOrder, atomOrdering, true);
return res;
} // end of MolFragmentToSmiles()
std::string MolFragmentToCXSmiles(const ROMol &mol,
const std::vector<int> &atomsToUse,
const std::vector<int> *bondsToUse,
const std::vector<std::string> *atomSymbols,
const std::vector<std::string> *bondSymbols,
bool doIsomericSmiles, bool doKekule,
int rootedAtAtom, bool canonical,
bool allBondsExplicit, bool allHsExplicit) {
auto res = MolFragmentToSmiles(
mol, atomsToUse, bondsToUse, atomSymbols, bondSymbols, doIsomericSmiles,
doKekule, rootedAtAtom, canonical, allBondsExplicit, allHsExplicit);
auto cxext = SmilesWrite::getCXExtensions(mol);
if (cxext.length()) {
res += " " + cxext;
}
return res;
}
} // namespace RDKit
| 1 | 20,242 | I'm fairly sure this precondition is always true due to the mod (%) | rdkit-rdkit | cpp |
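The reviewer's point: once `rootedAtAtom` has just been assigned `std::rand() % mol.getNumAtoms()` (and the empty-molecule case returned earlier in `MolToSmiles`), the value is necessarily in `[0, getNumAtoms())`, so the `rootedAtAtom < 0` clause the patch drops could never be the deciding condition. A toy illustration of that modulo bound, written in Go only for consistency with the other sketches in this dump (the behaviour of `%` on non-negative operands is the same as in C++):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const numAtoms = 7 // stand-in for mol.getNumAtoms(); must be > 0
	for i := 0; i < 5; i++ {
		// A non-negative value % numAtoms is always in [0, numAtoms),
		// so both "rooted >= 0" and "rooted < numAtoms" hold by construction.
		rooted := rand.Int() % numAtoms
		fmt.Println(rooted, rooted >= 0 && rooted < numAtoms) // always prints "... true"
	}
}
```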
@@ -404,7 +404,7 @@ final class ReaderPool implements Closeable {
private boolean noDups() {
Set<String> seen = new HashSet<>();
for(SegmentCommitInfo info : readerMap.keySet()) {
- assert !seen.contains(info.info.name);
+ assert !seen.contains(info.info.name) : "seen twice: " + info.info.name ;
seen.add(info.info.name);
}
return true; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.index;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.LongSupplier;
import java.util.stream.Collectors;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.CollectionUtil;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InfoStream;
/** Holds shared SegmentReader instances. IndexWriter uses
* SegmentReaders for 1) applying deletes/DV updates, 2) doing
* merges, 3) handing out a real-time reader. This pool
* reuses instances of the SegmentReaders in all these
* places if it is in "near real-time mode" (getReader()
* has been called on this instance). */
final class ReaderPool implements Closeable {
private final Map<SegmentCommitInfo,ReadersAndUpdates> readerMap = new HashMap<>();
private final Directory directory;
private final Directory originalDirectory;
private final FieldInfos.FieldNumbers fieldNumbers;
private final LongSupplier completedDelGenSupplier;
private final InfoStream infoStream;
private final SegmentInfos segmentInfos;
private final String softDeletesField;
// This is a "write once" variable (like the organic dye
// on a DVD-R that may or may not be heated by a laser and
// then cooled to permanently record the event): it's
// false, by default until {@link #enableReaderPooling()}
// is called for the first time,
// at which point it's switched to true and never changes
// back to false. Once this is true, we hold open and
// reuse SegmentReader instances internally for applying
// deletes, doing merges, and reopening near real-time
// readers.
// in practice this should be called once the readers are likely
// to be needed and reused ie if IndexWriter#getReader is called.
private volatile boolean poolReaders;
private final AtomicBoolean closed = new AtomicBoolean(false);
ReaderPool(Directory directory, Directory originalDirectory, SegmentInfos segmentInfos,
FieldInfos.FieldNumbers fieldNumbers, LongSupplier completedDelGenSupplier, InfoStream infoStream,
String softDeletesField, StandardDirectoryReader reader) throws IOException {
this.directory = directory;
this.originalDirectory = originalDirectory;
this.segmentInfos = segmentInfos;
this.fieldNumbers = fieldNumbers;
this.completedDelGenSupplier = completedDelGenSupplier;
this.infoStream = infoStream;
this.softDeletesField = softDeletesField;
if (reader != null) {
// Pre-enroll all segment readers into the reader pool; this is necessary so
// any in-memory NRT live docs are correctly carried over, and so NRT readers
// pulled from this IW share the same segment reader:
List<LeafReaderContext> leaves = reader.leaves();
assert segmentInfos.size() == leaves.size();
for (int i=0;i<leaves.size();i++) {
LeafReaderContext leaf = leaves.get(i);
SegmentReader segReader = (SegmentReader) leaf.reader();
SegmentReader newReader = new SegmentReader(segmentInfos.info(i), segReader, segReader.getLiveDocs(),
segReader.getHardLiveDocs(), segReader.numDocs(), true);
readerMap.put(newReader.getOriginalSegmentInfo(), new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(),
newReader, newPendingDeletes(newReader, newReader.getOriginalSegmentInfo())));
}
}
}
/** Asserts this info still exists in IW's segment infos */
synchronized boolean assertInfoIsLive(SegmentCommitInfo info) {
int idx = segmentInfos.indexOf(info);
assert idx != -1: "info=" + info + " isn't live";
assert segmentInfos.info(idx) == info: "info=" + info + " doesn't match live info in segmentInfos";
return true;
}
/**
* Drops reader for the given {@link SegmentCommitInfo} if it's pooled
* @return <code>true</code> if a reader is pooled
*/
synchronized boolean drop(SegmentCommitInfo info) throws IOException {
final ReadersAndUpdates rld = readerMap.get(info);
if (rld != null) {
assert info == rld.info;
readerMap.remove(info);
rld.dropReaders();
return true;
}
return false;
}
/**
* Returns the sum of the ram used by all the buffered readers and updates in MB
*/
synchronized long ramBytesUsed() {
long bytes = 0;
for (ReadersAndUpdates rld : readerMap.values()) {
bytes += rld.ramBytesUsed.get();
}
return bytes;
}
/**
* Returns <code>true</code> iff any of the buffered readers and updates has at least one pending delete
*/
synchronized boolean anyDeletions() {
for(ReadersAndUpdates rld : readerMap.values()) {
if (rld.getDelCount() > 0) {
return true;
}
}
return false;
}
/**
* Enables reader pooling for this pool. This should be called once the readers in this pool are shared with an
* outside resource like an NRT reader. Once reader pooling is enabled a {@link ReadersAndUpdates} will be kept around
* in the reader pool on calling {@link #release(ReadersAndUpdates, boolean)} until the segment get dropped via calls
* to {@link #drop(SegmentCommitInfo)} or {@link #dropAll()} or {@link #close()}.
* Reader pooling is disabled upon construction but can't be disabled again once it's enabled.
*/
void enableReaderPooling() {
poolReaders = true;
}
boolean isReaderPoolingEnabled() {
return poolReaders;
}
/**
* Releases the {@link ReadersAndUpdates}. This should only be called if the {@link #get(SegmentCommitInfo, boolean)}
   * is called with the create parameter set to true.
* @return <code>true</code> if any files were written by this release call.
*/
synchronized boolean release(ReadersAndUpdates rld, boolean assertInfoLive) throws IOException {
boolean changed = false;
// Matches incRef in get:
rld.decRef();
if (rld.refCount() == 0) {
// This happens if the segment was just merged away,
// while a buffered deletes packet was still applying deletes/updates to it.
assert readerMap.containsKey(rld.info) == false: "seg=" + rld.info
+ " has refCount 0 but still unexpectedly exists in the reader pool";
} else {
// Pool still holds a ref:
assert rld.refCount() > 0: "refCount=" + rld.refCount() + " reader=" + rld.info;
if (poolReaders == false && rld.refCount() == 1 && readerMap.containsKey(rld.info)) {
// This is the last ref to this RLD, and we're not
// pooling, so remove it:
if (rld.writeLiveDocs(directory)) {
// Make sure we only write del docs for a live segment:
assert assertInfoLive == false || assertInfoIsLive(rld.info);
// Must checkpoint because we just
// created new _X_N.del and field updates files;
// don't call IW.checkpoint because that also
// increments SIS.version, which we do not want to
// do here: it was done previously (after we
// invoked BDS.applyDeletes), whereas here all we
// did was move the state to disk:
changed = true;
}
if (rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream)) {
changed = true;
}
if (rld.getNumDVUpdates() == 0) {
rld.dropReaders();
readerMap.remove(rld.info);
} else {
// We are forced to pool this segment until its deletes fully apply (no delGen gaps)
}
}
}
return changed;
}
@Override
public synchronized void close() throws IOException {
if (closed.compareAndSet(false, true)) {
dropAll();
}
}
/**
* Writes all doc values updates to disk if there are any.
* @return <code>true</code> iff any files where written
*/
boolean writeAllDocValuesUpdates() throws IOException {
Collection<ReadersAndUpdates> copy;
synchronized (this) {
// this needs to be protected by the reader pool lock otherwise we hit ConcurrentModificationException
copy = new HashSet<>(readerMap.values());
}
boolean any = false;
for (ReadersAndUpdates rld : copy) {
any |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
}
return any;
}
/**
* Writes all doc values updates to disk if there are any.
* @return <code>true</code> iff any files where written
*/
boolean writeDocValuesUpdatesForMerge(List<SegmentCommitInfo> infos) throws IOException {
boolean any = false;
for (SegmentCommitInfo info : infos) {
ReadersAndUpdates rld = get(info, false);
if (rld != null) {
any |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
rld.setIsMerging();
}
}
return any;
}
/**
   * Returns a list of all currently maintained ReadersAndUpdates sorted by its ram consumption, largest to smallest.
* This list can also contain readers that don't consume any ram at this point ie. don't have any updates buffered.
*/
synchronized List<ReadersAndUpdates> getReadersByRam() {
class RamRecordingHolder {
final ReadersAndUpdates updates;
final long ramBytesUsed;
RamRecordingHolder(ReadersAndUpdates updates) {
this.updates = updates;
this.ramBytesUsed = updates.ramBytesUsed.get();
}
}
final ArrayList<RamRecordingHolder> readersByRam;
synchronized (this) {
if (readerMap.isEmpty()) {
return Collections.emptyList();
}
readersByRam = new ArrayList<>(readerMap.size());
for (ReadersAndUpdates rld : readerMap.values()) {
// we have to record the ram usage once and then sort
// since the ram usage can change concurrently and that will confuse the sort or hit an assertion
        // the lock we can acquire here is not enough; we would need to lock all ReadersAndUpdates to make sure it doesn't
// change
readersByRam.add(new RamRecordingHolder(rld));
}
}
// Sort this outside of the lock by largest ramBytesUsed:
CollectionUtil.introSort(readersByRam, (a, b) -> Long.compare(b.ramBytesUsed, a.ramBytesUsed));
return Collections.unmodifiableList(readersByRam.stream().map(h -> h.updates).collect(Collectors.toList()));
}
/** Remove all our references to readers, and commits
* any pending changes. */
synchronized void dropAll() throws IOException {
Throwable priorE = null;
final Iterator<Map.Entry<SegmentCommitInfo,ReadersAndUpdates>> it = readerMap.entrySet().iterator();
while(it.hasNext()) {
final ReadersAndUpdates rld = it.next().getValue();
// Important to remove as-we-go, not with .clear()
// in the end, in case we hit an exception;
// otherwise we could over-decref if close() is
// called again:
it.remove();
// NOTE: it is allowed that these decRefs do not
// actually close the SRs; this happens when a
// near real-time reader is kept open after the
// IndexWriter instance is closed:
try {
rld.dropReaders();
} catch (Throwable t) {
priorE = IOUtils.useOrSuppress(priorE, t);
}
}
assert readerMap.size() == 0;
if (priorE != null) {
throw IOUtils.rethrowAlways(priorE);
}
}
/**
* Commit live docs changes for the segment readers for
* the provided infos.
*
* @throws IOException If there is a low-level I/O error
*/
synchronized boolean commit(SegmentInfos infos) throws IOException {
boolean atLeastOneChange = false;
for (SegmentCommitInfo info : infos) {
final ReadersAndUpdates rld = readerMap.get(info);
if (rld != null) {
assert rld.info == info;
boolean changed = rld.writeLiveDocs(directory);
changed |= rld.writeFieldUpdates(directory, fieldNumbers, completedDelGenSupplier.getAsLong(), infoStream);
if (changed) {
// Make sure we only write del docs for a live segment:
assert assertInfoIsLive(info);
// Must checkpoint because we just
// created new _X_N.del and field updates files;
// don't call IW.checkpoint because that also
// increments SIS.version, which we do not want to
// do here: it was done previously (after we
// invoked BDS.applyDeletes), whereas here all we
// did was move the state to disk:
atLeastOneChange = true;
}
}
}
return atLeastOneChange;
}
/**
* Returns <code>true</code> iff there are any buffered doc values updates. Otherwise <code>false</code>.
*/
synchronized boolean anyDocValuesChanges() {
for (ReadersAndUpdates rld : readerMap.values()) {
// NOTE: we don't check for pending deletes because deletes carry over in RAM to NRT readers
if (rld.getNumDVUpdates() != 0) {
return true;
}
}
return false;
}
/**
* Obtain a ReadersAndLiveDocs instance from the
* readerPool. If create is true, you must later call
* {@link #release(ReadersAndUpdates, boolean)}.
*/
synchronized ReadersAndUpdates get(SegmentCommitInfo info, boolean create) {
assert info.info.dir == originalDirectory: "info.dir=" + info.info.dir + " vs " + originalDirectory;
if (closed.get()) {
assert readerMap.isEmpty() : "Reader map is not empty: " + readerMap;
throw new AlreadyClosedException("ReaderPool is already closed");
}
ReadersAndUpdates rld = readerMap.get(info);
if (rld == null) {
if (create == false) {
return null;
}
rld = new ReadersAndUpdates(segmentInfos.getIndexCreatedVersionMajor(), info, newPendingDeletes(info));
// Steal initial reference:
readerMap.put(info, rld);
} else {
assert rld.info == info: "rld.info=" + rld.info + " info=" + info + " isLive?=" + assertInfoIsLive(rld.info)
+ " vs " + assertInfoIsLive(info);
}
if (create) {
// Return ref to caller:
rld.incRef();
}
assert noDups();
return rld;
}
private PendingDeletes newPendingDeletes(SegmentCommitInfo info) {
return softDeletesField == null ? new PendingDeletes(info) : new PendingSoftDeletes(softDeletesField, info);
}
private PendingDeletes newPendingDeletes(SegmentReader reader, SegmentCommitInfo info) {
return softDeletesField == null ? new PendingDeletes(reader, info) :
new PendingSoftDeletes(softDeletesField, reader, info);
}
// Make sure that every segment appears only once in the
// pool:
private boolean noDups() {
Set<String> seen = new HashSet<>();
for(SegmentCommitInfo info : readerMap.keySet()) {
assert !seen.contains(info.info.name);
seen.add(info.info.name);
}
return true;
}
} | 1 | 35,175 | I love seeing diffs like this one, adding a `String` message to an otherwise cryptic `assert`! It makes me realize you must have had a hellacious debugging session! | apache-lucene-solr | java |
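The change this comment praises is small but useful: `noDups()` returns `true` so it can sit inside an `assert`, and the added message means a failure now names the duplicated segment instead of reporting a bare false assertion. Since the other sketches in this dump are in Go (which has no `assert` keyword), here is a loose Go adaptation of the same idea, with hypothetical helper names rather than Lucene code:

```go
package main

import "fmt"

// assertNoDups mirrors the intent of ReaderPool.noDups(): walk the pooled
// segment names and, if one is seen twice, fail with a message that names
// the duplicate.
func assertNoDups(segmentNames []string) {
	seen := make(map[string]bool)
	for _, name := range segmentNames {
		if seen[name] {
			panic("seen twice: " + name)
		}
		seen[name] = true
	}
}

func main() {
	assertNoDups([]string{"_0", "_1", "_2"}) // fine
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("invariant violated:", r) // invariant violated: seen twice: _1
		}
	}()
	assertNoDups([]string{"_0", "_1", "_1"})
}
```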
@@ -731,6 +731,8 @@ func loadConfigFromFile(configFile string) (c Local, err error) {
}
// Migrate in case defaults were changed
+ // If a config file does not have version, it is assumed to be zero.
+	// All fields listed in migrate() might be changed if an actual value matches the default value from a previous version.
c, err = migrate(c)
return
} | 1 | // Copyright (C) 2019 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package config
import (
"encoding/json"
"errors"
"io"
"os"
"os/user"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/codecs"
)
// Devnet identifies the 'development network' use for development and not generally accessible publicly
const Devnet protocol.NetworkID = "devnet"
// Devtestnet identifies the 'development network for tests' use for running tests against development and not generally accessible publicly
const Devtestnet protocol.NetworkID = "devtestnet"
// Testnet identifies the publicly-available test network
const Testnet protocol.NetworkID = "testnet"
// Mainnet identifies the publicly-available real-money network
const Mainnet protocol.NetworkID = "mainnet"
// GenesisJSONFile is the name of the genesis.json file
const GenesisJSONFile = "genesis.json"
// Global defines global Algorand protocol parameters which should not be overridden.
type Global struct {
SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential)
BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block)
}
// Protocol holds the global configuration settings for the agreement protocol,
// initialized with our current defaults. This is used across all nodes we create.
var Protocol = Global{
SmallLambda: 2000 * time.Millisecond,
BigLambda: 15000 * time.Millisecond,
}
// ConsensusParams specifies settings that might vary based on the
// particular version of the consensus protocol.
type ConsensusParams struct {
// Consensus protocol upgrades. Votes for upgrades are collected for
// UpgradeVoteRounds. If the number of positive votes is over
// UpgradeThreshold, the proposal is accepted.
//
// UpgradeVoteRounds needs to be long enough to collect an
// accurate sample of participants, and UpgradeThreshold needs
// to be high enough to ensure that there are sufficient participants
// after the upgrade.
//
// There is a delay of UpgradeWaitRounds between approval of
// an upgrade and its deployment, to give clients time to notify users.
UpgradeVoteRounds uint64
UpgradeThreshold uint64
UpgradeWaitRounds uint64
MaxVersionStringLen int
// MaxTxnBytesPerBlock determines the maximum number of bytes
// that transactions can take up in a block. Specifically,
// the sum of the lengths of encodings of each transaction
// in a block must not exceed MaxTxnBytesPerBlock.
MaxTxnBytesPerBlock int
// MaxTxnBytesPerBlock is the maximum size of a transaction's Note field.
MaxTxnNoteBytes int
// MaxTxnLife is how long a transaction can be live for:
// the maximum difference between LastValid and FirstValid.
//
// Note that in a protocol upgrade, the ledger must first be upgraded
// to hold more past blocks for this value to be raised.
MaxTxnLife uint64
// ApprovedUpgrades describes the upgrade proposals that this protocol
// implementation will vote for.
ApprovedUpgrades map[protocol.ConsensusVersion]bool
// SupportGenesisHash indicates support for the GenesisHash
// fields in transactions (and requires them in blocks).
SupportGenesisHash bool
// RequireGenesisHash indicates that GenesisHash must be present
// in every transaction.
RequireGenesisHash bool
// DefaultKeyDilution specifies the granularity of top-level ephemeral
// keys. KeyDilution is the number of second-level keys in each batch,
// signed by a top-level "batch" key. The default value can be
	// overridden in the account state.
DefaultKeyDilution uint64
// MinBalance specifies the minimum balance that can appear in
// an account. To spend money below MinBalance requires issuing
// an account-closing transaction, which transfers all of the
// money from the account, and deletes the account state.
MinBalance uint64
// MinTxnFee specifies the minimum fee allowed on a transaction.
// A minimum fee is necessary to prevent DoS. In some sense this is
// a way of making the spender subsidize the cost of storing this transaction.
MinTxnFee uint64
// RewardUnit specifies the number of MicroAlgos corresponding to one reward
// unit.
//
// Rewards are received by whole reward units. Fractions of
// RewardUnits do not receive rewards.
RewardUnit uint64
// RewardsRateRefreshInterval is the number of rounds after which the
// rewards level is recomputed for the next RewardsRateRefreshInterval rounds.
RewardsRateRefreshInterval uint64
// seed-related parameters
SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec
SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. delta_r in the spec
// ledger retention policy
MaxBalLookback uint64 // (current round - MaxBalLookback) is the oldest round the ledger must answer balance queries for
// sortition threshold factors
NumProposers uint64
SoftCommitteeSize uint64
SoftCommitteeThreshold uint64
CertCommitteeSize uint64
CertCommitteeThreshold uint64
NextCommitteeSize uint64 // for any non-FPR votes >= deadline step, committee sizes and thresholds are constant
NextCommitteeThreshold uint64
LateCommitteeSize uint64
LateCommitteeThreshold uint64
RedoCommitteeSize uint64
RedoCommitteeThreshold uint64
DownCommitteeSize uint64
DownCommitteeThreshold uint64
FastRecoveryLambda time.Duration // time between fast recovery attempts
FastPartitionRecovery bool // set when fast partition recovery is enabled
// commit to payset using a hash of entire payset,
// instead of txid merkle tree
PaysetCommitFlat bool
MaxTimestampIncrement int64 // maximum time between timestamps on successive blocks
// support for the efficient encoding in SignedTxnInBlock
SupportSignedTxnInBlock bool
// force the FeeSink address to be non-participating in the genesis balances.
ForceNonParticipatingFeeSink bool
// support for ApplyData in SignedTxnInBlock
ApplyData bool
// track reward distributions in ApplyData
RewardsInApplyData bool
// domain-separated credentials
CredentialDomainSeparationEnabled bool
// support for transactions that mark an account non-participating
SupportBecomeNonParticipatingTransactions bool
// fix the rewards calculation by avoiding subtracting too much from the rewards pool
PendingResidueRewards bool
// asset support
Asset bool
// max number of assets per account
MaxAssetsPerAccount int
// support sequential transaction counter TxnCounter
TxnCounter bool
// transaction groups
SupportTxGroups bool
// max group size
MaxTxGroupSize int
// support for transaction leases
SupportTransactionLeases bool
// 0 for no support, otherwise highest version supported
LogicSigVersion uint64
// len(LogicSig.Logic) + len(LogicSig.Args[*]) must be less than this
LogicSigMaxSize uint64
// sum of estimated op cost must be less than this
LogicSigMaxCost uint64
}
// Consensus tracks the protocol-level settings for different versions of the
// consensus protocol.
var Consensus map[protocol.ConsensusVersion]ConsensusParams
func init() {
Consensus = make(map[protocol.ConsensusVersion]ConsensusParams)
initConsensusProtocols()
initConsensusTestProtocols()
// This must appear last, since it depends on all of the other
// versions to already be registered (by the above calls).
initConsensusTestFastUpgrade()
// Allow tuning SmallLambda for faster consensus in single-machine e2e
// tests. Useful for development. This might make sense to fold into
// a protocol-version-specific setting, once we move SmallLambda into
// ConsensusParams.
algoSmallLambda, err := strconv.ParseInt(os.Getenv("ALGOSMALLLAMBDAMSEC"), 10, 64)
if err == nil {
Protocol.SmallLambda = time.Duration(algoSmallLambda) * time.Millisecond
}
}
func initConsensusProtocols() {
// WARNING: copying a ConsensusParams by value into a new variable
// does not copy the ApprovedUpgrades map. Make sure that each new
// ConsensusParams structure gets a fresh ApprovedUpgrades map.
// Base consensus protocol version, v7.
v7 := ConsensusParams{
UpgradeVoteRounds: 10000,
UpgradeThreshold: 9000,
UpgradeWaitRounds: 10000,
MaxVersionStringLen: 64,
MinBalance: 10000,
MinTxnFee: 1000,
MaxTxnLife: 1000,
MaxTxnNoteBytes: 1024,
MaxTxnBytesPerBlock: 1000000,
DefaultKeyDilution: 10000,
MaxTimestampIncrement: 25,
RewardUnit: 1e6,
RewardsRateRefreshInterval: 5e5,
ApprovedUpgrades: map[protocol.ConsensusVersion]bool{},
NumProposers: 30,
SoftCommitteeSize: 2500,
SoftCommitteeThreshold: 1870,
CertCommitteeSize: 1000,
CertCommitteeThreshold: 720,
NextCommitteeSize: 10000,
NextCommitteeThreshold: 7750,
LateCommitteeSize: 10000,
LateCommitteeThreshold: 7750,
RedoCommitteeSize: 10000,
RedoCommitteeThreshold: 7750,
DownCommitteeSize: 10000,
DownCommitteeThreshold: 7750,
FastRecoveryLambda: 5 * time.Minute,
SeedLookback: 2,
SeedRefreshInterval: 100,
MaxBalLookback: 320,
MaxTxGroupSize: 1,
}
v7.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV7] = v7
// v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis
v8 := v7
v8.SeedRefreshInterval = 80
v8.NumProposers = 9
v8.SoftCommitteeSize = 2990
v8.SoftCommitteeThreshold = 2267
v8.CertCommitteeSize = 1500
v8.CertCommitteeThreshold = 1112
v8.NextCommitteeSize = 5000
v8.NextCommitteeThreshold = 3838
v8.LateCommitteeSize = 5000
v8.LateCommitteeThreshold = 3838
v8.RedoCommitteeSize = 5000
v8.RedoCommitteeThreshold = 3838
v8.DownCommitteeSize = 5000
v8.DownCommitteeThreshold = 3838
v8.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV8] = v8
// v7 can be upgraded to v8.
v7.ApprovedUpgrades[protocol.ConsensusV8] = true
// v9 increases the minimum balance to 100,000 microAlgos.
v9 := v8
v9.MinBalance = 100000
v9.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV9] = v9
// v8 can be upgraded to v9.
v8.ApprovedUpgrades[protocol.ConsensusV9] = true
// v10 introduces fast partition recovery (and also raises NumProposers).
v10 := v9
v10.FastPartitionRecovery = true
v10.NumProposers = 20
v10.LateCommitteeSize = 500
v10.LateCommitteeThreshold = 320
v10.RedoCommitteeSize = 2400
v10.RedoCommitteeThreshold = 1768
v10.DownCommitteeSize = 6000
v10.DownCommitteeThreshold = 4560
v10.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV10] = v10
// v9 can be upgraded to v10.
v9.ApprovedUpgrades[protocol.ConsensusV10] = true
// v11 introduces SignedTxnInBlock.
v11 := v10
v11.SupportSignedTxnInBlock = true
v11.PaysetCommitFlat = true
v11.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV11] = v11
// v10 can be upgraded to v11.
v10.ApprovedUpgrades[protocol.ConsensusV11] = true
// v12 increases the maximum length of a version string.
v12 := v11
v12.MaxVersionStringLen = 128
v12.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV12] = v12
// v11 can be upgraded to v12.
v11.ApprovedUpgrades[protocol.ConsensusV12] = true
// v13 makes the consensus version a meaningful string.
v13 := v12
v13.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV13] = v13
// v12 can be upgraded to v13.
v12.ApprovedUpgrades[protocol.ConsensusV13] = true
// v14 introduces tracking of closing amounts in ApplyData, and enables
// GenesisHash in transactions.
v14 := v13
v14.ApplyData = true
v14.SupportGenesisHash = true
v14.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV14] = v14
// v13 can be upgraded to v14.
v13.ApprovedUpgrades[protocol.ConsensusV14] = true
// v15 introduces tracking of reward distributions in ApplyData.
v15 := v14
v15.RewardsInApplyData = true
v15.ForceNonParticipatingFeeSink = true
v15.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV15] = v15
// v14 can be upgraded to v15.
v14.ApprovedUpgrades[protocol.ConsensusV15] = true
// v16 fixes domain separation in credentials.
v16 := v15
v16.CredentialDomainSeparationEnabled = true
v16.RequireGenesisHash = true
v16.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV16] = v16
// v15 can be upgraded to v16.
v15.ApprovedUpgrades[protocol.ConsensusV16] = true
// ConsensusV17 points to 'final' spec commit
v17 := v16
v17.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV17] = v17
// v16 can be upgraded to v17.
v16.ApprovedUpgrades[protocol.ConsensusV17] = true
// ConsensusV18 points to reward calculation spec commit
v18 := v17
v18.PendingResidueRewards = true
v18.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusV18] = v18
// v17 can be upgraded to v18.
// for now, I will leave this gated out.
// v17.ApprovedUpgrades[protocol.ConsensusV18] = true
// ConsensusFuture is used to test features that are implemented
// but not yet released in a production protocol version.
vFuture := v18
vFuture.TxnCounter = true
vFuture.Asset = true
vFuture.LogicSigVersion = 1
vFuture.LogicSigMaxSize = 1000
vFuture.LogicSigMaxCost = 20000
vFuture.MaxAssetsPerAccount = 1000
vFuture.SupportTxGroups = true
vFuture.MaxTxGroupSize = 16
vFuture.SupportTransactionLeases = true
vFuture.SupportBecomeNonParticipatingTransactions = true
vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusFuture] = vFuture
}
func initConsensusTestProtocols() {
// Various test protocol versions
Consensus[protocol.ConsensusTest0] = ConsensusParams{
UpgradeVoteRounds: 2,
UpgradeThreshold: 1,
UpgradeWaitRounds: 2,
MaxVersionStringLen: 64,
MaxTxnBytesPerBlock: 1000000,
DefaultKeyDilution: 10000,
ApprovedUpgrades: map[protocol.ConsensusVersion]bool{
protocol.ConsensusTest1: true,
},
}
Consensus[protocol.ConsensusTest1] = ConsensusParams{
UpgradeVoteRounds: 10,
UpgradeThreshold: 8,
UpgradeWaitRounds: 10,
MaxVersionStringLen: 64,
MaxTxnBytesPerBlock: 1000000,
DefaultKeyDilution: 10000,
ApprovedUpgrades: map[protocol.ConsensusVersion]bool{},
}
testBigBlocks := Consensus[protocol.ConsensusCurrentVersion]
testBigBlocks.MaxTxnBytesPerBlock = 100000000
testBigBlocks.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusTestBigBlocks] = testBigBlocks
rapidRecalcParams := Consensus[protocol.ConsensusCurrentVersion]
rapidRecalcParams.RewardsRateRefreshInterval = 25
// because rapidRecalcParams is based on ConsensusCurrentVersion,
// it *shouldn't* have any ApprovedUpgrades
// but explicitly mark "no approved upgrades" just in case
rapidRecalcParams.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{}
Consensus[protocol.ConsensusTestRapidRewardRecalculation] = rapidRecalcParams
}
func initConsensusTestFastUpgrade() {
fastUpgradeProtocols := make(map[protocol.ConsensusVersion]ConsensusParams)
for proto, params := range Consensus {
fastParams := params
fastParams.UpgradeVoteRounds = 5
fastParams.UpgradeThreshold = 3
fastParams.UpgradeWaitRounds = 5
fastParams.MaxVersionStringLen += len(protocol.ConsensusTestFastUpgrade(""))
fastParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]bool)
for ver, flag := range params.ApprovedUpgrades {
fastParams.ApprovedUpgrades[protocol.ConsensusTestFastUpgrade(ver)] = flag
}
fastUpgradeProtocols[protocol.ConsensusTestFastUpgrade(proto)] = fastParams
}
// Put the test protocols into the Consensus struct; this
// is done as a separate step so we don't recurse forever.
for proto, params := range fastUpgradeProtocols {
Consensus[proto] = params
}
}
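// initConsensusTestFastUpgrade builds the derived protocols in a temporary
// map and merges them into Consensus only after the range loop has finished;
// inserting into Consensus while ranging over it could make the loop visit
// the freshly added fast-upgrade entries and keep deriving from them.
// A small self-contained sketch of that build-then-merge pattern:
package main

import "fmt"

func main() {
	params := map[string]int{"v1": 1, "v2": 2}

	// Build the derived entries in a separate map. Inserting directly into
	// params while ranging over it could let the loop visit the new "fast-"
	// entries too and derive "fast-fast-" variants, and so on.
	derived := make(map[string]int)
	for name, p := range params {
		derived["fast-"+name] = p * 10
	}

	// Merge as a separate step, after the iteration has finished.
	for name, p := range derived {
		params[name] = p
	}

	fmt.Println(len(params)) // 4
}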
// Local holds the per-node-instance configuration settings for the protocol.
type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter.
Version uint32
// environmental (may be overridden)
// if true, does not garbage collect; also, replies to catchup requests
Archival bool
// gossipNode.go
// how many peers to propagate to?
GossipFanout int
NetAddress string
ReconnectTime time.Duration
// what we should tell people to connect to
PublicAddress string
MaxConnectionsPerIP int
// 0 == disable
PeerPingPeriodSeconds int
// for https serving
TLSCertFile string
TLSKeyFile string
// Logging
BaseLoggerDebugLevel uint32
// if this is 0, do not produce agreement.cadaver
CadaverSizeTarget uint64
// IncomingConnectionsLimit specifies the max number of long-lived incoming
// connections. 0 means no connections allowed. -1 is unbounded.
IncomingConnectionsLimit int
// BroadcastConnectionsLimit specifies the number of connections that
// will receive broadcast (gossip) messages from this node. If the
// node has more connections than this number, it will send broadcasts
// to the top connections by priority (outgoing connections first, then
// by money held by peers based on their participation key). 0 means
// no outgoing messages (not even transaction broadcasting to outgoing
// peers). -1 means unbounded (default).
BroadcastConnectionsLimit int
// AnnounceParticipationKey specifies that this node should announce its
// participation key (with the largest stake) to its gossip peers. This
// allows peers to prioritize our connection, if necessary, in case of a
// DoS attack. Disabling this means that the peers will not have any
// additional information to allow them to prioritize our connection.
AnnounceParticipationKey bool
// PriorityPeers specifies peer IP addresses that should always get
// outgoing broadcast messages from this node.
PriorityPeers map[string]bool
// To make sure the algod process does not run out of FDs, algod ensures
// that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e.,
// IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant
// to leave room for short-lived FDs like DNS queries, SQLite files, etc.
ReservedFDs uint64
// local server
// API endpoint address
EndpointAddress string
// timeouts passed to the rest http.Server implementation
RestReadTimeoutSeconds int
RestWriteTimeoutSeconds int
// SRV-based phonebook
DNSBootstrapID string
// Log file size limit in bytes
LogSizeLimit uint64
// text/template for creating log archive filename.
// Available template vars:
// Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}}
// Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}}
//
// If the filename ends with .gz or .bz2 it will be compressed.
//
// default: "node.archive.log" (no rotation, clobbers previous archive)
LogArchiveName string
// LogArchiveMaxAge will be parsed by time.ParseDuration().
// Valid units are 's' seconds, 'm' minutes, 'h' hours
LogArchiveMaxAge string
// number of consecutive attempts to catch up after which we replace the peers we're connected to
CatchupFailurePeerRefreshRate int
// where should the node exporter listen for metrics
NodeExporterListenAddress string
// enable metric reporting flag
EnableMetricReporting bool
// enable top accounts reporting flag
EnableTopAccountsReporting bool
// enable agreement reporting flag. Currently only prints additional period events.
EnableAgreementReporting bool
// enable agreement timing metrics flag
EnableAgreementTimeMetrics bool
// The path to the node exporter.
NodeExporterPath string
// The fallback DNS resolver address that will be used if the system resolver fails to retrieve SRV records
FallbackDNSResolverAddress string
// exponential increase factor of transaction pool's fee threshold, should always be 2 in production
TxPoolExponentialIncreaseFactor uint64
SuggestedFeeBlockHistory int
// TxPoolSize is the number of transactions that fit in the transaction pool
TxPoolSize int
// number of seconds allowed for syncing transactions
TxSyncTimeoutSeconds int64
// number of seconds between transaction synchronizations
TxSyncIntervalSeconds int64
// the number of incoming message hashes buckets.
IncomingMessageFilterBucketCount int
// the size of each incoming message hash bucket.
IncomingMessageFilterBucketSize int
// the number of outgoing message hashes buckets.
OutgoingMessageFilterBucketCount int
// the size of each outgoing message hash bucket.
OutgoingMessageFilterBucketSize int
// enable the filtering of outgoing messages
EnableOutgoingNetworkMessageFiltering bool
// enable the filtering of incoming messages
EnableIncomingMessageFilter bool
// control enabling / disabling deadlock detection.
// negative (-1) to disable, positive (1) to enable, 0 for default.
DeadlockDetection int
// Prefer to run algod Hosted (under algoh)
// Observed by `goal` for now.
RunHosted bool
// The maximal number of blocks that catchup will fetch in parallel.
// If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used to limit the catchup.
CatchupParallelBlocks uint64
// Generate AssembleBlockMetrics telemetry event
EnableAssembleStats bool
// Generate ProcessBlockMetrics telemetry event
EnableProcessBlockStats bool
// SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee
SuggestedFeeSlidingWindowSize uint32
// the max size the sync server would return
TxSyncServeResponseSize int
// IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions
// Note -- Indexer cannot operate on non-Archival nodes
IsIndexerActive bool
// UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when
// determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the
// proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header
// field can be used.
UseXForwardedForAddressField string
// ForceRelayMessages indicates whether the network library should relay messages even when no NetAddress was specified.
ForceRelayMessages bool
// ConnectionsRateLimitingWindowSeconds is used in conjunction with ConnectionsRateLimitingCount;
// see the ConnectionsRateLimitingCount description for further information. Providing a zero value
// in this variable disables the connection rate limiting.
ConnectionsRateLimitingWindowSeconds uint
// ConnectionsRateLimitingCount is used along with ConnectionsRateLimitingWindowSeconds to determine whether
// a connection request should be accepted. The gossip network examines all the incoming requests in the past
// ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceeds the
// ConnectionsRateLimitingCount value, the connection is refused.
ConnectionsRateLimitingCount uint
// EnableRequestLogger enables the logging of the incoming requests to the telemetry server.
EnableRequestLogger bool
}
// Filenames of config files within the configdir (e.g. ~/.algorand)
// ConfigFilename is the name of the config.json file where we store per-algod-instance settings
const ConfigFilename = "config.json"
// PhonebookFilename is the name of the phonebook configuration file - no longer used
const PhonebookFilename = "phonebook.json" // No longer used in product - still in tests
// LedgerFilenamePrefix is the prefix of the name of the ledger database files
const LedgerFilenamePrefix = "ledger"
// CrashFilename is the name of the agreement database file.
// It is used to recover from node crashes.
const CrashFilename = "crash.sqlite"
// LoadConfigFromDisk returns a Local config structure based on merging the defaults
// with settings loaded from the config file in the custom dir. If the custom file
// cannot be loaded, the default config is returned (with the error from loading the
// custom file).
func LoadConfigFromDisk(custom string) (c Local, err error) {
return loadConfigFromFile(filepath.Join(custom, ConfigFilename))
}
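// A hedged usage sketch for LoadConfigFromDisk. The import path below is an
// assumption (the standard go-algorand module layout); the missing-file check
// works because the error ultimately comes from os.Open. When the file is
// absent, cfg falls back to the package defaults (with Version reset to 0).
package main

import (
	"fmt"
	"os"

	"github.com/algorand/go-algorand/config"
)

func main() {
	cfg, err := config.LoadConfigFromDisk(os.ExpandEnv("$HOME/.algorand"))
	if err != nil && !os.IsNotExist(err) {
		// A present but malformed or unreadable config.json is a real error.
		fmt.Fprintln(os.Stderr, "cannot load config:", err)
		os.Exit(1)
	}
	// If the file simply does not exist, cfg holds the defaults.
	fmt.Println("archival:", cfg.Archival, "fanout:", cfg.GossipFanout)
}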
func loadConfigFromFile(configFile string) (c Local, err error) {
c = defaultLocal
c.Version = 0 // Reset to 0 so we get the version from the loaded file.
c, err = mergeConfigFromFile(configFile, c)
if err != nil {
return
}
// Migrate in case defaults were changed
c, err = migrate(c)
return
}
// GetDefaultLocal returns a copy of the current defaultLocal config
func GetDefaultLocal() Local {
return defaultLocal
}
func mergeConfigFromDir(root string, source Local) (Local, error) {
return mergeConfigFromFile(filepath.Join(root, ConfigFilename), source)
}
func mergeConfigFromFile(configpath string, source Local) (Local, error) {
f, err := os.Open(configpath)
if err != nil {
return source, err
}
defer f.Close()
err = loadConfig(f, &source)
// For now, all relays (listening for incoming connections) are also Archival
// We can change this logic in the future, but it's currently the sanest default.
if source.NetAddress != "" {
source.Archival = true
}
return source, err
}
func loadConfig(reader io.Reader, config *Local) error {
dec := json.NewDecoder(reader)
return dec.Decode(config)
}
// DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers
func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) {
dnsBootstrapString := cfg.DNSBootstrap(networkID)
bootstrapArray = strings.Split(dnsBootstrapString, ";")
return
}
// DNSBootstrap returns the network-specific DNSBootstrap identifier
func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
// if the user hasn't modified the default DNSBootstrapID in the configuration
// file and we're targeting a devnet (via the genesis file), we use the
// explicit devnet network bootstrap.
if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID && network == Devnet {
return "devnet.algodev.network"
}
return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
}
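// DNSBootstrap substitutes the network name for the <network> placeholder and
// DNSBootstrapArray splits the result on ';', so one DNSBootstrapID can yield
// several SRV lookup targets. A self-contained sketch of that expansion; the
// bootstrap ID below is made up for illustration and is not the package default.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical DNSBootstrapID with two ';'-separated entries.
	id := "<network>.example.org;backup.<network>.example.org"
	network := "testnet"

	expanded := strings.Replace(id, "<network>", network, -1)
	fmt.Println(strings.Split(expanded, ";"))
	// Output: [testnet.example.org backup.testnet.example.org]
}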
// SaveToDisk writes the Local settings into a root/ConfigFilename file
func (cfg Local) SaveToDisk(root string) error {
configpath := filepath.Join(root, ConfigFilename)
filename := os.ExpandEnv(configpath)
return cfg.SaveToFile(filename)
}
// SaveToFile saves the config to a specific filename, allowing overriding the default name
func (cfg Local) SaveToFile(filename string) error {
var alwaysInclude []string
alwaysInclude = append(alwaysInclude, "Version")
return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true)
}
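// SaveToFile persists only the fields that differ from defaultLocal, plus
// Version, which is always included. A hedged usage sketch; the import path
// is an assumption (standard go-algorand module layout).
package main

import (
	"log"
	"os"

	"github.com/algorand/go-algorand/config"
)

func main() {
	dir, err := os.MkdirTemp("", "algod-data")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	cfg := config.GetDefaultLocal()
	cfg.EnableMetricReporting = true // the only value changed from the defaults

	// Expected to write dir/config.json containing Version plus the one
	// overridden field.
	if err := cfg.SaveToDisk(dir); err != nil {
		log.Fatal(err)
	}
}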
type phonebookBlackWhiteList struct {
Include []string
}
// LoadPhonebook returns a phonebook loaded from the provided directory, if it exists.
// NOTE: We no longer use phonebook for anything but tests, but users should be able to use it
func LoadPhonebook(datadir string) ([]string, error) {
var entries []string
path := filepath.Join(datadir, PhonebookFilename)
f, rootErr := os.Open(path)
if rootErr != nil {
if !os.IsNotExist(rootErr) {
return nil, rootErr
}
} else {
defer f.Close()
phonebook := phonebookBlackWhiteList{}
dec := json.NewDecoder(f)
err := dec.Decode(&phonebook)
if err != nil {
return nil, errors.New("error decoding phonebook! got error: " + err.Error())
}
entries = phonebook.Include
}
// get an initial list of peers
return entries, rootErr
}
// SavePhonebookToDisk writes the phonebook into a root/PhonebookFilename file
func SavePhonebookToDisk(entries []string, root string) error {
configpath := filepath.Join(root, PhonebookFilename)
f, err := os.OpenFile(os.ExpandEnv(configpath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err == nil {
defer f.Close()
err = savePhonebook(entries, f)
}
return err
}
func savePhonebook(entries []string, w io.Writer) error {
pb := phonebookBlackWhiteList{
Include: entries,
}
enc := codecs.NewFormattedJSONEncoder(w)
return enc.Encode(pb)
}
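// A hedged round-trip of SavePhonebookToDisk and LoadPhonebook; the import
// path is an assumption (standard go-algorand module layout). Note that when
// no phonebook.json exists, LoadPhonebook returns an empty list together with
// the os.IsNotExist error, which callers typically tolerate.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/algorand/go-algorand/config"
)

func main() {
	dir, err := os.MkdirTemp("", "phonebook")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	relays := []string{"r1.example.org:4160", "r2.example.org:4160"}
	if err := config.SavePhonebookToDisk(relays, dir); err != nil {
		log.Fatal(err)
	}

	entries, err := config.LoadPhonebook(dir)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(entries) // [r1.example.org:4160 r2.example.org:4160]
}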
var globalConfigFileRoot string
// GetConfigFilePath retrieves the full path to a configuration file
// These are global configurations - not specific to data-directory / network.
func GetConfigFilePath(file string) (string, error) {
rootPath, err := GetGlobalConfigFileRoot()
if err != nil {
return "", err
}
return filepath.Join(rootPath, file), nil
}
// GetGlobalConfigFileRoot returns the current root folder for global configuration files.
// This will likely only change for tests.
func GetGlobalConfigFileRoot() (string, error) {
var err error
if globalConfigFileRoot == "" {
globalConfigFileRoot, err = GetDefaultConfigFilePath()
if err == nil {
dirErr := os.Mkdir(globalConfigFileRoot, os.ModePerm)
if !os.IsExist(dirErr) {
err = dirErr
}
}
}
return globalConfigFileRoot, err
}
// SetGlobalConfigFileRoot allows overriding the root folder for global configuration files.
// It returns the current one so it can be restored, if desired.
// This will likely only change for tests.
func SetGlobalConfigFileRoot(rootPath string) string {
currentRoot := globalConfigFileRoot
globalConfigFileRoot = rootPath
return currentRoot
}
// GetDefaultConfigFilePath retrieves the default directory for global (not per-instance) config files
// By default we store in ~/.algorand/.
// This will likely only change for tests.
func GetDefaultConfigFilePath() (string, error) {
currentUser, err := user.Current()
if err != nil {
return "", err
}
if currentUser.HomeDir == "" {
return "", errors.New("GetDefaultConfigFilePath fail - current user has no home directory")
}
return filepath.Join(currentUser.HomeDir, ".algorand"), nil
}
| 1 | 36,610 | 1. No logger exists to produce a message 2. Can't assume the latest version because missed version means 0. | algorand-go-algorand | go |
@@ -633,6 +633,14 @@ class Config
return $config;
}
+ /**
+ * Computes the hash to use for a cache folder from CLI flags and from the config file's xml contents
+ */
+ public function computeHash(): string
+ {
+ return sha1($this->hash . ':' . $this->level);
+ }
+
/**
* Creates a new config object from an XML string
* @param string|null $current_dir Current working directory, if different to $base_dir | 1 | <?php
namespace Psalm;
use Composer\Autoload\ClassLoader;
use Composer\Semver\VersionParser;
use DOMDocument;
use LogicException;
use OutOfBoundsException;
use Psalm\Config\IssueHandler;
use Psalm\Config\ProjectFileFilter;
use Psalm\Config\TaintAnalysisFileFilter;
use Psalm\Exception\ConfigException;
use Psalm\Exception\ConfigNotFoundException;
use Psalm\Internal\Analyzer\ClassLikeAnalyzer;
use Psalm\Internal\Analyzer\FileAnalyzer;
use Psalm\Internal\Analyzer\ProjectAnalyzer;
use Psalm\Internal\Composer;
use Psalm\Internal\EventDispatcher;
use Psalm\Internal\IncludeCollector;
use Psalm\Internal\Scanner\FileScanner;
use Psalm\Issue\ArgumentIssue;
use Psalm\Issue\ClassIssue;
use Psalm\Issue\CodeIssue;
use Psalm\Issue\ConfigIssue;
use Psalm\Issue\FunctionIssue;
use Psalm\Issue\MethodIssue;
use Psalm\Issue\PropertyIssue;
use Psalm\Issue\VariableIssue;
use Psalm\Progress\Progress;
use Psalm\Progress\VoidProgress;
use SimpleXMLElement;
use Webmozart\PathUtil\Path;
use XdgBaseDir\Xdg;
use function array_map;
use function array_merge;
use function array_pad;
use function array_pop;
use function array_shift;
use function assert;
use function basename;
use function chdir;
use function class_exists;
use function count;
use function dirname;
use function explode;
use function file_exists;
use function file_get_contents;
use function filetype;
use function get_class;
use function get_defined_constants;
use function get_defined_functions;
use function getcwd;
use function glob;
use function implode;
use function in_array;
use function intval;
use function is_a;
use function is_dir;
use function is_file;
use function json_decode;
use function libxml_clear_errors;
use function libxml_get_errors;
use function libxml_use_internal_errors;
use function mkdir;
use function phpversion;
use function preg_match;
use function preg_quote;
use function preg_replace;
use function realpath;
use function reset;
use function rmdir;
use function rtrim;
use function scandir;
use function sha1;
use function simplexml_import_dom;
use function str_replace;
use function strlen;
use function strpos;
use function strrpos;
use function strtolower;
use function strtr;
use function substr;
use function substr_count;
use function sys_get_temp_dir;
use function trigger_error;
use function unlink;
use function version_compare;
use const DIRECTORY_SEPARATOR;
use const E_USER_ERROR;
use const GLOB_NOSORT;
use const LIBXML_ERR_ERROR;
use const LIBXML_ERR_FATAL;
use const LIBXML_NONET;
use const PHP_EOL;
use const SCANDIR_SORT_NONE;
/**
* @psalm-suppress PropertyNotSetInConstructor
* @psalm-consistent-constructor
*/
class Config
{
private const DEFAULT_FILE_NAME = 'psalm.xml';
public const REPORT_INFO = 'info';
public const REPORT_ERROR = 'error';
public const REPORT_SUPPRESS = 'suppress';
/**
* @var array<string>
*/
public static $ERROR_LEVELS = [
self::REPORT_INFO,
self::REPORT_ERROR,
self::REPORT_SUPPRESS,
];
/**
* @var array
*/
private const MIXED_ISSUES = [
'MixedArgument',
'MixedArrayAccess',
'MixedArrayAssignment',
'MixedArrayOffset',
'MixedArrayTypeCoercion',
'MixedAssignment',
'MixedFunctionCall',
'MixedInferredReturnType',
'MixedMethodCall',
'MixedOperand',
'MixedPropertyFetch',
'MixedPropertyAssignment',
'MixedReturnStatement',
'MixedStringOffsetAssignment',
'MixedArgumentTypeCoercion',
'MixedPropertyTypeCoercion',
'MixedReturnTypeCoercion',
];
/**
* These are special object classes that allow any and all properties to be get/set on them
* @var array<int, class-string>
*/
protected $universal_object_crates = [
\stdClass::class,
SimpleXMLElement::class,
];
/**
* @var static|null
*/
private static $instance;
/**
* Whether or not to use types as defined in docblocks
*
* @var bool
*/
public $use_docblock_types = true;
/**
* Whether or not to use types as defined in property docblocks.
* This is distinct from the above because you may want to use
* property docblocks, but not function docblocks.
*
* @var bool
*/
public $use_docblock_property_types = false;
/**
* Whether or not to throw an exception on first error
*
* @var bool
*/
public $throw_exception = false;
/**
* Whether or not to load Xdebug stub
*
* @var bool|null
*/
public $load_xdebug_stub = null;
/**
* The directory to store PHP Parser (and other) caches
*
* @var string|null
*/
public $cache_directory;
/**
* The directory to store all Psalm project caches
*
* @var string|null
*/
public $global_cache_directory;
/**
* Path to the autoloader
*
* @var string|null
*/
public $autoloader;
/**
* @var ProjectFileFilter|null
*/
protected $project_files;
/**
* @var ProjectFileFilter|null
*/
protected $extra_files;
/**
* The base directory of this config file
*
* @var string
*/
public $base_dir;
/**
* The PHP version to assume as declared in the config file
*
* @var string|null
*/
private $configured_php_version;
/**
* @var array<int, string>
*/
private $file_extensions = ['php'];
/**
* @var array<string, class-string<FileScanner>>
*/
private $filetype_scanners = [];
/**
* @var array<string, class-string<FileAnalyzer>>
*/
private $filetype_analyzers = [];
/**
* @var array<string, string>
*/
private $filetype_scanner_paths = [];
/**
* @var array<string, string>
*/
private $filetype_analyzer_paths = [];
/**
* @var array<string, IssueHandler>
*/
private $issue_handlers = [];
/**
* @var array<int, string>
*/
private $mock_classes = [];
/**
* @var array<string, string>
*/
private $preloaded_stub_files = [];
/**
* @var array<string, string>
*/
private $stub_files = [];
/**
* @var bool
*/
public $hide_external_errors = false;
/** @var bool */
public $allow_includes = true;
/** @var 1|2|3|4|5|6|7|8 */
public $level = 1;
/**
* @var ?bool
*/
public $show_mixed_issues = null;
/** @var bool */
public $strict_binary_operands = false;
/**
* @var bool
*/
public $remember_property_assignments_after_call = true;
/** @var bool */
public $use_igbinary = false;
/**
* @var bool
*/
public $allow_phpstorm_generics = false;
/**
* @var bool
*/
public $allow_string_standin_for_class = false;
/**
* @var bool
*/
public $use_phpdoc_method_without_magic_or_parent = false;
/**
* @var bool
*/
public $use_phpdoc_property_without_magic_or_parent = false;
/**
* @var bool
*/
public $skip_checks_on_unresolvable_includes = false;
/**
* @var bool
*/
public $seal_all_methods = false;
/**
* @var bool
*/
public $memoize_method_calls = false;
/**
* @var bool
*/
public $hoist_constants = false;
/**
* @var bool
*/
public $add_param_default_to_docblock_type = false;
/**
* @var bool
*/
public $check_for_throws_docblock = false;
/**
* @var bool
*/
public $check_for_throws_in_global_scope = false;
/**
* @var bool
*/
public $ignore_internal_falsable_issues = true;
/**
* @var bool
*/
public $ignore_internal_nullable_issues = true;
/**
* @var array<string, bool>
*/
public $ignored_exceptions = [];
/**
* @var array<string, bool>
*/
public $ignored_exceptions_in_global_scope = [];
/**
* @var array<string, bool>
*/
public $ignored_exceptions_and_descendants = [];
/**
* @var array<string, bool>
*/
public $ignored_exceptions_and_descendants_in_global_scope = [];
/**
* @var bool
*/
public $infer_property_types_from_constructor = true;
/**
* @var bool
*/
public $ensure_array_string_offsets_exist = false;
/**
* @var bool
*/
public $ensure_array_int_offsets_exist = false;
/**
* @var array<lowercase-string, bool>
*/
public $forbidden_functions = [];
/**
* @var bool
*/
public $forbid_echo = false;
/**
* @var bool
*/
public $find_unused_code = false;
/**
* @var bool
*/
public $find_unused_variables = false;
/**
* @var bool
*/
public $find_unused_psalm_suppress = false;
/**
* @var bool
*/
public $run_taint_analysis = false;
/** @var bool */
public $use_phpstorm_meta_path = true;
/**
* @var bool
*/
public $resolve_from_config_file = true;
/**
* @var bool
*/
public $restrict_return_types = false;
/**
* @var bool
*/
public $limit_method_complexity = false;
/**
* @var int
*/
public $max_graph_size = 200;
/**
* @var int
*/
public $max_avg_path_length = 70;
/**
* @var string[]
*/
public $plugin_paths = [];
/**
* @var array<array{class:string,config:?SimpleXMLElement}>
*/
private $plugin_classes = [];
/**
* @var bool
*/
public $allow_internal_named_arg_calls = true;
/**
* @var bool
*/
public $allow_named_arg_calls = true;
/** @var array<string, mixed> */
private $predefined_constants = [];
/** @var array<callable-string, bool> */
private $predefined_functions = [];
/** @var ClassLoader|null */
private $composer_class_loader;
/**
* Custom functions that always exit
*
* @var array<lowercase-string, bool>
*/
public $exit_functions = [];
/**
* @var string
*/
public $hash = '';
/** @var string|null */
public $error_baseline = null;
/**
* @var bool
*/
public $include_php_versions_in_error_baseline = false;
/** @var string */
public $shepherd_host = 'shepherd.dev';
/**
* @var array<string, string>
*/
public $globals = [];
/**
* @var int
*/
public $max_string_length = 1000;
/** @var ?IncludeCollector */
private $include_collector;
/**
* @var TaintAnalysisFileFilter|null
*/
protected $taint_analysis_ignored_files;
/**
* @var bool whether to emit a backtrace of emitted issues to stderr
*/
public $debug_emitted_issues = false;
/**
* @var bool
*/
private $report_info = true;
/**
* @var EventDispatcher
*/
public $eventDispatcher;
/** @var list<ConfigIssue> */
public $config_issues = [];
/**
* @var 'default'|'never'|'always'
*/
public $trigger_error_exits = 'default';
protected function __construct()
{
self::$instance = $this;
$this->eventDispatcher = new EventDispatcher();
}
/**
* Gets a Config object from an XML file.
*
* Searches up a folder hierarchy for the most immediate config.
*
* @throws ConfigException if a config path is not found
*
*/
public static function getConfigForPath(string $path, string $current_dir): Config
{
$config_path = self::locateConfigFile($path);
if (!$config_path) {
throw new ConfigNotFoundException('Config not found for path ' . $path);
}
return self::loadFromXMLFile($config_path, $current_dir);
}
/**
* Searches up a folder hierarchy for the most immediate config.
*
* @throws ConfigException
*
*/
public static function locateConfigFile(string $path): ?string
{
$dir_path = realpath($path);
if ($dir_path === false) {
throw new ConfigNotFoundException('Config not found for path ' . $path);
}
if (!is_dir($dir_path)) {
$dir_path = dirname($dir_path);
}
do {
$maybe_path = $dir_path . DIRECTORY_SEPARATOR . Config::DEFAULT_FILE_NAME;
if (file_exists($maybe_path) || file_exists($maybe_path .= '.dist')) {
return $maybe_path;
}
$dir_path = dirname($dir_path);
} while (dirname($dir_path) !== $dir_path);
return null;
}
/**
* Creates a new config object from the file
*/
public static function loadFromXMLFile(string $file_path, string $current_dir): Config
{
$file_contents = file_get_contents($file_path);
$base_dir = dirname($file_path) . DIRECTORY_SEPARATOR;
if ($file_contents === false) {
throw new \InvalidArgumentException('Cannot open ' . $file_path);
}
try {
$config = self::loadFromXML($base_dir, $file_contents, $current_dir, $file_path);
$config->hash = sha1($file_contents . \PSALM_VERSION);
} catch (ConfigException $e) {
throw new ConfigException(
'Problem parsing ' . $file_path . ":\n" . ' ' . $e->getMessage()
);
}
return $config;
}
/**
* Creates a new config object from an XML string
* @param string|null $current_dir Current working directory, if different to $base_dir
*
* @throws ConfigException
*/
public static function loadFromXML(
string $base_dir,
string $file_contents,
?string $current_dir = null,
?string $file_path = null
): Config {
if ($current_dir === null) {
$current_dir = $base_dir;
}
self::validateXmlConfig($base_dir, $file_contents);
return self::fromXmlAndPaths($base_dir, $file_contents, $current_dir, $file_path);
}
private static function loadDomDocument(string $base_dir, string $file_contents): DOMDocument
{
$dom_document = new DOMDocument();
// there's no obvious way to set xml:base for a document when loading it from a string,
// so instead we change the current directory to be able to process XIncludes
$oldpwd = getcwd();
chdir($base_dir);
$dom_document->loadXML($file_contents, LIBXML_NONET);
$dom_document->xinclude(LIBXML_NONET);
chdir($oldpwd);
return $dom_document;
}
/**
* @throws ConfigException
*/
private static function validateXmlConfig(string $base_dir, string $file_contents): void
{
$schema_path = dirname(__DIR__, 2). '/config.xsd';
if (!file_exists($schema_path)) {
throw new ConfigException('Cannot locate config schema');
}
$dom_document = self::loadDomDocument($base_dir, $file_contents);
$psalm_nodes = $dom_document->getElementsByTagName('psalm');
/** @var \DomElement|null */
$psalm_node = $psalm_nodes->item(0);
if (!$psalm_node) {
throw new ConfigException(
'Missing psalm node'
);
}
if (!$psalm_node->hasAttribute('xmlns')) {
$psalm_node->setAttribute('xmlns', 'https://getpsalm.org/schema/config');
$old_dom_document = $dom_document;
$dom_document = self::loadDomDocument($base_dir, $old_dom_document->saveXML());
}
// Enable user error handling
libxml_use_internal_errors(true);
if (!$dom_document->schemaValidate($schema_path)) {
$errors = libxml_get_errors();
foreach ($errors as $error) {
if ($error->level === LIBXML_ERR_FATAL || $error->level === LIBXML_ERR_ERROR) {
throw new ConfigException(
'Error on line ' . $error->line . ":\n" . ' ' . $error->message
);
}
}
libxml_clear_errors();
}
}
/**
* @param positive-int $line_number 1-based line number
* @return int 0-based byte offset
* @throws OutOfBoundsException
*/
private static function lineNumberToByteOffset(string $string, int $line_number): int
{
if ($line_number === 1) {
return 0;
}
$offset = 0;
for ($i = 0; $i < $line_number - 1; $i++) {
$newline_offset = strpos($string, "\n", $offset);
if (false === $newline_offset) {
throw new OutOfBoundsException(
'Line ' . $line_number . ' is not found in a string with ' . ($i + 1) . ' lines'
);
}
$offset = $newline_offset + 1;
}
if ($offset > strlen($string)) {
throw new OutOfBoundsException('Line ' . $line_number . ' is not found');
}
return $offset;
}
private static function processConfigDeprecations(
self $config,
DOMDocument $dom_document,
string $file_contents,
string $config_path
): void {
// Attributes to be removed in Psalm 5
$deprecated_attributes = [
'allowCoercionFromStringToClassConst'
];
$config->config_issues = [];
$attributes = $dom_document->getElementsByTagName('psalm')->item(0)->attributes;
foreach ($attributes as $attribute) {
if (in_array($attribute->name, $deprecated_attributes, true)) {
$line = $attribute->getLineNo();
assert($line > 0); // getLineNo() always returns non-zero for nodes loaded from file
$offset = self::lineNumberToByteOffset($file_contents, $line);
$attribute_start = strrpos($file_contents, $attribute->name, $offset - strlen($file_contents)) ?: 0;
$attribute_end = $attribute_start + strlen($attribute->name) - 1;
$config->config_issues[] = new ConfigIssue(
'Attribute "' . $attribute->name . '" is deprecated '
. 'and is going to be removed in the next major version',
new CodeLocation\Raw(
$file_contents,
$config_path,
basename($config_path),
$attribute_start,
$attribute_end
)
);
}
}
}
/**
* @psalm-suppress MixedMethodCall
* @psalm-suppress MixedAssignment
* @psalm-suppress MixedOperand
* @psalm-suppress MixedArgument
* @psalm-suppress MixedPropertyFetch
*
* @throws ConfigException
*/
private static function fromXmlAndPaths(
string $base_dir,
string $file_contents,
string $current_dir,
?string $config_path
): self {
$config = new static();
$dom_document = self::loadDomDocument($base_dir, $file_contents);
if (null !== $config_path) {
self::processConfigDeprecations(
$config,
$dom_document,
$file_contents,
$config_path
);
}
$config_xml = simplexml_import_dom($dom_document);
$booleanAttributes = [
'useDocblockTypes' => 'use_docblock_types',
'useDocblockPropertyTypes' => 'use_docblock_property_types',
'throwExceptionOnError' => 'throw_exception',
'hideExternalErrors' => 'hide_external_errors',
'resolveFromConfigFile' => 'resolve_from_config_file',
'allowFileIncludes' => 'allow_includes',
'strictBinaryOperands' => 'strict_binary_operands',
'rememberPropertyAssignmentsAfterCall' => 'remember_property_assignments_after_call',
'allowPhpStormGenerics' => 'allow_phpstorm_generics',
'allowStringToStandInForClass' => 'allow_string_standin_for_class',
'usePhpDocMethodsWithoutMagicCall' => 'use_phpdoc_method_without_magic_or_parent',
'usePhpDocPropertiesWithoutMagicCall' => 'use_phpdoc_property_without_magic_or_parent',
'memoizeMethodCallResults' => 'memoize_method_calls',
'hoistConstants' => 'hoist_constants',
'addParamDefaultToDocblockType' => 'add_param_default_to_docblock_type',
'checkForThrowsDocblock' => 'check_for_throws_docblock',
'checkForThrowsInGlobalScope' => 'check_for_throws_in_global_scope',
'forbidEcho' => 'forbid_echo',
'ignoreInternalFunctionFalseReturn' => 'ignore_internal_falsable_issues',
'ignoreInternalFunctionNullReturn' => 'ignore_internal_nullable_issues',
'includePhpVersionsInErrorBaseline' => 'include_php_versions_in_error_baseline',
'loadXdebugStub' => 'load_xdebug_stub',
'ensureArrayStringOffsetsExist' => 'ensure_array_string_offsets_exist',
'ensureArrayIntOffsetsExist' => 'ensure_array_int_offsets_exist',
'reportMixedIssues' => 'show_mixed_issues',
'skipChecksOnUnresolvableIncludes' => 'skip_checks_on_unresolvable_includes',
'sealAllMethods' => 'seal_all_methods',
'runTaintAnalysis' => 'run_taint_analysis',
'usePhpStormMetaPath' => 'use_phpstorm_meta_path',
'allowInternalNamedArgumentsCalls' => 'allow_internal_named_arg_calls',
'allowNamedArgumentCalls' => 'allow_named_arg_calls',
'findUnusedPsalmSuppress' => 'find_unused_psalm_suppress',
'reportInfo' => 'report_info',
'restrictReturnTypes' => 'restrict_return_types',
'limitMethodComplexity' => 'limit_method_complexity',
'triggerErrorExits' => 'trigger_error_exits',
];
foreach ($booleanAttributes as $xmlName => $internalName) {
if (isset($config_xml[$xmlName])) {
$attribute_text = (string) $config_xml[$xmlName];
$config->setBooleanAttribute(
$internalName,
$attribute_text === 'true' || $attribute_text === '1'
);
}
}
if ($config->resolve_from_config_file) {
$config->base_dir = $base_dir;
} else {
$config->base_dir = $current_dir;
$base_dir = $current_dir;
}
if (isset($config_xml['phpVersion'])) {
$config->configured_php_version = (string) $config_xml['phpVersion'];
}
if (isset($config_xml['autoloader'])) {
$autoloader_path = $config->base_dir . DIRECTORY_SEPARATOR . $config_xml['autoloader'];
if (!file_exists($autoloader_path)) {
throw new ConfigException('Cannot locate autoloader');
}
$config->autoloader = realpath($autoloader_path);
}
if (isset($config_xml['cacheDirectory'])) {
$config->cache_directory = (string)$config_xml['cacheDirectory'];
} elseif ($user_cache_dir = (new Xdg())->getHomeCacheDir()) {
$config->cache_directory = $user_cache_dir . '/psalm';
} else {
$config->cache_directory = sys_get_temp_dir() . '/psalm';
}
$config->global_cache_directory = $config->cache_directory;
$config->cache_directory .= DIRECTORY_SEPARATOR . sha1($base_dir);
$cwd = null;
if ($config->resolve_from_config_file) {
$cwd = getcwd();
chdir($config->base_dir);
}
if (is_dir($config->cache_directory) === false && @mkdir($config->cache_directory, 0777, true) === false) {
trigger_error('Could not create cache directory: ' . $config->cache_directory, E_USER_ERROR);
}
if ($cwd) {
chdir($cwd);
}
if (isset($config_xml['serializer'])) {
$attribute_text = (string) $config_xml['serializer'];
$config->use_igbinary = $attribute_text === 'igbinary';
} elseif ($igbinary_version = phpversion('igbinary')) {
$config->use_igbinary = version_compare($igbinary_version, '2.0.5') >= 0;
}
if (isset($config_xml['findUnusedCode'])) {
$attribute_text = (string) $config_xml['findUnusedCode'];
$config->find_unused_code = $attribute_text === 'true' || $attribute_text === '1';
$config->find_unused_variables = $config->find_unused_code;
}
if (isset($config_xml['findUnusedVariablesAndParams'])) {
$attribute_text = (string) $config_xml['findUnusedVariablesAndParams'];
$config->find_unused_variables = $attribute_text === 'true' || $attribute_text === '1';
}
if (isset($config_xml['errorLevel'])) {
$attribute_text = (int) $config_xml['errorLevel'];
if (!in_array($attribute_text, [1, 2, 3, 4, 5, 6, 7, 8], true)) {
throw new Exception\ConfigException(
'Invalid error level ' . $config_xml['errorLevel']
);
}
$config->level = $attribute_text;
} elseif (isset($config_xml['totallyTyped'])) {
$totally_typed = (string) $config_xml['totallyTyped'];
if ($totally_typed === 'true' || $totally_typed === '1') {
$config->level = 1;
} else {
$config->level = 2;
if ($config->show_mixed_issues === null) {
$config->show_mixed_issues = false;
}
}
} else {
$config->level = 2;
}
// turn on unused variable detection in level 1
if (!isset($config_xml['findUnusedCode'])
&& !isset($config_xml['findUnusedVariablesAndParams'])
&& $config->level === 1
&& $config->show_mixed_issues !== false
) {
$config->find_unused_variables = true;
}
if (isset($config_xml['errorBaseline'])) {
$attribute_text = (string) $config_xml['errorBaseline'];
$config->error_baseline = $attribute_text;
}
if (isset($config_xml['maxStringLength'])) {
$attribute_text = intval($config_xml['maxStringLength']);
$config->max_string_length = $attribute_text;
}
if (isset($config_xml['inferPropertyTypesFromConstructor'])) {
$attribute_text = (string) $config_xml['inferPropertyTypesFromConstructor'];
$config->infer_property_types_from_constructor = $attribute_text === 'true' || $attribute_text === '1';
}
if (isset($config_xml->projectFiles)) {
$config->project_files = ProjectFileFilter::loadFromXMLElement($config_xml->projectFiles, $base_dir, true);
}
if (isset($config_xml->extraFiles)) {
$config->extra_files = ProjectFileFilter::loadFromXMLElement($config_xml->extraFiles, $base_dir, true);
}
if (isset($config_xml->taintAnalysis->ignoreFiles)) {
$config->taint_analysis_ignored_files = TaintAnalysisFileFilter::loadFromXMLElement(
$config_xml->taintAnalysis->ignoreFiles,
$base_dir,
false
);
}
if (isset($config_xml->fileExtensions)) {
$config->file_extensions = [];
$config->loadFileExtensions($config_xml->fileExtensions->extension);
}
if (isset($config_xml->mockClasses) && isset($config_xml->mockClasses->class)) {
/** @var \SimpleXMLElement $mock_class */
foreach ($config_xml->mockClasses->class as $mock_class) {
$config->mock_classes[] = strtolower((string)$mock_class['name']);
}
}
if (isset($config_xml->universalObjectCrates) && isset($config_xml->universalObjectCrates->class)) {
/** @var \SimpleXMLElement $universal_object_crate */
foreach ($config_xml->universalObjectCrates->class as $universal_object_crate) {
/** @var string $classString */
$classString = $universal_object_crate['name'];
$config->addUniversalObjectCrate($classString);
}
}
if (isset($config_xml->ignoreExceptions)) {
if (isset($config_xml->ignoreExceptions->class)) {
/** @var \SimpleXMLElement $exception_class */
foreach ($config_xml->ignoreExceptions->class as $exception_class) {
$exception_name = (string) $exception_class['name'];
$global_attribute_text = (string) $exception_class['onlyGlobalScope'];
if ($global_attribute_text !== 'true' && $global_attribute_text !== '1') {
$config->ignored_exceptions[$exception_name] = true;
}
$config->ignored_exceptions_in_global_scope[$exception_name] = true;
}
}
if (isset($config_xml->ignoreExceptions->classAndDescendants)) {
/** @var \SimpleXMLElement $exception_class */
foreach ($config_xml->ignoreExceptions->classAndDescendants as $exception_class) {
$exception_name = (string) $exception_class['name'];
$global_attribute_text = (string) $exception_class['onlyGlobalScope'];
if ($global_attribute_text !== 'true' && $global_attribute_text !== '1') {
$config->ignored_exceptions_and_descendants[$exception_name] = true;
}
$config->ignored_exceptions_and_descendants_in_global_scope[$exception_name] = true;
}
}
}
if (isset($config_xml->forbiddenFunctions) && isset($config_xml->forbiddenFunctions->function)) {
/** @var \SimpleXMLElement $forbidden_function */
foreach ($config_xml->forbiddenFunctions->function as $forbidden_function) {
$config->forbidden_functions[strtolower((string) $forbidden_function['name'])] = true;
}
}
if (isset($config_xml->exitFunctions) && isset($config_xml->exitFunctions->function)) {
/** @var \SimpleXMLElement $exit_function */
foreach ($config_xml->exitFunctions->function as $exit_function) {
$config->exit_functions[strtolower((string) $exit_function['name'])] = true;
}
}
if (isset($config_xml->stubs) && isset($config_xml->stubs->file)) {
/** @var \SimpleXMLElement $stub_file */
foreach ($config_xml->stubs->file as $stub_file) {
$stub_file_name = (string)$stub_file['name'];
if (!Path::isAbsolute($stub_file_name)) {
$stub_file_name = $config->base_dir . DIRECTORY_SEPARATOR . $stub_file_name;
}
$file_path = realpath($stub_file_name);
if (!$file_path) {
throw new Exception\ConfigException(
'Cannot resolve stubfile path '
. rtrim($config->base_dir, DIRECTORY_SEPARATOR)
. DIRECTORY_SEPARATOR
. $stub_file['name']
);
}
if (isset($stub_file['preloadClasses'])) {
$preload_classes = (string)$stub_file['preloadClasses'];
if ($preload_classes === 'true' || $preload_classes === '1') {
$config->addPreloadedStubFile($file_path);
} else {
$config->addStubFile($file_path);
}
} else {
$config->addStubFile($file_path);
}
}
}
// this plugin loading system borrows heavily from etsy/phan
if (isset($config_xml->plugins)) {
if (isset($config_xml->plugins->plugin)) {
/** @var \SimpleXMLElement $plugin */
foreach ($config_xml->plugins->plugin as $plugin) {
$plugin_file_name = (string) $plugin['filename'];
$path = Path::isAbsolute($plugin_file_name)
? $plugin_file_name
: $config->base_dir . $plugin_file_name;
$config->addPluginPath($path);
}
}
if (isset($config_xml->plugins->pluginClass)) {
/** @var \SimpleXMLElement $plugin */
foreach ($config_xml->plugins->pluginClass as $plugin) {
$plugin_class_name = $plugin['class'];
// any child elements are used as plugin configuration
$plugin_config = null;
if ($plugin->count()) {
$plugin_config = $plugin->children();
}
$config->addPluginClass((string) $plugin_class_name, $plugin_config);
}
}
}
if (isset($config_xml->issueHandlers)) {
/** @var \SimpleXMLElement $issue_handler */
foreach ($config_xml->issueHandlers->children() as $key => $issue_handler) {
if ($key === 'PluginIssue') {
$custom_class_name = (string) $issue_handler['name'];
/** @var string $key */
$config->issue_handlers[$custom_class_name] = IssueHandler::loadFromXMLElement(
$issue_handler,
$base_dir
);
} else {
/** @var string $key */
$config->issue_handlers[$key] = IssueHandler::loadFromXMLElement(
$issue_handler,
$base_dir
);
}
}
}
if (isset($config_xml->globals) && isset($config_xml->globals->var)) {
/** @var \SimpleXMLElement $var */
foreach ($config_xml->globals->var as $var) {
$config->globals['$' . (string) $var['name']] = (string) $var['type'];
}
}
return $config;
}
public static function getInstance(): Config
{
if (self::$instance) {
return self::$instance;
}
throw new \UnexpectedValueException('No config initialized');
}
public function setComposerClassLoader(?ClassLoader $loader = null): void
{
$this->composer_class_loader = $loader;
}
public function setCustomErrorLevel(string $issue_key, string $error_level): void
{
$this->issue_handlers[$issue_key] = new IssueHandler();
$this->issue_handlers[$issue_key]->setErrorLevel($error_level);
}
/**
* @throws ConfigException if a Config file could not be found
*
*/
private function loadFileExtensions(SimpleXMLElement $extensions): void
{
foreach ($extensions as $extension) {
$extension_name = preg_replace('/^\.?/', '', (string)$extension['name']);
$this->file_extensions[] = $extension_name;
if (isset($extension['scanner'])) {
$path = $this->base_dir . (string)$extension['scanner'];
if (!file_exists($path)) {
throw new Exception\ConfigException('Error parsing config: cannot find file ' . $path);
}
$this->filetype_scanner_paths[$extension_name] = $path;
}
if (isset($extension['checker'])) {
$path = $this->base_dir . (string)$extension['checker'];
if (!file_exists($path)) {
throw new Exception\ConfigException('Error parsing config: cannot find file ' . $path);
}
$this->filetype_analyzer_paths[$extension_name] = $path;
}
}
}
public function addPluginPath(string $path): void
{
if (!file_exists($path)) {
throw new \InvalidArgumentException('Cannot find plugin file ' . $path);
}
$this->plugin_paths[] = $path;
}
public function addPluginClass(string $class_name, ?SimpleXMLElement $plugin_config = null): void
{
$this->plugin_classes[] = ['class' => $class_name, 'config' => $plugin_config];
}
/** @return array<array{class:string, config:?SimpleXMLElement}> */
public function getPluginClasses(): array
{
return $this->plugin_classes;
}
/**
* Initialises all the plugins (done once the config is fully loaded)
*
* @psalm-suppress MixedAssignment
*/
public function initializePlugins(ProjectAnalyzer $project_analyzer): void
{
$codebase = $project_analyzer->getCodebase();
$project_analyzer->progress->debug('Initializing plugins...' . PHP_EOL);
$socket = new PluginRegistrationSocket($this, $codebase);
// initialize plugin classes earlier to let them hook into subsequent load process
foreach ($this->plugin_classes as $plugin_class_entry) {
$plugin_class_name = $plugin_class_entry['class'];
$plugin_config = $plugin_class_entry['config'];
try {
// Below will attempt to load plugins from the project directory first.
// Failing that, it will use the registered autoload chain, which will load
// plugins from the Psalm directory or phar file. If that fails as well, it
// will fall back to the project autoloader. It may seem that the last step
// will always fail, but that is only true if the project uses the Composer autoloader.
if ($this->composer_class_loader
&& ($plugin_class_path = $this->composer_class_loader->findFile($plugin_class_name))
) {
$project_analyzer->progress->debug(
'Loading plugin ' . $plugin_class_name . ' via require' . PHP_EOL
);
self::requirePath($plugin_class_path);
} else {
if (!class_exists($plugin_class_name, true)) {
throw new \UnexpectedValueException($plugin_class_name . ' is not a known class');
}
}
/**
* @psalm-suppress InvalidStringClass
*
* @var Plugin\PluginEntryPointInterface
*/
$plugin_object = new $plugin_class_name;
$plugin_object($socket, $plugin_config);
} catch (\Throwable $e) {
throw new ConfigException('Failed to load plugin ' . $plugin_class_name, 0, $e);
}
$project_analyzer->progress->debug('Loaded plugin ' . $plugin_class_name . ' successfully' . PHP_EOL);
}
foreach ($this->filetype_scanner_paths as $extension => $path) {
$fq_class_name = $this->getPluginClassForPath(
$codebase,
$path,
FileScanner::class
);
self::requirePath($path);
$this->filetype_scanners[$extension] = $fq_class_name;
}
foreach ($this->filetype_analyzer_paths as $extension => $path) {
$fq_class_name = $this->getPluginClassForPath(
$codebase,
$path,
FileAnalyzer::class
);
self::requirePath($path);
$this->filetype_analyzers[$extension] = $fq_class_name;
}
foreach ($this->plugin_paths as $path) {
try {
$plugin_object = new FileBasedPluginAdapter($path, $this, $codebase);
$plugin_object($socket);
} catch (\Throwable $e) {
throw new ConfigException('Failed to load plugin ' . $path, 0, $e);
}
}
// populate additional aspects after plugins have been initialized
foreach ($socket->getAdditionalFileExtensions() as $fileExtension) {
$this->file_extensions[] = $fileExtension;
}
foreach ($socket->getAdditionalFileTypeScanners() as $extension => $className) {
$this->filetype_scanners[$extension] = $className;
}
foreach ($socket->getAdditionalFileTypeAnalyzers() as $extension => $className) {
$this->filetype_analyzers[$extension] = $className;
}
new \Psalm\Internal\Provider\AddRemoveTaints\HtmlFunctionTainter();
$socket->registerHooksFromClass(\Psalm\Internal\Provider\AddRemoveTaints\HtmlFunctionTainter::class);
}
private static function requirePath(string $path): void
{
/** @psalm-suppress UnresolvableInclude */
require_once($path);
}
/**
* @template T
*
* @param T::class $must_extend
*
* @return class-string<T>
*/
private function getPluginClassForPath(Codebase $codebase, string $path, string $must_extend): string
{
$file_storage = $codebase->createFileStorageForPath($path);
$file_to_scan = new FileScanner($path, $this->shortenFileName($path), true);
$file_to_scan->scan(
$codebase,
$file_storage
);
$declared_classes = ClassLikeAnalyzer::getClassesForFile($codebase, $path);
if (!count($declared_classes)) {
throw new \InvalidArgumentException(
'Plugins must have at least one class in the file - ' . $path . ' has ' .
count($declared_classes)
);
}
$fq_class_name = reset($declared_classes);
if (!$codebase->classlikes->classExtends(
$fq_class_name,
$must_extend
)
) {
throw new \InvalidArgumentException(
'This plugin must extend ' . $must_extend . ' - ' . $path . ' does not'
);
}
/**
* @var class-string<T>
*/
return $fq_class_name;
}
public function shortenFileName(string $to): string
{
if (!is_file($to)) {
return preg_replace('/^' . preg_quote($this->base_dir, '/') . '/', '', $to);
}
$from = $this->base_dir;
// some compatibility fixes for Windows paths
$from = is_dir($from) ? rtrim($from, '\/') . '/' : $from;
$to = is_dir($to) ? rtrim($to, '\/') . '/' : $to;
$from = str_replace('\\', '/', $from);
$to = str_replace('\\', '/', $to);
$from = explode('/', $from);
$to = explode('/', $to);
$relPath = $to;
foreach ($from as $depth => $dir) {
// find first non-matching dir
if ($dir === $to[$depth]) {
// ignore this directory
array_shift($relPath);
} else {
// get number of remaining dirs to $from
$remaining = count($from) - $depth;
if ($remaining > 1) {
// add traversals up to first matching dir
$padLength = (count($relPath) + $remaining - 1) * -1;
$relPath = array_pad($relPath, $padLength, '..');
break;
}
}
}
return implode('/', $relPath);
}
public function reportIssueInFile(string $issue_type, string $file_path): bool
{
if (($this->show_mixed_issues === false || $this->level > 2)
&& in_array($issue_type, self::MIXED_ISSUES, true)
) {
return false;
}
if ($this->mustBeIgnored($file_path)) {
return false;
}
$dependent_files = [strtolower($file_path) => $file_path];
$project_analyzer = ProjectAnalyzer::getInstance();
$codebase = $project_analyzer->getCodebase();
if (!$this->hide_external_errors) {
try {
$file_storage = $codebase->file_storage_provider->get($file_path);
$dependent_files += $file_storage->required_by_file_paths;
} catch (\InvalidArgumentException $e) {
// do nothing
}
}
$any_file_path_matched = false;
foreach ($dependent_files as $dependent_file_path) {
if (((!$project_analyzer->full_run && $codebase->analyzer->canReportIssues($dependent_file_path))
|| $project_analyzer->canReportIssues($dependent_file_path))
&& ($file_path === $dependent_file_path || !$this->mustBeIgnored($dependent_file_path))
) {
$any_file_path_matched = true;
break;
}
}
if (!$any_file_path_matched) {
return false;
}
if ($this->getReportingLevelForFile($issue_type, $file_path) === self::REPORT_SUPPRESS) {
return false;
}
return true;
}
public function isInProjectDirs(string $file_path): bool
{
return $this->project_files && $this->project_files->allows($file_path);
}
public function isInExtraDirs(string $file_path): bool
{
return $this->extra_files && $this->extra_files->allows($file_path);
}
public function mustBeIgnored(string $file_path): bool
{
return $this->project_files && $this->project_files->forbids($file_path);
}
public function trackTaintsInPath(string $file_path): bool
{
return !$this->taint_analysis_ignored_files
|| $this->taint_analysis_ignored_files->allows($file_path);
}
public function getReportingLevelForIssue(CodeIssue $e): string
{
$fqcn_parts = explode('\\', get_class($e));
$issue_type = array_pop($fqcn_parts);
$reporting_level = null;
if ($e instanceof ClassIssue) {
$reporting_level = $this->getReportingLevelForClass($issue_type, $e->fq_classlike_name);
} elseif ($e instanceof MethodIssue) {
$reporting_level = $this->getReportingLevelForMethod($issue_type, $e->method_id);
} elseif ($e instanceof FunctionIssue) {
$reporting_level = $this->getReportingLevelForFunction($issue_type, $e->function_id);
} elseif ($e instanceof PropertyIssue) {
$reporting_level = $this->getReportingLevelForProperty($issue_type, $e->property_id);
} elseif ($e instanceof ArgumentIssue && $e->function_id) {
$reporting_level = $this->getReportingLevelForArgument($issue_type, $e->function_id);
} elseif ($e instanceof VariableIssue) {
$reporting_level = $this->getReportingLevelForVariable($issue_type, $e->var_name);
}
if ($reporting_level === null) {
$reporting_level = $this->getReportingLevelForFile($issue_type, $e->getFilePath());
}
if (!$this->report_info && $reporting_level === self::REPORT_INFO) {
$reporting_level = self::REPORT_SUPPRESS;
}
$parent_issue_type = self::getParentIssueType($issue_type);
if ($parent_issue_type && $reporting_level === Config::REPORT_ERROR) {
$parent_reporting_level = $this->getReportingLevelForFile($parent_issue_type, $e->getFilePath());
if ($parent_reporting_level !== $reporting_level) {
return $parent_reporting_level;
}
}
return $reporting_level;
}
/**
* @psalm-pure
*/
public static function getParentIssueType(string $issue_type): ?string
{
if ($issue_type === 'PossiblyUndefinedIntArrayOffset'
|| $issue_type === 'PossiblyUndefinedStringArrayOffset'
) {
return 'PossiblyUndefinedArrayOffset';
}
if ($issue_type === 'PossiblyNullReference') {
return 'NullReference';
}
if ($issue_type === 'PossiblyFalseReference') {
return null;
}
if ($issue_type === 'PossiblyUndefinedArrayOffset') {
return null;
}
if (strpos($issue_type, 'Possibly') === 0) {
$stripped_issue_type = preg_replace('/^Possibly(False|Null)?/', '', $issue_type);
if (strpos($stripped_issue_type, 'Invalid') === false && strpos($stripped_issue_type, 'Un') !== 0) {
$stripped_issue_type = 'Invalid' . $stripped_issue_type;
}
return $stripped_issue_type;
}
if (strpos($issue_type, 'Tainted') === 0) {
return 'TaintedInput';
}
if (preg_match('/^(False|Null)[A-Z]/', $issue_type) && !strpos($issue_type, 'Reference')) {
return preg_replace('/^(False|Null)/', 'Invalid', $issue_type);
}
if ($issue_type === 'UndefinedInterfaceMethod') {
return 'UndefinedMethod';
}
if ($issue_type === 'UndefinedMagicPropertyFetch') {
return 'UndefinedPropertyFetch';
}
if ($issue_type === 'UndefinedMagicPropertyAssignment') {
return 'UndefinedPropertyAssignment';
}
if ($issue_type === 'UndefinedMagicMethod') {
return 'UndefinedMethod';
}
if ($issue_type === 'PossibleRawObjectIteration') {
return 'RawObjectIteration';
}
if ($issue_type === 'UninitializedProperty') {
return 'PropertyNotSetInConstructor';
}
if ($issue_type === 'InvalidDocblockParamName') {
return 'InvalidDocblock';
}
if ($issue_type === 'UnusedClosureParam') {
return 'UnusedParam';
}
if ($issue_type === 'UnusedConstructor') {
return 'UnusedMethod';
}
if ($issue_type === 'StringIncrement') {
return 'InvalidOperand';
}
if ($issue_type === 'InvalidLiteralArgument') {
return 'InvalidArgument';
}
if ($issue_type === 'RedundantConditionGivenDocblockType') {
return 'RedundantCondition';
}
if ($issue_type === 'RedundantCastGivenDocblockType') {
return 'RedundantCast';
}
if ($issue_type === 'TraitMethodSignatureMismatch') {
return 'MethodSignatureMismatch';
}
if ($issue_type === 'ImplementedParamTypeMismatch') {
return 'MoreSpecificImplementedParamType';
}
if ($issue_type === 'UndefinedDocblockClass') {
return 'UndefinedClass';
}
if ($issue_type === 'UnusedForeachValue') {
return 'UnusedVariable';
}
return null;
}
public function getReportingLevelForFile(string $issue_type, string $file_path): string
{
if (isset($this->issue_handlers[$issue_type])) {
return $this->issue_handlers[$issue_type]->getReportingLevelForFile($file_path);
}
// this string is replaced by scoper for Phars, so be careful
$issue_class = 'Psalm\\Issue\\' . $issue_type;
if (!class_exists($issue_class) || !is_a($issue_class, \Psalm\Issue\CodeIssue::class, true)) {
return self::REPORT_ERROR;
}
/** @var int */
$issue_level = $issue_class::ERROR_LEVEL;
if ($issue_level > 0 && $issue_level < $this->level) {
return self::REPORT_INFO;
}
return self::REPORT_ERROR;
}
public function getReportingLevelForClass(string $issue_type, string $fq_classlike_name): ?string
{
if (isset($this->issue_handlers[$issue_type])) {
return $this->issue_handlers[$issue_type]->getReportingLevelForClass($fq_classlike_name);
}
return null;
}
public function getReportingLevelForMethod(string $issue_type, string $method_id): ?string
{
if (isset($this->issue_handlers[$issue_type])) {
return $this->issue_handlers[$issue_type]->getReportingLevelForMethod($method_id);
}
return null;
}
public function getReportingLevelForFunction(string $issue_type, string $function_id): ?string
{
if (isset($this->issue_handlers[$issue_type])) {
return $this->issue_handlers[$issue_type]->getReportingLevelForFunction($function_id);
}
return null;
}
public function getReportingLevelForArgument(string $issue_type, string $function_id): ?string
{
if (isset($this->issue_handlers[$issue_type])) {
return $this->issue_handlers[$issue_type]->getReportingLevelForArgument($function_id);
}
return null;
}
public function getReportingLevelForProperty(string $issue_type, string $property_id): ?string
{
if (isset($this->issue_handlers[$issue_type])) {
return $this->issue_handlers[$issue_type]->getReportingLevelForProperty($property_id);
}
return null;
}
public function getReportingLevelForVariable(string $issue_type, string $var_name): ?string
{
if (isset($this->issue_handlers[$issue_type])) {
return $this->issue_handlers[$issue_type]->getReportingLevelForVariable($var_name);
}
return null;
}
/**
* @return array<string>
*/
public function getProjectDirectories(): array
{
if (!$this->project_files) {
return [];
}
return $this->project_files->getDirectories();
}
/**
* @return array<string>
*/
public function getProjectFiles(): array
{
if (!$this->project_files) {
return [];
}
return $this->project_files->getFiles();
}
/**
* @return array<string>
*/
public function getExtraDirectories(): array
{
if (!$this->extra_files) {
return [];
}
return $this->extra_files->getDirectories();
}
public function reportTypeStatsForFile(string $file_path): bool
{
return $this->project_files
&& $this->project_files->allows($file_path)
&& $this->project_files->reportTypeStats($file_path);
}
public function useStrictTypesForFile(string $file_path): bool
{
return $this->project_files && $this->project_files->useStrictTypes($file_path);
}
/**
* @return array<int, string>
*/
public function getFileExtensions(): array
{
return $this->file_extensions;
}
/**
* @return array<string, class-string<FileScanner>>
*/
public function getFiletypeScanners(): array
{
return $this->filetype_scanners;
}
/**
* @return array<string, class-string<FileAnalyzer>>
*/
public function getFiletypeAnalyzers(): array
{
return $this->filetype_analyzers;
}
/**
* @return array<int, string>
*/
public function getMockClasses(): array
{
return $this->mock_classes;
}
public function visitPreloadedStubFiles(Codebase $codebase, ?Progress $progress = null): void
{
if ($progress === null) {
$progress = new VoidProgress();
}
$core_generic_files = [];
if (\PHP_VERSION_ID < 80000 && $codebase->php_major_version >= 8) {
$stringable_path = dirname(__DIR__, 2) . '/stubs/Php80.phpstub';
if (!file_exists($stringable_path)) {
throw new \UnexpectedValueException('Cannot locate PHP 8.0 classes');
}
$core_generic_files[] = $stringable_path;
}
if (\PHP_VERSION_ID < 80100 && $codebase->php_major_version >= 8 && $codebase->php_minor_version >= 1) {
$stringable_path = dirname(__DIR__, 2) . '/stubs/Php81.phpstub';
if (!file_exists($stringable_path)) {
throw new \UnexpectedValueException('Cannot locate PHP 8.1 classes');
}
$core_generic_files[] = $stringable_path;
}
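// Merge the version-specific core stubs with any stubs preloaded through the config.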
$stub_files = array_merge($core_generic_files, $this->preloaded_stub_files);
if (!$stub_files) {
return;
}
foreach ($stub_files as $file_path) {
$file_path = \str_replace(['/', '\\'], DIRECTORY_SEPARATOR, $file_path);
$codebase->scanner->addFileToDeepScan($file_path);
}
$progress->debug('Registering preloaded stub files' . "\n");
$codebase->register_stub_files = true;
$codebase->scanFiles();
$codebase->register_stub_files = false;
$progress->debug('Finished registering preloaded stub files' . "\n");
}
public function visitStubFiles(Codebase $codebase, ?Progress $progress = null): void
{
if ($progress === null) {
$progress = new VoidProgress();
}
$codebase->register_stub_files = true;
$core_generic_files = [
dirname(__DIR__, 2) . '/stubs/CoreGenericFunctions.phpstub',
dirname(__DIR__, 2) . '/stubs/CoreGenericClasses.phpstub',
dirname(__DIR__, 2) . '/stubs/CoreGenericIterators.phpstub',
dirname(__DIR__, 2) . '/stubs/CoreImmutableClasses.phpstub',
dirname(__DIR__, 2) . '/stubs/DOM.phpstub',
dirname(__DIR__, 2) . '/stubs/Reflection.phpstub',
dirname(__DIR__, 2) . '/stubs/SPL.phpstub',
];
foreach ($core_generic_files as $stub_path) {
if (!file_exists($stub_path)) {
throw new \UnexpectedValueException('Cannot locate ' . $stub_path);
}
}
if (\PHP_VERSION_ID >= 80000 && $codebase->php_major_version >= 8) {
$stringable_path = dirname(__DIR__, 2) . '/stubs/Php80.phpstub';
if (!file_exists($stringable_path)) {
throw new \UnexpectedValueException('Cannot locate PHP 8.0 classes');
}
$core_generic_files[] = $stringable_path;
}
if (\PHP_VERSION_ID >= 80100 && $codebase->php_major_version >= 8 && $codebase->php_minor_version >= 1) {
$stringable_path = dirname(__DIR__, 2) . '/stubs/Php81.phpstub';
if (!file_exists($stringable_path)) {
throw new \UnexpectedValueException('Cannot locate PHP 8.1 classes');
}
$core_generic_files[] = $stringable_path;
}
if (\extension_loaded('PDO')) {
$ext_pdo_path = dirname(__DIR__, 2) . '/stubs/pdo.phpstub';
if (!file_exists($ext_pdo_path)) {
throw new \UnexpectedValueException('Cannot locate pdo classes');
}
$core_generic_files[] = $ext_pdo_path;
}
if (\extension_loaded('soap')) {
$ext_soap_path = dirname(__DIR__, 2) . '/stubs/soap.phpstub';
if (!file_exists($ext_soap_path)) {
throw new \UnexpectedValueException('Cannot locate soap classes');
}
$core_generic_files[] = $ext_soap_path;
}
if (\extension_loaded('ds')) {
$ext_ds_path = dirname(__DIR__, 2) . '/stubs/ext-ds.phpstub';
if (!file_exists($ext_ds_path)) {
throw new \UnexpectedValueException('Cannot locate ext-ds classes');
}
$core_generic_files[] = $ext_ds_path;
}
if (\extension_loaded('mongodb')) {
$ext_mongodb_path = dirname(__DIR__, 2) . '/stubs/mongodb.phpstub';
if (!file_exists($ext_mongodb_path)) {
throw new \UnexpectedValueException('Cannot locate mongodb classes');
}
$core_generic_files[] = $ext_mongodb_path;
}
$stub_files = array_merge($core_generic_files, $this->stub_files);
if ($this->load_xdebug_stub) {
$xdebug_stub_path = dirname(__DIR__, 2) . '/stubs/Xdebug.phpstub';
if (!file_exists($xdebug_stub_path)) {
throw new \UnexpectedValueException('Cannot locate Xdebug stub');
}
$stub_files[] = $xdebug_stub_path;
}
$phpstorm_meta_path = $this->base_dir . DIRECTORY_SEPARATOR . '.phpstorm.meta.php';
if ($this->use_phpstorm_meta_path) {
if (is_file($phpstorm_meta_path)) {
$stub_files[] = $phpstorm_meta_path;
} elseif (is_dir($phpstorm_meta_path)) {
$phpstorm_meta_path = realpath($phpstorm_meta_path);
foreach (glob($phpstorm_meta_path . '/*.meta.php', GLOB_NOSORT) as $glob) {
if (is_file($glob) && realpath(dirname($glob)) === $phpstorm_meta_path) {
$stub_files[] = $glob;
}
}
}
}
foreach ($stub_files as $file_path) {
$file_path = \str_replace(['/', '\\'], DIRECTORY_SEPARATOR, $file_path);
$codebase->scanner->addFileToDeepScan($file_path);
}
$progress->debug('Registering stub files' . "\n");
$codebase->scanFiles();
$progress->debug('Finished registering stub files' . "\n");
$codebase->register_stub_files = false;
}
public function getCacheDirectory(): ?string
{
return $this->cache_directory;
}
public function getGlobalCacheDirectory(): ?string
{
return $this->global_cache_directory;
}
/**
* @return array<string, mixed>
*/
public function getPredefinedConstants(): array
{
return $this->predefined_constants;
}
public function collectPredefinedConstants(): void
{
$this->predefined_constants = get_defined_constants();
}
/**
* @return array<callable-string, bool>
*/
public function getPredefinedFunctions(): array
{
return $this->predefined_functions;
}
public function collectPredefinedFunctions(): void
{
$defined_functions = get_defined_functions();
if (isset($defined_functions['user'])) {
foreach ($defined_functions['user'] as $function_name) {
$this->predefined_functions[$function_name] = true;
}
}
if (isset($defined_functions['internal'])) {
foreach ($defined_functions['internal'] as $function_name) {
$this->predefined_functions[$function_name] = true;
}
}
}
public function setIncludeCollector(IncludeCollector $include_collector): void
{
$this->include_collector = $include_collector;
}
/**
* @psalm-suppress MixedAssignment
* @psalm-suppress MixedArrayAccess
*/
public function visitComposerAutoloadFiles(ProjectAnalyzer $project_analyzer, ?Progress $progress = null): void
{
if ($progress === null) {
$progress = new VoidProgress();
}
if (!$this->include_collector) {
throw new LogicException("IncludeCollector should be set at this point");
}
$vendor_autoload_files_path
= $this->base_dir . DIRECTORY_SEPARATOR . 'vendor'
. DIRECTORY_SEPARATOR . 'composer' . DIRECTORY_SEPARATOR . 'autoload_files.php';
if (file_exists($vendor_autoload_files_path)) {
$this->include_collector->runAndCollect(
function () use ($vendor_autoload_files_path) {
/**
* @psalm-suppress UnresolvableInclude
* @var string[]
*/
return require $vendor_autoload_files_path;
}
);
}
$codebase = $project_analyzer->getCodebase();
$this->collectPredefinedFunctions();
if ($this->autoloader) {
// some classes that we think are missing may not actually be missing
// as they might be autoloadable once we require the autoloader below
$codebase->classlikes->forgetMissingClassLikes();
$this->include_collector->runAndCollect(
function () {
// run the require inside a closure so its local scope does not leak into this method
/** @psalm-suppress UnresolvableInclude */
require $this->autoloader;
}
);
}
$this->collectPredefinedConstants();
$autoload_included_files = $this->include_collector->getFilteredIncludedFiles();
if ($autoload_included_files) {
$codebase->register_autoload_files = true;
$progress->debug('Registering autoloaded files' . "\n");
foreach ($autoload_included_files as $file_path) {
$file_path = \str_replace(['/', '\\'], DIRECTORY_SEPARATOR, $file_path);
$progress->debug(' ' . $file_path . "\n");
$codebase->scanner->addFileToDeepScan($file_path);
}
$codebase->scanner->scanFiles($codebase->classlikes);
$progress->debug('Finished registering autoloaded files' . "\n");
$codebase->register_autoload_files = false;
}
}
/**
* @return string|false
*/
public function getComposerFilePathForClassLike(string $fq_classlike_name)
{
if (!$this->composer_class_loader) {
return false;
}
return $this->composer_class_loader->findFile($fq_classlike_name);
}
public function getPotentialComposerFilePathForClassLike(string $class): ?string
{
if (!$this->composer_class_loader) {
return null;
}
/** @var array<string, array<int, string>> */
$psr4_prefixes = $this->composer_class_loader->getPrefixesPsr4();
// PSR-4 lookup
$logicalPathPsr4 = strtr($class, '\\', DIRECTORY_SEPARATOR) . '.php';
$candidate_path = null;
$maxDepth = 0;
$subPath = $class;
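// Walk up the namespace, keeping the deepest PSR-4 prefix whose mapped directory lies inside the project dirs.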
while (false !== $lastPos = strrpos($subPath, '\\')) {
$subPath = substr($subPath, 0, $lastPos);
$search = $subPath . '\\';
if (isset($psr4_prefixes[$search])) {
$depth = substr_count($search, '\\');
$pathEnd = DIRECTORY_SEPARATOR . substr($logicalPathPsr4, $lastPos + 1);
foreach ($psr4_prefixes[$search] as $dir) {
$dir = realpath($dir);
if ($dir
&& $depth > $maxDepth
&& $this->isInProjectDirs($dir . DIRECTORY_SEPARATOR . 'testdummy.php')
) {
$maxDepth = $depth;
$candidate_path = realpath($dir) . $pathEnd;
}
}
}
}
return $candidate_path;
}
public static function removeCacheDirectory(string $dir): void
{
if (is_dir($dir)) {
$objects = scandir($dir, SCANDIR_SORT_NONE);
if ($objects === false) {
throw new \UnexpectedValueException('Not expecting false here');
}
foreach ($objects as $object) {
if ($object !== '.' && $object !== '..') {
if (filetype($dir . '/' . $object) === 'dir') {
self::removeCacheDirectory($dir . '/' . $object);
} else {
unlink($dir . '/' . $object);
}
}
}
reset($objects);
rmdir($dir);
}
}
public function setServerMode(): void
{
$this->cache_directory .= '-s';
}
public function addStubFile(string $stub_file): void
{
$this->stub_files[$stub_file] = $stub_file;
}
public function hasStubFile(string $stub_file): bool
{
return isset($this->stub_files[$stub_file]);
}
/**
* @return array<string, string>
*/
public function getStubFiles(): array
{
return $this->stub_files;
}
public function addPreloadedStubFile(string $stub_file): void
{
$this->preloaded_stub_files[$stub_file] = $stub_file;
}
public function getPhpVersion(): ?string
{
if (isset($this->configured_php_version)) {
return $this->configured_php_version;
}
return $this->getPHPVersionFromComposerJson();
}
private function setBooleanAttribute(string $name, bool $value): void
{
$this->$name = $value;
}
/**
* @psalm-suppress MixedAssignment
* @psalm-suppress MixedArrayAccess
*/
private function getPHPVersionFromComposerJson(): ?string
{
$composer_json_path = Composer::getJsonFilePath($this->base_dir);
if (file_exists($composer_json_path)) {
if (!$composer_json = json_decode(file_get_contents($composer_json_path), true)) {
throw new \UnexpectedValueException('Invalid composer.json at ' . $composer_json_path);
}
$php_version = $composer_json['require']['php'] ?? null;
if (\is_string($php_version)) {
$version_parser = new VersionParser();
$constraint = $version_parser->parseConstraints($php_version);
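// Return the lowest candidate PHP version that satisfies the composer constraint.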
foreach (['5.4', '5.5', '5.6', '7.0', '7.1', '7.2', '7.3', '7.4', '8.0'] as $candidate) {
if ($constraint->matches(new \Composer\Semver\Constraint\Constraint('<=', "$candidate.0.0-dev"))
|| $constraint->matches(new \Composer\Semver\Constraint\Constraint('<=', "$candidate.999"))
) {
return $candidate;
}
}
}
}
return null;
}
public function addUniversalObjectCrate(string $class): void
{
if (!class_exists($class, true)) {
throw new \UnexpectedValueException($class . ' is not a known class');
}
$this->universal_object_crates[] = $class;
}
/**
* @return array<int, lowercase-string>
*/
public function getUniversalObjectCrates(): array
{
return array_map('strtolower', $this->universal_object_crates);
}
}
| 1 | 11,020 | I saw a wrong reuse of the cache between a partial analysis of a single file and a full run where errors due to lack of context on the first partial run was reported on the full run. Shouldn't we use a hash that is composer.lock + psalm.xml + command line to be safe? | vimeo-psalm | php |
@@ -3077,14 +3077,12 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
- from databricks.koalas.indexes import DatetimeIndex
-
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("between_time currently only works for axis=0")
- if not isinstance(self.index, DatetimeIndex):
+ if not isinstance(self.index, ks.DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
kdf = self.copy() | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from collections import OrderedDict, defaultdict, namedtuple
from collections.abc import Mapping
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
import types
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import (
Any,
Optional,
List,
Tuple,
Union,
Generic,
TypeVar,
Iterable,
Iterator,
Dict,
Callable,
cast,
TYPE_CHECKING,
)
import datetime
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like, is_scalar
from pandas.api.extensions import ExtensionDtype
from pandas.tseries.frequencies import DateOffset, to_offset
if TYPE_CHECKING:
from pandas.io.formats.style import Styler
if LooseVersion(pd.__version__) >= LooseVersion("0.24"):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
import pyspark
from pyspark import StorageLevel
from pyspark import sql as spark
from pyspark.sql import Column, DataFrame as SparkDataFrame, functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (
BooleanType,
DoubleType,
FloatType,
NumericType,
StringType,
StructType,
StructField,
ArrayType,
)
from pyspark.sql.window import Window
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.accessors import KoalasFrameMethods
from databricks.koalas.config import option_context, get_option
from databricks.koalas.spark import functions as SF
from databricks.koalas.spark.accessors import SparkFrameMethods, CachedSparkFrameMethods
from databricks.koalas.utils import (
align_diff_frames,
column_labels_level,
combine_frames,
default_session,
is_name_like_tuple,
is_name_like_value,
is_testing,
name_like_string,
same_anchor,
scol_for,
validate_arguments_and_invoke_function,
validate_axis,
validate_bool_kwarg,
validate_how,
verify_temp_column_name,
)
from databricks.koalas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from databricks.koalas.generic import Frame
from databricks.koalas.internal import (
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
SPARK_DEFAULT_INDEX_NAME,
SPARK_DEFAULT_SERIES_NAME,
)
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.typedef import (
as_spark_type,
infer_return_type,
spark_type_to_pandas_dtype,
DataFrameType,
SeriesType,
Scalar,
ScalarType,
)
from databricks.koalas.plot import KoalasPlotAccessor
if TYPE_CHECKING:
from databricks.koalas.indexes import Index
from databricks.koalas.series import Series
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# Both patterns look for the footer string produced by pandas' string representation.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$"
)
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with the operator version, which returns the same
results. Also the reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(df)
angles degrees
circle 0 720
triangle 6 360
rectangle 8 720
>>> df + df + df
angles degrees
circle 0 1080
triangle 9 540
rectangle 12 1080
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle inf 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.floordiv(10)
angles degrees
circle 0.0 36.0
triangle 0.0 18.0
rectangle 0.0 36.0
>>> df.rfloordiv(10) # doctest: +SKIP
angles degrees
circle inf 0.0
triangle 3.0 0.0
rectangle 2.0 0.0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
T = TypeVar("T")
def _create_tuple_for_frame_type(params):
"""
This is a workaround to support variadic generic in DataFrame.
See https://github.com/python/typing/issues/193
We always wrap the given type hints in a tuple to mimic the variadic generic.
"""
from databricks.koalas.typedef import NameTypeHolder
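# Normalize a zip of (name, type) pairs into slices so it takes the named-column path below.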
if isinstance(params, zip):
params = [slice(name, tpe) for name, tpe in params]
if isinstance(params, slice):
params = (params,)
if (
hasattr(params, "__len__")
and isinstance(params, Iterable)
and all(isinstance(param, slice) for param in params)
):
for param in params:
if isinstance(param.start, str) and param.step is not None:
raise TypeError(
"Type hints should be specified as "
"DataFrame['name': type]; however, got %s" % param
)
name_classes = []
for param in params:
new_class = type("NameType", (NameTypeHolder,), {})
new_class.name = param.start
# When the given argument is a numpy's dtype instance.
new_class.tpe = param.stop.type if isinstance(param.stop, np.dtype) else param.stop
name_classes.append(new_class)
return Tuple[tuple(name_classes)]
if not isinstance(params, Iterable):
params = [params]
new_params = []
for param in params:
if isinstance(param, ExtensionDtype):
new_class = type("NameType", (NameTypeHolder,), {})
new_class.tpe = param
new_params.append(new_class)
else:
new_params.append(param.type if isinstance(param, np.dtype) else param)
return Tuple[tuple(new_params)]
if (3, 5) <= sys.version_info < (3, 7):
from typing import GenericMeta # type: ignore
# This is a workaround to support variadic generic in DataFrame in Python 3.5+.
# See https://github.com/python/typing/issues/193
# We wrap the input params in a tuple to mimic a variadic generic.
old_getitem = GenericMeta.__getitem__ # type: ignore
def new_getitem(self, params):
if hasattr(self, "is_dataframe"):
return old_getitem(self, _create_tuple_for_frame_type(params))
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
class DataFrame(Frame, Generic[T]):
"""
Koalas DataFrame that corresponds to pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a pandas DataFrame, a Spark DataFrame, or a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
internal = data
elif isinstance(data, spark.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
internal = InternalFrame(spark_frame=data, index_spark_columns=None)
elif isinstance(data, ks.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_frame()
internal = data._internal
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
internal = InternalFrame.from_pandas(pdf)
object.__setattr__(self, "_internal_frame", internal)
@property
def _ksers(self):
""" Return a dict of column label -> Series which anchors `self`. """
from databricks.koalas.series import Series
if not hasattr(self, "_kseries"):
object.__setattr__(
self,
"_kseries",
{label: Series(data=self, index=label) for label in self._internal.column_labels},
)
else:
kseries = self._kseries
assert len(self._internal.column_labels) == len(kseries), (
len(self._internal.column_labels),
len(kseries),
)
if any(self is not kser._kdf for kser in kseries.values()):
# Refresh the dict to contain only Series anchoring `self`.
self._kseries = {
label: kseries[label]
if self is kseries[label]._kdf
else Series(data=self, index=label)
for label in self._internal.column_labels
}
return self._kseries
@property
def _internal(self) -> InternalFrame:
return self._internal_frame
def _update_internal_frame(self, internal: InternalFrame, requires_same_anchor: bool = True):
"""
Update InternalFrame with the given one.
If a column_label has changed or the new InternalFrame does not share the same `anchor`,
disconnect the link to the Series and create a new one.
If `requires_same_anchor` is `False`, the anchor check is skipped and the InternalFrame is
updated unconditionally, e.g., when replacing the internal with its resolved_copy, or when
updating the underlying Spark DataFrame after combining it with a different Spark DataFrame.
:param internal: the new InternalFrame
:param requires_same_anchor: whether checking the same anchor
"""
from databricks.koalas.series import Series
if hasattr(self, "_kseries"):
kseries = {}
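# Re-link any cached Series whose label changed or that no longer anchors the new internal.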
for old_label, new_label in zip_longest(
self._internal.column_labels, internal.column_labels
):
if old_label is not None:
kser = self._ksers[old_label]
renamed = old_label != new_label
not_same_anchor = requires_same_anchor and not same_anchor(internal, kser)
if renamed or not_same_anchor:
kdf = DataFrame(self._internal.select_column(old_label)) # type: DataFrame
kser._update_anchor(kdf)
kser = None
else:
kser = None
if new_label is not None:
if kser is None:
kser = Series(data=self, index=new_label)
kseries[new_label] = kser
self._kseries = kseries
self._internal_frame = internal
if hasattr(self, "_repr_pandas_cache"):
del self._repr_pandas_cache
@property
def ndim(self) -> int:
"""
Return an int representing the number of array dimensions.
Return 2 for a DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', None],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
NaN 7 8
>>> df.ndim
2
"""
return 2
@property
def axes(self) -> List:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')]
"""
return [self.index, self.columns]
def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=True, **kwargs):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equals the
number of columns.
Parameters
----------
sfun : either a 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
name : original pandas API name.
axis : axis to apply. 0 or 1, or 'index' or 'columns'.
numeric_only : bool, default True
Include only float, int, boolean columns. False is not supported. This parameter
is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter
currently.
"""
from inspect import signature
from databricks.koalas.series import Series, first_series
axis = validate_axis(axis)
if axis == 0:
min_count = kwargs.get("min_count", 0)
exprs = [F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]
new_column_labels = []
num_args = len(signature(sfun).parameters)
for label in self._internal.column_labels:
spark_column = self._internal.spark_column_for(label)
spark_type = self._internal.spark_type_for(label)
is_numeric_or_boolean = isinstance(spark_type, (NumericType, BooleanType))
keep_column = not numeric_only or is_numeric_or_boolean
if keep_column:
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
scol = sfun(spark_column)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
scol = sfun(spark_column, spark_type)
if min_count > 0:
scol = F.when(
Frame._count_expr(spark_column, spark_type) >= min_count, scol
)
exprs.append(scol.alias(name_like_string(label)))
new_column_labels.append(label)
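# Only the dummy index literal remains when no columns were kept; return an empty Series.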
if len(exprs) == 1:
return Series([])
sdf = self._internal.spark_frame.select(*exprs)
# The data is expected to be small so it's fine to transpose/use default index.
with ks.option_context("compute.max_rows", 1):
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
column_labels=new_column_labels,
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal).transpose())
else:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only, **kwargs)
if len(pdf) <= limit:
return Series(pser)
@pandas_udf(returnType=as_spark_type(pser.dtype.type))
def calculate_columns_axis(*cols):
return getattr(pd.concat(cols, axis=1), name)(
axis=axis, numeric_only=numeric_only, **kwargs
)
column_name = verify_temp_column_name(
self._internal.spark_frame.select(self._internal.index_spark_columns),
"__calculate_columns_axis__",
)
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns
+ [calculate_columns_axis(*self._internal.data_spark_columns).alias(column_name)]
)
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
)
return first_series(DataFrame(internal)).rename(pser.name)
def _kser_for(self, label):
"""
Create Series with a proper column label.
The given label must be verified to exist in `InternalFrame.column_labels`.
For example, in some method, self is like:
>>> self = ks.range(3)
`self._kser_for(label)` can be used with `InternalFrame.column_labels`:
>>> self._kser_for(self._internal.column_labels[0])
0 0
1 1
2 2
Name: id, dtype: int64
`self._kser_for(label)` must not be used directly with user inputs.
In that case, `self[label]` should be used instead, which checks the label exists or not:
>>> self['id']
0 0
1 1
2 2
Name: id, dtype: int64
"""
return self._ksers[label]
def _apply_series_op(self, op, should_resolve: bool = False):
applied = []
for label in self._internal.column_labels:
applied.append(op(self._kser_for(label)))
internal = self._internal.with_new_columns(applied)
if should_resolve:
internal = internal.resolved_copy
return DataFrame(internal)
# Arithmetic Operators
def _map_series_op(self, op, other):
from databricks.koalas.base import IndexOpsMixin
if not isinstance(other, DataFrame) and (
isinstance(other, IndexOpsMixin) or is_sequence(other)
):
raise ValueError(
"%s with a sequence is currently not supported; "
"however, got %s." % (op, type(other).__name__)
)
if isinstance(other, DataFrame):
if self._internal.column_labels_level != other._internal.column_labels_level:
raise ValueError("cannot join with no overlapping index names")
if not same_anchor(self, other):
# Different DataFrames
def apply_op(kdf, this_column_labels, that_column_labels):
for this_label, that_label in zip(this_column_labels, that_column_labels):
yield (
getattr(kdf._kser_for(this_label), op)(
kdf._kser_for(that_label)
).rename(this_label),
this_label,
)
return align_diff_frames(apply_op, self, other, fillna=True, how="full")
else:
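# Same anchor: combine column-wise, padding columns missing from either frame with nulls.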
applied = []
column_labels = []
for label in self._internal.column_labels:
if label in other._internal.column_labels:
applied.append(getattr(self._kser_for(label), op)(other._kser_for(label)))
else:
applied.append(
F.lit(None)
.cast(self._internal.spark_type_for(label))
.alias(name_like_string(label))
)
column_labels.append(label)
for label in other._internal.column_labels:
if label not in column_labels:
applied.append(
F.lit(None)
.cast(other._internal.spark_type_for(label))
.alias(name_like_string(label))
)
column_labels.append(label)
internal = self._internal.with_new_columns(applied, column_labels=column_labels)
return DataFrame(internal)
else:
return self._apply_series_op(lambda kser: getattr(kser, op)(other))
def __add__(self, other) -> "DataFrame":
return self._map_series_op("add", other)
def __radd__(self, other) -> "DataFrame":
return self._map_series_op("radd", other)
def __div__(self, other) -> "DataFrame":
return self._map_series_op("div", other)
def __rdiv__(self, other) -> "DataFrame":
return self._map_series_op("rdiv", other)
def __truediv__(self, other) -> "DataFrame":
return self._map_series_op("truediv", other)
def __rtruediv__(self, other) -> "DataFrame":
return self._map_series_op("rtruediv", other)
def __mul__(self, other) -> "DataFrame":
return self._map_series_op("mul", other)
def __rmul__(self, other) -> "DataFrame":
return self._map_series_op("rmul", other)
def __sub__(self, other) -> "DataFrame":
return self._map_series_op("sub", other)
def __rsub__(self, other) -> "DataFrame":
return self._map_series_op("rsub", other)
def __pow__(self, other) -> "DataFrame":
return self._map_series_op("pow", other)
def __rpow__(self, other) -> "DataFrame":
return self._map_series_op("rpow", other)
def __mod__(self, other) -> "DataFrame":
return self._map_series_op("mod", other)
def __rmod__(self, other) -> "DataFrame":
return self._map_series_op("rmod", other)
def __floordiv__(self, other) -> "DataFrame":
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other) -> "DataFrame":
return self._map_series_op("rfloordiv", other)
def __abs__(self) -> "DataFrame":
return self._apply_series_op(lambda kser: abs(kser))
def __neg__(self) -> "DataFrame":
return self._apply_series_op(lambda kser: -kser)
def add(self, other) -> "DataFrame":
return self + other
# create accessor for plot
plot = CachedAccessor("plot", KoalasPlotAccessor)
# create accessor for Spark related methods.
spark = CachedAccessor("spark", SparkFrameMethods)
# create accessor for Koalas specific methods.
koalas = CachedAccessor("koalas", KoalasFrameMethods)
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = KoalasPlotAccessor.hist.__doc__
def kde(self, bw_method=None, ind=None, **kwds):
return self.plot.kde(bw_method, ind, **kwds)
kde.__doc__ = KoalasPlotAccessor.kde.__doc__
add.__doc__ = _flex_doc_FRAME.format(
desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd"
)
def radd(self, other) -> "DataFrame":
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc="Addition", op_name="+", equiv="other + dataframe", reverse="add"
)
def div(self, other) -> "DataFrame":
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv"
)
divide = div
def rdiv(self, other) -> "DataFrame":
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div"
)
def truediv(self, other) -> "DataFrame":
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv"
)
def rtruediv(self, other) -> "DataFrame":
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv"
)
def mul(self, other) -> "DataFrame":
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul"
)
multiply = mul
def rmul(self, other) -> "DataFrame":
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul"
)
def sub(self, other) -> "DataFrame":
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub"
)
subtract = sub
def rsub(self, other) -> "DataFrame":
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub"
)
def mod(self, other) -> "DataFrame":
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod"
)
def rmod(self, other) -> "DataFrame":
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod"
)
def pow(self, other) -> "DataFrame":
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow"
)
def rpow(self, other) -> "DataFrame":
return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc="Exponential power", op_name="**", equiv="other ** dataframe", reverse="pow"
)
def floordiv(self, other) -> "DataFrame":
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv"
)
def rfloordiv(self, other) -> "DataFrame":
return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv"
)
# Comparison Operators
def __eq__(self, other) -> "DataFrame": # type: ignore
return self._map_series_op("eq", other)
def __ne__(self, other) -> "DataFrame": # type: ignore
return self._map_series_op("ne", other)
def __lt__(self, other) -> "DataFrame":
return self._map_series_op("lt", other)
def __le__(self, other) -> "DataFrame":
return self._map_series_op("le", other)
def __ge__(self, other) -> "DataFrame":
return self._map_series_op("ge", other)
def __gt__(self, other) -> "DataFrame":
return self._map_series_op("gt", other)
def eq(self, other) -> "DataFrame":
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False False
c False True
d False False
"""
return self == other
equals = eq
def gt(self, other) -> "DataFrame":
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False False
c True False
d True False
"""
return self > other
def ge(self, other) -> "DataFrame":
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True False
c True True
d True False
"""
return self >= other
def lt(self, other) -> "DataFrame":
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False False
c False False
d False False
"""
return self < other
def le(self, other) -> "DataFrame":
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True False
c False True
d False False
"""
return self <= other
def ne(self, other) -> "DataFrame":
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True True
c True False
d True True
"""
return self != other
def applymap(self, func) -> "DataFrame":
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
Koalas uses return type hint and does not try to infer the type.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
You can omit the type hint and let Koalas infer its type.
>>> df.applymap(lambda x: x ** 2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# TODO: We can implement shortcut theoretically since it creates new DataFrame
# anyway and we don't have to worry about operations on different DataFrames.
return self._apply_series_op(lambda kser: kser.apply(func))
# TODO: not all arguments are implemented comparing to pandas' for now.
def aggregate(
self, func: Union[List[str], Dict[Any, List[str]]]
) -> Union["Series", "DataFrame", "Index"]:
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : dict or a list
a dict mapping from column name (string) to
aggregate functions (list of strings).
If a list is given, the aggregation is performed against
all columns.
Returns
-------
DataFrame
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Invoke function on DataFrame.
DataFrame.transform : Only perform transforming type operations.
DataFrame.groupby : Perform operations over groups.
Series.aggregate : The equivalent function for Series.
Examples
--------
>>> df = ks.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1.0 2.0 3.0
1 4.0 5.0 6.0
2 7.0 8.0 9.0
3 NaN NaN NaN
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])[['A', 'B', 'C']].sort_index()
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']].sort_index()
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
For multi-index columns:
>>> df.columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C")])
>>> df.agg(['sum', 'min'])[[("X", "A"), ("X", "B"), ("Y", "C")]].sort_index()
X Y
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
>>> aggregated = df.agg({("X", "A") : ['sum', 'min'], ("X", "B") : ['min', 'max']})
>>> aggregated[[("X", "A"), ("X", "B")]].sort_index() # doctest: +NORMALIZE_WHITESPACE
X
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
"""
from databricks.koalas.groupby import GroupBy
if isinstance(func, list):
if all((isinstance(f, str) for f in func)):
func = dict([(column, func) for column in self.columns])
else:
raise ValueError(
"If the given function is a list, it "
"should only contains function names as strings."
)
if not isinstance(func, dict) or not all(
is_name_like_value(key)
and (
isinstance(value, str)
or (isinstance(value, list) and all(isinstance(v, str) for v in value))
)
for key, value in func.items()
):
raise ValueError(
"aggs must be a dict mapping from column name to aggregate "
"functions (string or list of strings)."
)
with option_context("compute.default_index_type", "distributed"):
kdf = DataFrame(GroupBy._spark_groupby(self, func)) # type: DataFrame
# The codes below basically converts:
#
# A B
# sum min min max
# 0 12.0 1.0 2.0 8.0
#
# to:
# A B
# max NaN 8.0
# min 1.0 2.0
# sum 12.0 NaN
#
# The aggregated output is usually quite small.
if LooseVersion(pyspark.__version__) >= LooseVersion("2.4"):
return kdf.stack().droplevel(0)[list(func.keys())]
else:
pdf = kdf._to_internal_pandas().stack()
pdf.index = pdf.index.droplevel()
return ks.from_pandas(pdf[list(func.keys())])
agg = aggregate
def corr(self, method="pearson") -> Union["Series", "DataFrame", "Index"]:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return ks.from_pandas(corr(self, method))
def iteritems(self) -> Iterator:
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
return (
(label if len(label) > 1 else label[0], self._kser_for(label))
for label in self._internal.column_labels
)
def iterrows(self) -> Iterator:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : pandas.Series
The data of the row as a Series.
it : generator
A generator that iterates over the rows of the frame.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = ks.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
internal_index_columns = self._internal.index_spark_column_names
internal_data_columns = self._internal.data_spark_column_names
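# Split each Spark Row into its index key (a tuple for a MultiIndex) and its data values.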
def extract_kv_from_spark_row(row):
k = (
row[internal_index_columns[0]]
if len(internal_index_columns) == 1
else tuple(row[c] for c in internal_index_columns)
)
v = [row[c] for c in internal_data_columns]
return k, v
for k, v in map(
extract_kv_from_spark_row, self._internal.resolved_copy.spark_frame.toLocalIterator()
):
s = pd.Series(v, index=columns, name=k)
yield k, s
def itertuples(self, index: bool = True, name: Optional[str] = "Koalas") -> Iterator:
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Koalas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
On python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = ks.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Koalas(Index='dog', num_legs=4, num_wings=0)
Koalas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Koalas(num_legs=4, num_wings=0)
Koalas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
fields = list(self.columns)
if index:
fields.insert(0, "Index")
index_spark_column_names = self._internal.index_spark_column_names
data_spark_column_names = self._internal.data_spark_column_names
def extract_kv_from_spark_row(row):
k = (
row[index_spark_column_names[0]]
if len(index_spark_column_names) == 1
else tuple(row[c] for c in index_spark_column_names)
)
v = [row[c] for c in data_spark_column_names]
return k, v
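# namedtuple cannot take more than 255 fields before Python 3.7, so fall back to plain tuples in that case.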
can_return_named_tuples = sys.version_info >= (3, 7) or len(self.columns) + index < 255
if name is not None and can_return_named_tuples:
itertuple = namedtuple(name, fields, rename=True) # type: ignore
for k, v in map(
extract_kv_from_spark_row,
self._internal.resolved_copy.spark_frame.toLocalIterator(),
):
yield itertuple._make(([k] if index else []) + list(v))
else:
for k, v in map(
extract_kv_from_spark_row,
self._internal.resolved_copy.spark_frame.toLocalIterator(),
):
yield tuple(([k] if index else []) + list(v))
def items(self) -> Iterator:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def to_clipboard(self, excel=True, sep=None, **kwargs) -> None:
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args
)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
) -> Optional[str]:
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args
)
def to_string(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
line_width=None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args
)
def to_dict(self, orient="dict", into=dict) -> Union[List, Mapping]:
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args
)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
) -> Optional[str]:
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. 'rcl' for 3 columns. By
default, 'l' will be used for all columns except columns of numbers, which default
to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to 'ascii' on
Python 2 and 'utf-8' on Python 3.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
<BLANKLINE>
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args
)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self) -> "DataFrame":
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from databricks.koalas.config import option_context
>>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE
... ks.DataFrame({'a': range(1001)}).transpose()
Traceback (most recent call last):
...
ValueError: Current DataFrame has more than the given limit 1000 rows.
Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option'
to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly the same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
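# Fetch one row more than the limit so we can tell whether the DataFrame exceeds it.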
pdf = self.head(max_compute_count + 1)._to_internal_pandas()
if len(pdf) > max_compute_count:
raise ValueError(
"Current DataFrame has more than the given limit {0} rows. "
"Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' "
"to retrieve more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive.".format(
max_compute_count
)
)
return DataFrame(pdf.transpose())
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +------+------+------+------+------+
# |index1|index2|(a,x1)|(a,x2)|(b,x3)|
# +------+------+------+------+------+
# | y1| z1| 1| 0| 0|
# | y2| z2| 0| 50| 0|
# | y3| z3| 3| 2| 1|
# +------+------+------+------+------+
#
# Output of `exploded_df` becomes as below:
#
# +-----------------+-----------------+-----------------+-----+
# | index|__index_level_0__|__index_level_1__|value|
# +-----------------+-----------------+-----------------+-----+
# |{"a":["y1","z1"]}| a| x1| 1|
# |{"a":["y1","z1"]}| a| x2| 0|
# |{"a":["y1","z1"]}| b| x3| 0|
# |{"a":["y2","z2"]}| a| x1| 0|
# |{"a":["y2","z2"]}| a| x2| 50|
# |{"a":["y2","z2"]}| b| x3| 0|
# |{"a":["y3","z3"]}| a| x1| 3|
# |{"a":["y3","z3"]}| a| x2| 2|
# |{"a":["y3","z3"]}| b| x3| 1|
# +-----------------+-----------------+-----------------+-----+
pairs = F.explode(
F.array(
*[
F.struct(
[
F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i))
for i, col in enumerate(label)
]
+ [self._internal.spark_column_for(label).alias("value")]
)
for label in self._internal.column_labels
]
)
)
exploded_df = self._internal.spark_frame.withColumn("pairs", pairs).select(
[
F.to_json(
F.struct(
F.array([scol for scol in self._internal.index_spark_columns]).alias("a")
)
).alias("index"),
F.col("pairs.*"),
]
)
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
internal_index_columns = [
SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)
]
pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index")
transposed_df = pivoted_df.agg(F.first(F.col("value")))
new_data_columns = list(
filter(lambda x: x not in internal_index_columns, transposed_df.columns)
)
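# The pivoted column names are JSON strings such as '{"a":["y1","z1"]}' (see the comment
# above); decode them back into column label tuples.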
column_labels = [
None if len(label) == 1 and label[0] is None else label
for label in (tuple(json.loads(col)["a"]) for col in new_data_columns)
]
internal = InternalFrame(
spark_frame=transposed_df,
index_spark_columns=[scol_for(transposed_df, col) for col in internal_index_columns],
index_names=self._internal.column_label_names,
column_labels=column_labels,
data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns],
column_label_names=self._internal.index_names,
)
return DataFrame(internal)
T = property(transpose)
def apply_batch(self, func, args=(), **kwds) -> "DataFrame":
warnings.warn(
"DataFrame.apply_batch is deprecated as of DataFrame.koalas.apply_batch. "
"Please use the API instead.",
FutureWarning,
)
return self.koalas.apply_batch(func, args=args, **kwds)
apply_batch.__doc__ = KoalasFrameMethods.apply_batch.__doc__
# TODO: Remove this API when Koalas 2.0.0.
def map_in_pandas(self, func) -> "DataFrame":
warnings.warn(
"DataFrame.map_in_pandas is deprecated as of DataFrame.koalas.apply_batch. "
"Please use the API instead.",
FutureWarning,
)
return self.koalas.apply_batch(func)
map_in_pandas.__doc__ = KoalasFrameMethods.apply_batch.__doc__
def apply(self, func, axis=0, args=(), **kwds) -> Union["Series", "DataFrame", "Index"]:
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``).
See also `Transform and apply a function
<https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.
.. note:: when `axis` is 0 or 'index', the `func` is unable to access
the whole input series. Koalas internally splits the input series into multiple
batches and calls `func` with each batch multiple times. Therefore, operations
such as global aggregations are impossible. See the example below.
>>> # This case does not return the length of the whole series but of the batch internally
... # used.
... def length(s) -> int:
... return len(s)
...
>>> df = ks.DataFrame({'A': range(1000)})
>>> df.apply(length, axis=0) # doctest: +SKIP
0 83
1 83
2 83
...
10 83
11 83
dtype: int32
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify the return type as `Series` or scalar value in ``func``,
for instance, as below:
>>> def square(s) -> ks.Series[np.int32]:
... return s ** 2
Koalas uses return type hint and does not try to infer the type.
When axis is 1, you need to specify `DataFrame` or a scalar value
with type hints as below:
>>> def plus_one(x) -> ks.DataFrame[float, float]:
... return x + 1
If the return type is specified as `DataFrame`, the output column names become
`c0, c1, c2 ... cn`. These names are positionally mapped to the returned
DataFrame in ``func``.
To specify the column names, you can assign them in a pandas friendly style as below:
>>> def plus_one(x) -> ks.DataFrame["a": float, "b": float]:
... return x + 1
>>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
>>> def plus_one(x) -> ks.DataFrame[zip(pdf.dtypes, pdf.columns)]:
... return x + 1
However, this way switches the index type to the default index type in the output
because the type hint cannot express the index type at this moment. Use
`reset_index()` to keep the index as a workaround.
When the given function has the return type annotated, the original index of the
DataFrame will be lost and then a default index will be attached to the result.
Please be careful about configuring the default index. See also `Default Index Type
<https://koalas.readthedocs.io/en/latest/user_guide/options.html#default-index-type>`_.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap : For elementwise operations.
DataFrame.aggregate : Only perform aggregating type operations.
DataFrame.transform : Only perform transforming type operations.
Series.apply : The equivalent function for Series.
Examples
--------
>>> df = ks.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> def sqrt(x) -> ks.Series[float]:
... return np.sqrt(x)
...
>>> df.apply(sqrt, axis=0)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
You can omit the type hint and let Koalas infer its type.
>>> df.apply(np.sqrt, axis=0)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
When `axis` is 1 or 'columns', it applies the function for each row.
>>> def summation(x) -> np.int64:
... return np.sum(x)
...
>>> df.apply(summation, axis=1)
0 13
1 13
2 13
dtype: int64
Likewise, you can omit the type hint and let Koalas infer its type.
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
>>> df.apply(max, axis=1)
0 9
1 9
2 9
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
To specify the types when `axis` is 1, use the DataFrame[...]
annotation. In this case, the column names are automatically generated.
>>> def identify(x) -> ks.DataFrame['A': np.int64, 'B': np.int64]:
... return x
...
>>> df.apply(identify, axis=1)
A B
0 4 9
1 4 9
2 4 9
You can also specify extra arguments.
>>> def plus_two(a, b, c) -> ks.DataFrame[np.int64, np.int64]:
... return a + b + c
...
>>> df.apply(plus_two, axis=1, args=(1,), c=3)
c0 c1
0 8 13
1 8 13
2 8 13
"""
from databricks.koalas.groupby import GroupBy
from databricks.koalas.series import first_series
if not isinstance(func, types.FunctionType):
assert callable(func), "the first argument should be a callable function."
f = func
func = lambda *args, **kwargs: f(*args, **kwargs)
axis = validate_axis(axis)
should_return_series = False
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
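# mapInPandas is only available from Spark 3.0; older versions fall back to a
# groupby-apply keyed on the partition id.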
should_use_map_in_pandas = LooseVersion(pyspark.__version__) >= "3.0"
def apply_func(pdf):
pdf_or_pser = pdf.apply(func, axis=axis, args=args, **kwds)
if isinstance(pdf_or_pser, pd.Series):
return pdf_or_pser.to_frame()
else:
return pdf_or_pser
self_applied = DataFrame(self._internal.resolved_copy) # type: "DataFrame"
column_labels = None # type: Optional[List[Tuple]]
if should_infer_schema:
# Here we execute with the first `compute.shortcut_limit` rows (1000 by default)
# to infer the return type. If the data fits within the limit, the pandas result
# is returned directly as a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self_applied.head(limit + 1)._to_internal_pandas()
applied = pdf.apply(func, axis=axis, args=args, **kwds)
kser_or_kdf = ks.from_pandas(applied)
if len(pdf) <= limit:
return kser_or_kdf
kdf = kser_or_kdf
if isinstance(kser_or_kdf, ks.Series):
should_return_series = True
kdf = kser_or_kdf._kdf
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(kdf._internal.to_internal_spark_frame.schema)
)
if should_use_map_in_pandas:
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, apply_func, return_schema, retain_index=True
)
sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(
lambda iterator: map(output_func, iterator), schema=return_schema
)
else:
sdf = GroupBy._spark_group_map_apply(
self_applied,
apply_func,
(F.spark_partition_id(),),
return_schema,
retain_index=True,
)
# If schema is inferred, we can restore indexes too.
internal = kdf._internal.with_new_sdf(sdf)
else:
return_type = infer_return_type(func)
require_index_axis = isinstance(return_type, SeriesType)
require_column_axis = isinstance(return_type, DataFrameType)
if require_index_axis:
if axis != 0:
raise TypeError(
"The given function should specify a scalar or a series as its type "
"hints when axis is 0 or 'index'; however, the return type "
"was %s" % return_sig
)
return_schema = cast(SeriesType, return_type).spark_type
fields_types = zip(
self_applied.columns, [return_schema] * len(self_applied.columns)
)
return_schema = StructType([StructField(c, t) for c, t in fields_types])
data_dtypes = [cast(SeriesType, return_type).dtype] * len(self_applied.columns)
elif require_column_axis:
if axis != 1:
raise TypeError(
"The given function should specify a scalar or a frame as its type "
"hints when axis is 1 or 'column'; however, the return type "
"was %s" % return_sig
)
return_schema = cast(DataFrameType, return_type).spark_type
data_dtypes = cast(DataFrameType, return_type).dtypes
else:
# any axis is fine.
should_return_series = True
return_schema = cast(ScalarType, return_type).spark_type
return_schema = StructType([StructField(SPARK_DEFAULT_SERIES_NAME, return_schema)])
data_dtypes = [cast(ScalarType, return_type).dtype]
column_labels = [None]
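# A scalar return type yields a single unnamed column; the result is converted
# back to a Series at the end.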
if should_use_map_in_pandas:
output_func = GroupBy._make_pandas_df_builder_func(
self_applied, apply_func, return_schema, retain_index=False
)
sdf = self_applied._internal.to_internal_spark_frame.mapInPandas(
lambda iterator: map(output_func, iterator), schema=return_schema
)
else:
sdf = GroupBy._spark_group_map_apply(
self_applied,
apply_func,
(F.spark_partition_id(),),
return_schema,
retain_index=False,
)
# Otherwise, it loses index.
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=None,
column_labels=column_labels,
data_dtypes=data_dtypes,
)
result = DataFrame(internal) # type: "DataFrame"
if should_return_series:
return first_series(result)
else:
return result
def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame":
"""
Call ``func`` on self producing a Series with transformed values
and that has the same length as its input.
See also `Transform and apply a function
<https://koalas.readthedocs.io/en/latest/user_guide/transform_apply.html>`_.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
Koalas uses return type hint and does not try to infer the type.
.. note:: the series within ``func`` is actually multiple pandas series as the
segments of the whole Koalas series; therefore, the length of each series
is not guaranteed. As an example, an aggregation against each series
does not work as a global aggregation but as an aggregation of each segment. See
below:
>>> def func(x) -> ks.Series[np.int32]:
... return x + sum(x)
Parameters
----------
func : function
Function to use for transforming the data. It must work when pandas Series
is passed.
axis : {0 or 'index'}, default 0
Can only be set to 0 at the moment.
*args
Positional arguments to pass to func.
**kwargs
Keyword arguments to pass to func.
Returns
-------
DataFrame
A DataFrame that must have the same length as self.
Raises
------
Exception : If the returned DataFrame has a different length than self.
See Also
--------
DataFrame.aggregate : Only perform aggregating type operations.
DataFrame.apply : Invoke function on DataFrame.
Series.transform : The equivalent function for Series.
Examples
--------
>>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
>>> df.transform(square)
A B
0 0 1
1 1 4
2 4 9
You can omit the type hint and let Koalas infer its type.
>>> df.transform(lambda x: x ** 2)
A B
0 0 1
1 1 4
2 4 9
For multi-index columns:
>>> df.columns = [('X', 'A'), ('X', 'B')]
>>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
>>> (df * -1).transform(abs) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 2
2 2 3
You can also specify extra arguments.
>>> def calculation(x, y, z) -> ks.Series[int]:
... return x ** y + z
>>> df.transform(calculation, y=10, z=20) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 20 21
1 21 1044
2 1044 59069
"""
if not isinstance(func, types.FunctionType):
assert callable(func), "the first argument should be a callable function."
f = func
func = lambda *args, **kwargs: f(*args, **kwargs)
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
if should_infer_schema:
# Here we execute with the first `compute.shortcut_limit` rows (1000 by default)
# to infer the return type. If the data fits within the limit, the pandas result
# is returned directly as a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
transformed = pdf.transform(func, axis, *args, **kwargs)
kdf = DataFrame(transformed) # type: "DataFrame"
if len(pdf) <= limit:
return kdf
applied = []
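# Re-apply the function column by column via _transform_batch, casting each column
# to the Spark type inferred from the pandas result above.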
for input_label, output_label in zip(
self._internal.column_labels, kdf._internal.column_labels
):
kser = self._kser_for(input_label)
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(kdf._internal.spark_type_for(output_label))
)
applied.append(
kser.koalas._transform_batch(
func=lambda c: func(c, *args, **kwargs), return_schema=return_schema
)
)
internal = self._internal.with_new_columns(
applied, data_dtypes=kdf._internal.data_dtypes
)
return DataFrame(internal)
else:
return self._apply_series_op(
lambda kser: kser.koalas.transform_batch(func, *args, **kwargs)
)
def transform_batch(self, func, *args, **kwargs) -> "DataFrame":
warnings.warn(
"DataFrame.transform_batch is deprecated as of DataFrame.koalas.transform_batch. "
"Please use the API instead.",
FutureWarning,
)
return self.koalas.transform_batch(func, *args, **kwargs)
transform_batch.__doc__ = KoalasFrameMethods.transform_batch.__doc__
def pop(self, item) -> "DataFrame":
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series or DataFrame
Examples
--------
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
Also support for MultiIndex
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df
a b
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('a')
name class
0 falcon bird
1 parrot bird
2 lion mammal
3 monkey mammal
>>> df
b
max_speed
0 389.0
1 24.0
2 80.5
3 NaN
"""
result = self[item]
self._update_internal_frame(self.drop(item)._internal)
return result
# TODO: make the axis parameter work with 1 or 'columns'
def xs(self, key, axis=0, level=None) -> Union["DataFrame", "Series"]:
"""
Return cross-section from the DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : 0 or 'index', default 0
Axis to retrieve cross-section on.
currently only support 0 or 'index'
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
Returns
-------
DataFrame or Series
Cross-section from the original DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = ks.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
locomotion
walks 4 0
>>> df.xs(('mammal', 'dog', 'walks')) # doctest: +NORMALIZE_WHITESPACE
num_legs 4
num_wings 0
Name: (mammal, dog, walks), dtype: int64
Get values at specified index and level
>>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class locomotion
mammal walks 4 0
"""
from databricks.koalas.series import first_series
if not is_name_like_value(key):
raise ValueError("'key' should be a scalar value or tuple that contains scalar values")
if level is not None and is_name_like_tuple(key):
raise KeyError(key)
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if not is_name_like_tuple(key):
key = (key,)
if len(key) > self._internal.index_level:
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
len(key), self._internal.index_level
)
)
if level is None:
level = 0
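# Build an equality predicate for each index level covered by the key, starting at
# `level`, and AND them together.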
rows = [
self._internal.index_spark_columns[lvl] == index for lvl, index in enumerate(key, level)
]
internal = self._internal.with_filter(reduce(lambda x, y: x & y, rows))
if len(key) == self._internal.index_level:
kdf = DataFrame(internal) # type: DataFrame
pdf = kdf.head(2)._to_internal_pandas()
if len(pdf) == 0:
raise KeyError(key)
elif len(pdf) > 1:
return kdf
else:
return first_series(DataFrame(pdf.transpose()))
else:
index_spark_columns = (
internal.index_spark_columns[:level]
+ internal.index_spark_columns[level + len(key) :]
)
index_names = internal.index_names[:level] + internal.index_names[level + len(key) :]
index_dtypes = internal.index_dtypes[:level] + internal.index_dtypes[level + len(key) :]
internal = internal.copy(
index_spark_columns=index_spark_columns,
index_names=index_names,
index_dtypes=index_dtypes,
).resolved_copy
return DataFrame(internal)
def between_time(
self,
start_time: Union[datetime.time, str],
end_time: Union[datetime.time, str],
include_start: bool = True,
include_end: bool = True,
axis: Union[int, str] = 0,
) -> Union["Series", "DataFrame"]:
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
Initial time as a time filter limit.
end_time : datetime.time or str
End time as a time filter limit.
include_start : bool, default True
Whether the start time needs to be included in the result.
include_end : bool, default True
Whether the end time needs to be included in the result.
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine range time on index or columns value.
Returns
-------
Series or DataFrame
Data from the original object filtered to the specified dates range.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> idx = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> kdf = ks.DataFrame({'A': [1, 2, 3, 4]}, index=idx)
>>> kdf
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> kdf.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> kdf.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
from databricks.koalas.indexes import DatetimeIndex
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("between_time currently only works for axis=0")
if not isinstance(self.index, DatetimeIndex):
raise TypeError("Index must be DatetimeIndex")
kdf = self.copy()
kdf.index.name = verify_temp_column_name(kdf, "__index_name__")
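# Rename the index to a temporary, non-conflicting name so it survives as an ordinary
# column after reset_index() inside the pandas function below.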
return_types = [kdf.index.dtype] + list(kdf.dtypes)
def pandas_between_time(pdf) -> ks.DataFrame[return_types]: # type: ignore
return pdf.between_time(start_time, end_time, include_start, include_end).reset_index()
# apply_batch will remove the index of the Koalas DataFrame and attach a default index,
# which will never be used. So use "distributed" index as a dummy to avoid overhead.
with option_context("compute.default_index_type", "distributed"):
kdf = kdf.koalas.apply_batch(pandas_between_time)
return DataFrame(
self._internal.copy(
spark_frame=kdf._internal.spark_frame,
index_spark_columns=kdf._internal.data_spark_columns[:1],
data_spark_columns=kdf._internal.data_spark_columns[1:],
)
)
def where(self, cond, other=np.nan) -> "DataFrame":
"""
Replace values where the condition is False.
Parameters
----------
cond : boolean DataFrame
Where cond is True, keep the original value. Where False,
replace with corresponding value from other.
other : scalar, DataFrame
Entries where cond is False are replaced with corresponding value from other.
Returns
-------
DataFrame
Examples
--------
>>> from databricks.koalas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
>>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
>>> df1
A B
0 0 100
1 1 200
2 2 300
3 3 400
4 4 500
>>> df2
A B
0 0 -100
1 -1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> df1.where(df1 > 0).sort_index()
A B
0 NaN 100.0
1 1.0 200.0
2 2.0 300.0
3 3.0 400.0
4 4.0 500.0
>>> df1.where(df1 > 1, 10).sort_index()
A B
0 10 100
1 10 200
2 2 300
3 3 400
4 4 500
>>> df1.where(df1 > 1, df1 + 100).sort_index()
A B
0 100 100
1 101 200
2 2 300
3 3 400
4 4 500
>>> df1.where(df1 > 1, df2).sort_index()
A B
0 0 100
1 -1 200
2 2 300
3 3 400
4 4 500
When the column names of cond differ from those of self, all values are treated as False
>>> cond = ks.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0
>>> cond
C D
0 True False
1 False True
2 False False
3 True False
4 False True
>>> df1.where(cond).sort_index()
A B
0 NaN NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
4 NaN NaN
When cond is a Series, its boolean values are applied to every column, regardless of column names
>>> cond = ks.Series([1, 2]) > 1
>>> cond
0 False
1 True
dtype: bool
>>> df1.where(cond).sort_index()
A B
0 NaN NaN
1 1.0 200.0
2 NaN NaN
3 NaN NaN
4 NaN NaN
>>> reset_option("compute.ops_on_diff_frames")
"""
from databricks.koalas.series import Series
tmp_cond_col_name = "__tmp_cond_col_{}__".format
tmp_other_col_name = "__tmp_other_col_{}__".format
kdf = self.copy()
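# Strategy: attach the condition and the replacement values as temporary columns,
# then combine each original column with F.when() further below.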
tmp_cond_col_names = [
tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels
]
if isinstance(cond, DataFrame):
cond = cond[
[
(
cond._internal.spark_column_for(label)
if label in cond._internal.column_labels
else F.lit(False)
).alias(name)
for label, name in zip(self._internal.column_labels, tmp_cond_col_names)
]
]
kdf[tmp_cond_col_names] = cond
elif isinstance(cond, Series):
cond = cond.to_frame()
cond = cond[
[cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names]
]
kdf[tmp_cond_col_names] = cond
else:
raise ValueError("type of cond must be a DataFrame or Series")
tmp_other_col_names = [
tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels
]
if isinstance(other, DataFrame):
other = other[
[
(
other._internal.spark_column_for(label)
if label in other._internal.column_labels
else F.lit(np.nan)
).alias(name)
for label, name in zip(self._internal.column_labels, tmp_other_col_names)
]
]
kdf[tmp_other_col_names] = other
elif isinstance(other, Series):
other = other.to_frame()
other = other[
[other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names]
]
kdf[tmp_other_col_names] = other
else:
for label in self._internal.column_labels:
kdf[tmp_other_col_name(name_like_string(label))] = other
# The logic above makes the Spark DataFrame look like the following:
# +-----------------+---+---+------------------+-------------------+------------------+--...
# |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__...
# +-----------------+---+---+------------------+-------------------+------------------+--...
# | 0| 0|100| true| 0| false| ...
# | 1| 1|200| false| -1| false| ...
# | 3| 3|400| true| -3| false| ...
# | 2| 2|300| false| -2| true| ...
# | 4| 4|500| false| -4| false| ...
# +-----------------+---+---+------------------+-------------------+------------------+--...
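# For each column, keep the original value where its condition column is true;
# otherwise take the value from its replacement column.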
data_spark_columns = []
for label in self._internal.column_labels:
data_spark_columns.append(
F.when(
kdf[tmp_cond_col_name(name_like_string(label))].spark.column,
kdf._internal.spark_column_for(label),
)
.otherwise(kdf[tmp_other_col_name(name_like_string(label))].spark.column)
.alias(kdf._internal.spark_column_name_for(label))
)
return DataFrame(
kdf._internal.with_new_columns(
data_spark_columns, column_labels=self._internal.column_labels # TODO: dtypes?
)
)
def mask(self, cond, other=np.nan) -> "DataFrame":
"""
Replace values where the condition is True.
Parameters
----------
cond : boolean DataFrame
Where cond is False, keep the original value. Where True,
replace with corresponding value from other.
other : scalar, DataFrame
Entries where cond is True are replaced with corresponding value from other.
Returns
-------
DataFrame
Examples
--------
>>> from databricks.koalas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]})
>>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]})
>>> df1
A B
0 0 100
1 1 200
2 2 300
3 3 400
4 4 500
>>> df2
A B
0 0 -100
1 -1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> df1.mask(df1 > 0).sort_index()
A B
0 0.0 NaN
1 NaN NaN
2 NaN NaN
3 NaN NaN
4 NaN NaN
>>> df1.mask(df1 > 1, 10).sort_index()
A B
0 0 10
1 1 10
2 10 10
3 10 10
4 10 10
>>> df1.mask(df1 > 1, df1 + 100).sort_index()
A B
0 0 200
1 1 300
2 102 400
3 103 500
4 104 600
>>> df1.mask(df1 > 1, df2).sort_index()
A B
0 0 -100
1 1 -200
2 -2 -300
3 -3 -400
4 -4 -500
>>> reset_option("compute.ops_on_diff_frames")
"""
from databricks.koalas.series import Series
if not isinstance(cond, (DataFrame, Series)):
raise ValueError("type of cond must be a DataFrame or Series")
cond_inversed = cond._apply_series_op(lambda kser: ~kser)
return self.where(cond_inversed, other)
@property
def index(self) -> "Index":
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes.base import Index
return Index._new_instance(self)
@property
def empty(self) -> bool:
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return (
len(self._internal.column_labels) == 0
or self._internal.resolved_copy.spark_frame.rdd.isEmpty()
)
@property
def style(self) -> "Styler":
"""
Property returning a Styler object containing methods for
building a styled HTML representation for the DataFrame.
.. note:: currently it collects the top 1000 rows and returns its
pandas `pandas.io.formats.style.Styler` instance.
Examples
--------
>>> ks.range(1001).style # doctest: +ELLIPSIS
<pandas.io.formats.style.Styler object at ...>
"""
max_results = get_option("compute.max_rows")
pdf = self.head(max_results + 1)._to_internal_pandas()
if len(pdf) > max_results:
warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning)
return pdf.head(max_results).style
def set_index(self, keys, drop=True, append=False, inplace=False) -> Optional["DataFrame"]:
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_name_like_tuple(keys):
keys = [keys]
elif is_name_like_value(keys):
keys = [(keys,)]
else:
keys = [key if is_name_like_tuple(key) else (key,) for key in keys]
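# `keys` is now a list of label tuples; validate that every key is an existing column label.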
columns = set(self._internal.column_labels)
for key in keys:
if key not in columns:
raise KeyError(name_like_string(key))
if drop:
column_labels = [label for label in self._internal.column_labels if label not in keys]
else:
column_labels = self._internal.column_labels
if append:
index_spark_columns = self._internal.index_spark_columns + [
self._internal.spark_column_for(label) for label in keys
]
index_names = self._internal.index_names + keys
index_dtypes = self._internal.index_dtypes + [
self._internal.dtype_for(label) for label in keys
]
else:
index_spark_columns = [self._internal.spark_column_for(label) for label in keys]
index_names = keys
index_dtypes = [self._internal.dtype_for(label) for label in keys]
internal = self._internal.copy(
index_spark_columns=index_spark_columns,
index_names=index_names,
index_dtypes=index_dtypes,
column_labels=column_labels,
data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels],
data_dtypes=[self._internal.dtype_for(label) for label in column_labels],
)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
) -> Optional["DataFrame"]:
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = ks.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1,
... col_fill='species') # doctest: +NORMALIZE_WHITESPACE
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1,
... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
multi_index = self._internal.index_level > 1
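# Follow pandas naming for restored index columns: 'index' for a single unnamed level
# (or 'level_<n>' if 'index' already exists), and 'level_<n>' for MultiIndex levels.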
def rename(index):
if multi_index:
return ("level_{}".format(index),)
else:
if ("index",) not in self._internal.column_labels:
return ("index",)
else:
return ("level_{}".format(index),)
if level is None:
new_column_labels = [
name if name is not None else rename(i)
for i, name in enumerate(self._internal.index_names)
]
new_data_spark_columns = [
scol.alias(name_like_string(label))
for scol, label in zip(self._internal.index_spark_columns, new_column_labels)
]
new_data_dtypes = self._internal.index_dtypes
index_spark_columns = []
index_names = []
index_dtypes = []
else:
if is_list_like(level):
level = list(level)
if isinstance(level, int) or is_name_like_tuple(level):
level = [level]
elif is_name_like_value(level):
level = [(level,)]
else:
level = [
lvl if isinstance(lvl, int) or is_name_like_tuple(lvl) else (lvl,)
for lvl in level
]
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= self._internal.index_level:
raise IndexError(
"Too many levels: Index has only {} level, not {}".format(
self._internal.index_level, lev + 1
)
)
idx = level
elif all(is_name_like_tuple(lev) for lev in level):
idx = []
for l in level:
try:
i = self._internal.index_names.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError("Level unknown not found")
else:
raise KeyError(
"Level unknown must be same as name ({})".format(
name_like_string(self._internal.index_names[0])
)
)
else:
raise ValueError("Level should be all int or all string.")
idx.sort()
new_column_labels = []
new_data_spark_columns = []
new_data_dtypes = []
index_spark_columns = self._internal.index_spark_columns.copy()
index_names = self._internal.index_names.copy()
index_dtypes = self._internal.index_dtypes.copy()
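# Pop the selected levels in reverse order so the remaining positions stay valid
# while items are removed.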
for i in idx[::-1]:
name = index_names.pop(i)
new_column_labels.insert(0, name if name is not None else rename(i))
scol = index_spark_columns.pop(i)
new_data_spark_columns.insert(0, scol.alias(name_like_string(name)))
new_data_dtypes.insert(0, index_dtypes.pop(i))
if drop:
new_data_spark_columns = []
new_column_labels = []
new_data_dtypes = []
for label in new_column_labels:
if label in self._internal.column_labels:
raise ValueError("cannot insert {}, already exists".format(name_like_string(label)))
if self._internal.column_labels_level > 1:
column_depth = len(self._internal.column_labels[0])
if col_level >= column_depth:
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(
column_depth, col_level + 1
)
)
if any(col_level + len(label) > column_depth for label in new_column_labels):
raise ValueError("Item must have length equal to number of levels.")
new_column_labels = [
tuple(
([col_fill] * col_level)
+ list(label)
+ ([col_fill] * (column_depth - (len(label) + col_level)))
)
for label in new_column_labels
]
internal = self._internal.copy(
index_spark_columns=index_spark_columns,
index_names=index_names,
index_dtypes=index_dtypes,
column_labels=new_column_labels + self._internal.column_labels,
data_spark_columns=new_data_spark_columns + self._internal.data_spark_columns,
data_dtypes=new_data_dtypes + self._internal.data_dtypes,
)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def isnull(self) -> "DataFrame":
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
DataFrame.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
return self._apply_series_op(lambda kser: kser.isnull())
isna = isnull
def notnull(self) -> "DataFrame":
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
DataFrame.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
return self._apply_series_op(lambda kser: kser.notnull())
notna = notnull
def insert(
self,
loc: int,
column,
value: Union[Scalar, "Series", Iterable],
allow_duplicates: bool = False,
) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
Examples
--------
>>> kdf = ks.DataFrame([1, 2, 3])
>>> kdf.sort_index()
0
0 1
1 2
2 3
>>> kdf.insert(0, 'x', 4)
>>> kdf.sort_index()
x 0
0 4 1
1 4 2
2 4 3
>>> from databricks.koalas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> kdf.insert(1, 'y', [5, 6, 7])
>>> kdf.sort_index()
x y 0
0 4 5 1
1 4 6 2
2 4 7 3
>>> kdf.insert(2, 'z', ks.Series([8, 9, 10]))
>>> kdf.sort_index()
x y z 0
0 4 5 8 1
1 4 6 9 2
2 4 7 10 3
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(loc, int):
raise TypeError("loc must be int")
assert 0 <= loc <= len(self.columns)
assert allow_duplicates is False
if not is_name_like_value(column):
raise ValueError(
'"column" should be a scalar value or tuple that contains scalar values'
)
if is_name_like_tuple(column):
if len(column) != len(self.columns.levels):
# To be consistent with pandas
raise ValueError('"column" must have length equal to number of column levels.')
if column in self.columns:
raise ValueError("cannot insert %s, already exists" % column)
kdf = self.copy()
kdf[column] = value
columns = kdf.columns[:-1].insert(loc, kdf.columns[-1])
kdf = kdf[columns]
self._update_internal_frame(kdf._internal)
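    # Editorial note (a sketch, not additional API): `insert` works by assigning the new column
    # at the end and then reindexing the columns so the new label lands at position `loc`.
    # The equivalent manual steps would be roughly:
    #
    #     kdf = ks.DataFrame({"a": [1, 2]})
    #     kdf["b"] = 0                                            # append at the end
    #     kdf = kdf[kdf.columns[:-1].insert(0, kdf.columns[-1])]  # move "b" to position 0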
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None) -> "DataFrame":
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition in a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
return self._apply_series_op(
lambda kser: kser._shift(periods, fill_value), should_resolve=True
)
# TODO: axis should support 1 or 'columns' either at this moment
def diff(self, periods: int = 1, axis: Union[int, str] = 0) -> "DataFrame":
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
            specifying partition specification. This leads to moving all data into a
            single partition in a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
        axis : {0 or 'index'}, default 0
Can only be set to 0 at the moment.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
return self._apply_series_op(lambda kser: kser._diff(periods), should_resolve=True)
# TODO: axis should support 1 or 'columns' either at this moment
def nunique(
self,
axis: Union[int, str] = 0,
dropna: bool = True,
approx: bool = False,
rsd: float = 0.05,
) -> "Series":
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
        axis : {0 or 'index'}, default 0
Can only be set to 0 at the moment.
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
            for large amounts of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a Koalas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
dtype: int64
"""
from databricks.koalas.series import first_series
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
sdf = self._internal.spark_frame.select(
[F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)]
+ [
self._kser_for(label)._nunique(dropna, approx, rsd)
for label in self._internal.column_labels
]
)
# The data is expected to be small so it's fine to transpose/use default index.
with ks.option_context("compute.max_rows", 1):
internal = self._internal.copy(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
index_names=[None],
index_dtypes=[None],
data_spark_columns=[
scol_for(sdf, col) for col in self._internal.data_spark_column_names
],
data_dtypes=None,
)
return first_series(DataFrame(internal).transpose())
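    # Usage sketch for the approximate path (hedged; requires an active Spark session and data
    # where an estimate is acceptable): `rsd` tightens or loosens the HyperLogLog error bound.
    #
    #     kdf = ks.DataFrame({"A": list(range(1000))})
    #     kdf.nunique(approx=True, rsd=0.01)  # tighter bound than the 0.05 default, but slower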
def round(self, decimals=0) -> "DataFrame":
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
.. note:: If `decimals` is a Series, it is expected to be small,
as all the data is loaded into the driver's memory.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ks.Series):
decimals = {
k if isinstance(k, tuple) else (k,): v
for k, v in decimals._to_internal_pandas().items()
}
elif isinstance(decimals, dict):
decimals = {k if is_name_like_tuple(k) else (k,): v for k, v in decimals.items()}
elif isinstance(decimals, int):
decimals = {k: decimals for k in self._internal.column_labels}
else:
raise ValueError("decimals must be an integer, a dict-like or a Series")
def op(kser):
label = kser._column_label
if label in decimals:
return F.round(kser.spark.column, decimals[label]).alias(
kser._internal.data_spark_column_names[0]
)
else:
return kser
return self._apply_series_op(op)
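    # Because `decimals` keys are normalized to label tuples above, these calls are equivalent
    # ways to round column "A" to one decimal place (illustrative only):
    #
    #     kdf.round({"A": 1})
    #     kdf.round({("A",): 1})
    #     kdf.round(ks.Series([1], index=["A"]))  # Series index entries become the labels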
def _mark_duplicates(self, subset=None, keep="first"):
if subset is None:
subset = self._internal.column_labels
else:
if is_name_like_tuple(subset):
subset = [subset]
elif is_name_like_value(subset):
subset = [(subset,)]
else:
subset = [sub if is_name_like_tuple(sub) else (sub,) for sub in subset]
diff = set(subset).difference(set(self._internal.column_labels))
if len(diff) > 0:
raise KeyError(", ".join([name_like_string(d) for d in diff]))
group_cols = [self._internal.spark_column_name_for(label) for label in subset]
sdf = self._internal.resolved_copy.spark_frame
column = verify_temp_column_name(sdf, "__duplicated__")
if keep == "first" or keep == "last":
if keep == "first":
ord_func = spark.functions.asc
else:
ord_func = spark.functions.desc
window = (
Window.partitionBy(group_cols)
.orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME))
.rowsBetween(Window.unboundedPreceding, Window.currentRow)
)
sdf = sdf.withColumn(column, F.row_number().over(window) > 1)
elif not keep:
window = Window.partitionBy(group_cols).rowsBetween(
Window.unboundedPreceding, Window.unboundedFollowing
)
sdf = sdf.withColumn(column, F.count("*").over(window) > 1)
else:
raise ValueError("'keep' only supports 'first', 'last' and False")
return sdf, column
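    # How `_mark_duplicates` flags rows (editorial sketch): for keep='first'/'last' it numbers
    # rows within each subset-group, ordered by the natural row order, and marks row_number > 1;
    # for keep=False it marks every row whose group count exceeds 1.
    #
    #     sdf, col = kdf._mark_duplicates(subset=["a"], keep="first")
    #     sdf.select(col)  # Spark boolean column: True for each non-first occurrence of "a"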
def duplicated(self, subset=None, keep="first") -> "Series":
"""
Return boolean Series denoting duplicate rows, optionally only considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates,
by default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
... columns = ['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 1 1 1
2 1 1 1
3 3 4 5
>>> df.duplicated().sort_index()
0 False
1 True
2 True
3 False
dtype: bool
Mark duplicates as ``True`` except for the last occurrence.
>>> df.duplicated(keep='last').sort_index()
0 True
1 True
2 False
3 False
dtype: bool
Mark all duplicates as ``True``.
>>> df.duplicated(keep=False).sort_index()
0 True
1 True
2 True
3 False
dtype: bool
"""
from databricks.koalas.series import first_series
sdf, column = self._mark_duplicates(subset, keep)
sdf = sdf.select(
self._internal.index_spark_columns
+ [scol_for(sdf, column).alias(SPARK_DEFAULT_SERIES_NAME)]
)
return first_series(
DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=self._internal.index_names,
index_dtypes=self._internal.index_dtypes,
column_labels=[None], # type: ignore
data_spark_columns=[scol_for(sdf, SPARK_DEFAULT_SERIES_NAME)],
)
)
)
# TODO: support other as DataFrame or array-like
def dot(self, other: "Series") -> "Series":
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series
It can also be called using ``self @ other`` in Python >= 3.5.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
            then group twice - it is a huge operation. To prevent misuse, this method
            has a default limit on the input length ('compute.max_rows') and raises a ValueError.
>>> from databricks.koalas.config import option_context
>>> with option_context(
... 'compute.max_rows', 1000, "compute.ops_on_diff_frames", True
... ): # doctest: +NORMALIZE_WHITESPACE
... kdf = ks.DataFrame({'a': range(1001)})
... kser = ks.Series([2], index=['a'])
... kdf.dot(kser)
Traceback (most recent call last):
...
ValueError: Current DataFrame has more then the given limit 1000 rows.
Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option'
to retrieve to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Parameters
----------
other : Series
The other object to compute the matrix product with.
Returns
-------
Series
Return the matrix product between self and other as a Series.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
>>> from databricks.koalas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
>>> kdf = ks.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> kser = ks.Series([1, 1, 2, 1])
>>> kdf.dot(kser)
0 -4
1 5
dtype: int64
Note how shuffling of the objects does not change the result.
>>> kser2 = kser.reindex([1, 0, 2, 3])
>>> kdf.dot(kser2)
0 -4
1 5
dtype: int64
>>> kdf @ kser2
0 -4
1 5
dtype: int64
>>> reset_option("compute.ops_on_diff_frames")
"""
if not isinstance(other, ks.Series):
raise TypeError("Unsupported type {}".format(type(other).__name__))
else:
return cast(ks.Series, other.dot(self.transpose())).rename(None)
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
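    # `dot` and the `@` operator share one code path: the product is delegated to Series.dot
    # against the transposed frame. Illustrative usage (hedged; operands from different frames
    # require the "compute.ops_on_diff_frames" option, as shown in the docstring above):
    #
    #     kdf = ks.DataFrame([[0, 1], [1, 1]])
    #     kser = ks.Series([1, 2])
    #     kdf @ kser  # same as kdf.dot(kser)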
def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None) -> "DataFrame":
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
col1 col2
0 1 3
1 2 4
We can specify the index columns.
>>> kdf = spark_df.to_koalas(index_col='col1')
>>> kdf # doctest: +NORMALIZE_WHITESPACE
col2
col1
1 3
2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
assert isinstance(self, spark.DataFrame), type(self)
from databricks.koalas.namespace import _get_index_map
index_spark_columns, index_names = _get_index_map(self, index_col)
internal = InternalFrame(
spark_frame=self, index_spark_columns=index_spark_columns, index_names=index_names
)
return DataFrame(internal)
def cache(self) -> "CachedDataFrame":
warnings.warn(
"DataFrame.cache is deprecated as of DataFrame.spark.cache. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.cache()
cache.__doc__ = SparkFrameMethods.cache.__doc__
def persist(self, storage_level=StorageLevel.MEMORY_AND_DISK) -> "CachedDataFrame":
warnings.warn(
"DataFrame.persist is deprecated as of DataFrame.spark.persist. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.persist(storage_level)
persist.__doc__ = SparkFrameMethods.persist.__doc__
def hint(self, name: str, *parameters) -> "DataFrame":
warnings.warn(
"DataFrame.hint is deprecated as of DataFrame.spark.hint. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.hint(name, *parameters)
hint.__doc__ = SparkFrameMethods.hint.__doc__
def to_table(
self,
name: str,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
return self.spark.to_table(name, format, mode, partition_cols, index_col, **options)
to_table.__doc__ = SparkFrameMethods.to_table.__doc__
def to_delta(
self,
path: str,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default
'overwrite'. Specifies the behavior of the save operation when the destination
exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2012-01-01"')
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self.spark.to_spark_io(
path=path,
mode=mode,
format="delta",
partition_cols=partition_cols,
index_col=index_col,
**options,
)
def to_parquet(
self,
path: str,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
compression: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
builder = self.to_spark(index_col=index_col).write.mode(mode)
if partition_cols is not None:
builder.partitionBy(partition_cols)
builder._set_opts(compression=compression)
builder.options(**options).format("parquet").save(path)
def to_orc(
self,
path: str,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
"""
Write the DataFrame out as a ORC file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'},
default 'overwrite'. Specifies the behavior of the save operation when the
destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_orc
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_orc('%s/to_orc/foo.orc' % path, partition_cols='date')
>>> df.to_orc(
... '%s/to_orc/foo.orc' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
self.spark.to_spark_io(
path=path,
mode=mode,
format="orc",
partition_cols=partition_cols,
index_col=index_col,
**options,
)
def to_spark_io(
self,
path: Optional[str] = None,
format: Optional[str] = None,
mode: str = "overwrite",
partition_cols: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options
) -> None:
return self.spark.to_spark_io(path, format, mode, partition_cols, index_col, **options)
to_spark_io.__doc__ = SparkFrameMethods.to_spark_io.__doc__
def to_spark(self, index_col: Optional[Union[str, List[str]]] = None) -> SparkDataFrame:
return self.spark.frame(index_col)
    to_spark.__doc__ = SparkFrameMethods.frame.__doc__
def to_pandas(self) -> pd.DataFrame:
"""
Return a pandas DataFrame.
.. note:: This method should only be used if the resulting pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.to_pandas_frame.copy()
# Alias to maintain backward compatibility with Spark
def toPandas(self) -> pd.DataFrame:
warnings.warn(
"DataFrame.toPandas is deprecated as of DataFrame.to_pandas. "
"Please use the API instead.",
FutureWarning,
)
return self.to_pandas()
toPandas.__doc__ = to_pandas.__doc__
def assign(self, **kwargs) -> "DataFrame":
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable, Series or Index}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change input DataFrame (though Koalas doesn't check it).
            If the values are not callable (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15,
... temp_idx=df.index)
>>> assigned[['temp_c', 'temp_f', 'temp_k', 'temp_idx']]
temp_c temp_f temp_k temp_idx
Portland 17.0 62.6 290.15 Portland
Berkeley 25.0 77.0 298.15 Berkeley
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
return self._assign(kwargs)
def _assign(self, kwargs):
assert isinstance(kwargs, dict)
from databricks.koalas.indexes import MultiIndex
from databricks.koalas.series import IndexOpsMixin
for k, v in kwargs.items():
is_invalid_assignee = (
not (isinstance(v, (IndexOpsMixin, spark.Column)) or callable(v) or is_scalar(v))
) or isinstance(v, MultiIndex)
if is_invalid_assignee:
raise TypeError(
"Column assignment doesn't support type " "{0}".format(type(v).__name__)
)
if callable(v):
kwargs[k] = v(self)
pairs = {
(k if is_name_like_tuple(k) else (k,)): (
(v.spark.column, v.dtype)
if isinstance(v, IndexOpsMixin) and not isinstance(v, MultiIndex)
else (v, None)
if isinstance(v, spark.Column)
else (F.lit(v), None)
)
for k, v in kwargs.items()
}
scols = []
data_dtypes = []
for label in self._internal.column_labels:
for i in range(len(label)):
if label[: len(label) - i] in pairs:
scol, dtype = pairs[label[: len(label) - i]]
scol = scol.alias(self._internal.spark_column_name_for(label))
break
else:
scol = self._internal.spark_column_for(label)
dtype = self._internal.dtype_for(label)
scols.append(scol)
data_dtypes.append(dtype)
column_labels = self._internal.column_labels.copy()
for label, (scol, dtype) in pairs.items():
if label not in set(i[: len(label)] for i in self._internal.column_labels):
scols.append(scol.alias(name_like_string(label)))
column_labels.append(label)
data_dtypes.append(dtype)
level = self._internal.column_labels_level
column_labels = [
tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels
]
internal = self._internal.with_new_columns(
scols, column_labels=column_labels, data_dtypes=data_dtypes
)
return DataFrame(internal)
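    # Label matching in `_assign` is prefix-based for MultiIndex columns: a key such as ("a",)
    # overwrites every existing column whose label starts with ("a",). A hypothetical sketch
    # (the column names are made up for illustration):
    #
    #     kdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
    #     kdf.assign(a=1)  # replaces both ("a", "x") and ("a", "y") with the literal 1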
@staticmethod
def from_records(
data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None,
exclude: list = None,
columns: list = None,
coerce_float: bool = False,
nrows: int = None,
) -> "DataFrame":
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(
pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows)
)
def to_records(self, index=True, column_dtypes=None, index_dtypes=None) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args
)
def copy(self, deep=None) -> "DataFrame":
"""
Make a copy of this object's indices and data.
Parameters
----------
deep : None
            this parameter is not supported but just a dummy parameter to match pandas.
Returns
-------
copy : DataFrame
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df_copy = df.copy()
>>> df_copy
x y z w
0 1 3 5 7
1 2 4 6 8
"""
return DataFrame(self._internal)
def dropna(
self, axis=0, how="any", thresh=None, subset=None, inplace=False
) -> Optional["DataFrame"]:
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
axis = validate_axis(axis)
inplace = validate_bool_kwarg(inplace, "inplace")
if thresh is None:
if how is None:
raise TypeError("must specify how or thresh")
elif how not in ("any", "all"):
raise ValueError("invalid how option: {h}".format(h=how))
if subset is not None:
if isinstance(subset, str):
labels = [(subset,)] # type: Optional[List[Tuple]]
elif isinstance(subset, tuple):
labels = [subset]
else:
labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
else:
labels = None
if axis == 0:
if labels is not None:
invalids = [label for label in labels if label not in self._internal.column_labels]
if len(invalids) > 0:
raise KeyError(invalids)
else:
labels = self._internal.column_labels
cnt = reduce(
lambda x, y: x + y,
[
F.when(self._kser_for(label).notna().spark.column, 1).otherwise(0)
for label in labels
],
F.lit(0),
)
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == "any":
pred = cnt == F.lit(len(labels))
elif how == "all":
pred = cnt > F.lit(0)
internal = self._internal.with_filter(pred)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
else:
assert axis == 1
internal = self._internal.resolved_copy
if labels is not None:
if any(len(lbl) != internal.index_level for lbl in labels):
raise ValueError(
"The length of each subset must be the same as the index size."
)
cond = reduce(
lambda x, y: x | y,
[
reduce(
lambda x, y: x & y,
[
scol == F.lit(l)
for l, scol in zip(lbl, internal.index_spark_columns)
],
)
for lbl in labels
],
)
internal = internal.with_filter(cond)
null_counts = []
for label in internal.column_labels:
scol = internal.spark_column_for(label)
if isinstance(internal.spark_type_for(label), (FloatType, DoubleType)):
cond = scol.isNull() | F.isnan(scol)
else:
cond = scol.isNull()
null_counts.append(
F.sum(F.when(~cond, 1).otherwise(0)).alias(name_like_string(label))
)
counts = internal.spark_frame.select(null_counts + [F.count("*")]).head()
if thresh is not None:
column_labels = [
label
for label, cnt in zip(internal.column_labels, counts)
if (cnt or 0) >= int(thresh)
]
elif how == "any":
column_labels = [
label
for label, cnt in zip(internal.column_labels, counts)
if (cnt or 0) == counts[-1]
]
elif how == "all":
column_labels = [
label for label, cnt in zip(internal.column_labels, counts) if (cnt or 0) > 0
]
kdf = self[column_labels]
if inplace:
self._update_internal_frame(kdf._internal)
return None
else:
return kdf
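    # Editorial note on the axis=0 branch above: the filter sums, per row, a 1 for every
    # non-null value among the inspected columns and compares that count with the threshold
    # ('any' keeps rows where count == len(labels), 'all' where count > 0, thresh where
    # count >= thresh). A rough Spark-level equivalent for two columns:
    #
    #     cnt = sum(F.when(kdf[c].notna().spark.column, 1).otherwise(0) for c in ["a", "b"])
    #     # rows are kept where, e.g., cnt >= thresh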
# TODO: add 'limit' when value parameter exists
def fillna(
self, value=None, method=None, axis=None, inplace=False, limit=None
) -> Optional["DataFrame"]:
"""Fill NA/NaN values.
.. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
            without specifying partition specification. This leads to moving all data into a
            single partition in a single machine and could cause serious
            performance degradation. Avoid this method against very large datasets.
Parameters
----------
value : scalar, dict, Series
            Value to use to fill holes. Alternatively, a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
            Method to use for filling holes in reindexed Series.
            pad / ffill: propagate last valid observation forward to next valid.
            backfill / bfill: use NEXT valid observation to fill gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if value is not None:
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value).__name__)
if limit is not None:
raise ValueError("limit parameter for value is not support now")
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v).__name__)
value = {k if is_name_like_tuple(k) else (k,): v for k, v in value.items()}
def op(kser):
label = kser._column_label
for k, v in value.items():
if k == label[: len(k)]:
return kser._fillna(
value=value[k], method=method, axis=axis, limit=limit
)
else:
return kser
else:
op = lambda kser: kser._fillna(value=value, method=method, axis=axis, limit=limit)
elif method is not None:
op = lambda kser: kser._fillna(value=value, method=method, axis=axis, limit=limit)
else:
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
kdf = self._apply_series_op(op, should_resolve=(method is not None))
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._update_internal_frame(kdf._internal, requires_same_anchor=False)
return None
else:
return kdf
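    # When `value` is a dict (or a pandas Series, which is converted via to_dict()), keys are
    # normalized to label tuples and matched as label prefixes, so with MultiIndex columns one
    # key can fill a whole group. Illustrative sketch:
    #
    #     kdf.fillna({"A": 0})       # fills column "A" only
    #     kdf.fillna({("A",): 0})    # identical after normalization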
def replace(
self, to_replace=None, value=None, inplace=False, limit=None, regex=False, method="pad",
) -> Optional["DataFrame"]:
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, list, tuple or dict
Value to be replaced.
value : int, float, string, list or tuple
Value to use to replace holes. The replacement value must be an int, float,
or string.
            If value is a list or tuple, value should be of the same length as to_replace.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Dicts can be used to specify different replacement values for different existing values
To use a dict in this way the value parameter should be None
>>> df.replace({'Mjolnir': 'Stormbuster'})
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict can specify that different values should be replaced in different columns
The value parameter should not be None in this case
>>> df.replace({'weapon': 'Mjolnir'}, 'Stormbuster')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Nested dictionaries
The value parameter should be None to use a nested dict in this way
>>> df.replace({'weapon': {'Mjolnir': 'Stormbuster'}})
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
"""
if method != "pad":
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
inplace = validate_bool_kwarg(inplace, "inplace")
if value is not None and not isinstance(value, (int, float, str, list, tuple, dict)):
raise TypeError("Unsupported type {}".format(type(value).__name__))
if to_replace is not None and not isinstance(
to_replace, (int, float, str, list, tuple, dict)
):
raise TypeError("Unsupported type {}".format(type(to_replace).__name__))
if isinstance(value, (list, tuple)) and isinstance(to_replace, (list, tuple)):
if len(value) != len(to_replace):
raise ValueError("Length of to_replace and value must be same")
if isinstance(to_replace, dict) and (
value is not None or all(isinstance(i, dict) for i in to_replace.values())
):
def op(kser):
if kser.name in to_replace:
return kser.replace(to_replace=to_replace[kser.name], value=value, regex=regex)
else:
return kser
else:
op = lambda kser: kser.replace(to_replace=to_replace, value=value, regex=regex)
kdf = self._apply_series_op(op)
if inplace:
self._update_internal_frame(kdf._internal)
return None
else:
return kdf
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame":
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError(
"List-like value are not supported for 'lower' and 'upper' at the " + "moment"
)
if lower is None and upper is None:
return self
return self._apply_series_op(lambda kser: kser.clip(lower=lower, upper=upper))
def head(self, n: int = 5) -> "DataFrame":
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
if n < 0:
n = len(self) + n
if n <= 0:
return DataFrame(self._internal.with_filter(F.lit(False)))
else:
sdf = self._internal.resolved_copy.spark_frame
if get_option("compute.ordered_head"):
sdf = sdf.orderBy(NATURAL_ORDER_COLUMN_NAME)
return DataFrame(self._internal.with_new_sdf(sdf.limit(n)))
def last(self, offset: Union[str, DateOffset]) -> "DataFrame":
"""
Select final periods of time series data based on a date offset.
When having a DataFrame with dates as index, this function can
select the last few rows based on a date offset.
Parameters
----------
offset : str or DateOffset
The offset length of the data that will be selected. For instance,
'3D' will display all the rows having their index within the last 3 days.
Returns
-------
DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
Examples
--------
>>> index = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> kdf = ks.DataFrame({'A': [1, 2, 3, 4]}, index=index)
>>> kdf
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> kdf.last('3D')
A
2018-04-13 3
2018-04-15 4
        Notice the data for the last 3 calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
# Check index type should be format DateTime
from databricks.koalas.indexes import DatetimeIndex
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex")
offset = to_offset(offset)
from_date = self.index.max() - offset
return cast(DataFrame, self.loc[from_date:])
def pivot_table(
self, values=None, index=None, columns=None, aggfunc="mean", fill_value=None
) -> "DataFrame":
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
            They should be either a string or a list of fewer than three column names.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
            The list should contain strings.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
C large small
A B
bar one 4.0 5
two 7.0 6
foo one 4.0 1
two NaN 6
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values=['D'], index =['C'],
... columns="A", aggfunc={'D': 'mean'})
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
D
A bar foo
C
large 5.5 2.000000
small 5.5 2.333333
The next example aggregates on multiple values.
>>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
... aggfunc={'D': 'mean', 'E': 'sum'})
>>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE
D E
A bar foo bar foo
C
large 5.5 2.000000 15 9
small 5.5 2.333333 17 13
"""
if not is_name_like_value(columns):
raise ValueError("columns should be one column name.")
if not is_name_like_value(values) and not (
isinstance(values, list) and all(is_name_like_value(v) for v in values)
):
raise ValueError("values should be one column or list of columns.")
if not isinstance(aggfunc, str) and (
not isinstance(aggfunc, dict)
or not all(
is_name_like_value(key) and isinstance(value, str) for key, value in aggfunc.items()
)
):
raise ValueError(
"aggfunc must be a dict mapping from column name "
"to aggregate functions (string)."
)
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError(
"pivot_table doesn't support aggfunc" " as dict and without index."
)
if isinstance(values, list) and index is None:
raise NotImplementedError("values can't be a list without index.")
if columns not in self.columns:
raise ValueError("Wrong columns {}.".format(name_like_string(columns)))
if not is_name_like_tuple(columns):
columns = (columns,)
if isinstance(values, list):
values = [col if is_name_like_tuple(col) else (col,) for col in values]
if not all(
isinstance(self._internal.spark_type_for(col), NumericType) for col in values
):
raise TypeError("values should be a numeric type.")
else:
values = values if is_name_like_tuple(values) else (values,)
if not isinstance(self._internal.spark_type_for(values), NumericType):
raise TypeError("values should be a numeric type.")
if isinstance(aggfunc, str):
if isinstance(values, list):
agg_cols = [
F.expr(
"{1}(`{0}`) as `{0}`".format(
self._internal.spark_column_name_for(value), aggfunc
)
)
for value in values
]
else:
agg_cols = [
F.expr(
"{1}(`{0}`) as `{0}`".format(
self._internal.spark_column_name_for(values), aggfunc
)
)
]
elif isinstance(aggfunc, dict):
aggfunc = {
key if is_name_like_tuple(key) else (key,): value for key, value in aggfunc.items()
}
agg_cols = [
F.expr(
"{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value)
)
for key, value in aggfunc.items()
]
agg_columns = [key for key, _ in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
sdf = self._internal.resolved_copy.spark_frame
if index is None:
sdf = (
sdf.groupBy()
.pivot(pivot_col=self._internal.spark_column_name_for(columns))
.agg(*agg_cols)
)
elif isinstance(index, list):
index = [label if is_name_like_tuple(label) else (label,) for label in index]
sdf = (
sdf.groupBy([self._internal.spark_column_name_for(label) for label in index])
.pivot(pivot_col=self._internal.spark_column_name_for(columns))
.agg(*agg_cols)
)
else:
raise ValueError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
if index is not None:
index_columns = [self._internal.spark_column_name_for(label) for label in index]
index_dtypes = [self._internal.dtype_for(label) for label in index]
if isinstance(values, list):
data_columns = [column for column in sdf.columns if column not in index_columns]
if len(values) > 1:
# If we have two values, Spark will return column's name
# in this format: column_values, where column contains
# their values in the DataFrame and values is
# the column list passed to the pivot_table().
# E.g. if column is b and values is ['b','e'],
# then ['2_b', '2_e', '3_b', '3_e'].
# We sort the columns of Spark DataFrame by values.
data_columns.sort(key=lambda x: x.split("_", 1)[1])
sdf = sdf.select(index_columns + data_columns)
column_name_to_index = dict(
zip(self._internal.data_spark_column_names, self._internal.column_labels)
)
column_labels = [
tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]])
for name in data_columns
]
column_label_names = ([None] * column_labels_level(values)) + [columns]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index,
index_dtypes=index_dtypes,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=column_label_names, # type: ignore
)
kdf = DataFrame(internal) # type: "DataFrame"
else:
column_labels = [tuple(list(values[0]) + [column]) for column in data_columns]
column_label_names = ([None] * len(values[0])) + [columns]
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index,
index_dtypes=index_dtypes,
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=column_label_names, # type: ignore
)
kdf = DataFrame(internal)
else:
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_columns],
index_names=index,
index_dtypes=index_dtypes,
column_label_names=[columns],
)
kdf = DataFrame(internal)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
index_map = OrderedDict() # type: Dict[str, Optional[Tuple]]
for i, index_value in enumerate(index_values):
colname = SPARK_INDEX_NAME_FORMAT(i)
sdf = sdf.withColumn(colname, F.lit(index_value))
index_map[colname] = None
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_map.keys()],
index_names=list(index_map.values()),
column_label_names=[columns],
)
kdf = DataFrame(internal)
kdf_columns = kdf.columns
if isinstance(kdf_columns, pd.MultiIndex):
kdf.columns = kdf_columns.set_levels(
kdf_columns.levels[-1].astype(
spark_type_to_pandas_dtype(self._kser_for(columns).spark.data_type)
),
level=-1,
)
else:
kdf.columns = kdf_columns.astype(
spark_type_to_pandas_dtype(self._kser_for(columns).spark.data_type)
)
return kdf
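    # Editorial note: with more than one entry in `values`, Spark names the pivoted columns
    # "<pivot value>_<value column>" (e.g. "small_D", "small_E"); the code above re-sorts them
    # by the value-column part and rebuilds MultiIndex labels from those names. Sketch:
    #
    #     df.pivot_table(values=["D", "E"], index=["A"], columns="C", aggfunc="sum")
    #     # -> column labels such as ("D", "large"), ("D", "small"), ("E", "large"), ...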
def pivot(self, index=None, columns=None, values=None) -> "DataFrame":
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation.
Parameters
----------
index : string, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string
Column to use to make new frame's columns.
values : string, object or a list of the previous
Column(s) to use for populating new frame's values.
Returns
-------
DataFrame
Returns reshaped DataFrame.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
Examples
--------
>>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
... columns=['foo', 'bar', 'baz', 'zoo'])
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE
bar A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0
3 4.0 NaN NaN
4 NaN 5.0 NaN
5 NaN NaN 6.0
Notice that, unlike pandas which raises a ValueError when duplicated values are found,
Koalas' pivot still works with the first value it meets during the operation, because
pivot is an expensive operation and it is preferable to execute permissively rather
than fail fast when processing large data.
>>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1.0 NaN NaN
two NaN 3.0 4.0
It also supports multi-index and multi-index columns.
>>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')])
>>> df = df.set_index(('a', 'bar'), append=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
a b
foo baz
(a, bar)
0 A one 1
1 A one 2
2 B two 3
3 C two 4
>>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
('a', 'foo') one two
(a, bar)
0 A 1.0 NaN
1 A 2.0 NaN
2 B NaN 3.0
3 C NaN 4.0
"""
if columns is None:
raise ValueError("columns should be set.")
if values is None:
raise ValueError("values should be set.")
should_use_existing_index = index is not None
if should_use_existing_index:
df = self
index = [index]
else:
# The index after `reset_index()` will never be used, so use "distributed" index
# as a dummy to avoid overhead.
with option_context("compute.default_index_type", "distributed"):
df = self.reset_index()
index = df._internal.column_labels[: self._internal.index_level]
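# pivot is implemented on top of pivot_table() with aggfunc="first": when duplicate
# index/column pairs exist, the first value encountered wins (see the explanation in
# the docstring above).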
df = df.pivot_table(index=index, columns=columns, values=values, aggfunc="first")
if should_use_existing_index:
return df
else:
internal = df._internal.copy(index_names=self._internal.index_names)
return DataFrame(internal)
@property
def columns(self) -> pd.Index:
"""The column labels of the DataFrame."""
names = [
name if name is None or len(name) > 1 else name[0]
for name in self._internal.column_label_names
]
if self._internal.column_labels_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_labels, names=names)
else:
columns = pd.Index([label[0] for label in self._internal.column_labels], name=names[0])
return columns
@columns.setter
def columns(self, columns) -> None:
if isinstance(columns, pd.MultiIndex):
column_labels = columns.tolist()
else:
column_labels = [
col if is_name_like_tuple(col, allow_none=False) else (col,) for col in columns
]
if len(self._internal.column_labels) != len(column_labels):
raise ValueError(
"Length mismatch: Expected axis has {} elements, "
"new values have {} elements".format(
len(self._internal.column_labels), len(column_labels)
)
)
if isinstance(columns, pd.Index):
column_label_names = [
name if is_name_like_tuple(name) else (name,) for name in columns.names
] # type: Optional[List]
else:
column_label_names = None
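# Rebuild each column Series under its new label; the length check above guarantees
# a one-to-one mapping between old and new labels.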
ksers = [
self._kser_for(label).rename(name)
for label, name in zip(self._internal.column_labels, column_labels)
]
self._update_internal_frame(
self._internal.with_new_columns(ksers, column_label_names=column_label_names)
)
@property
def dtypes(self) -> pd.Series:
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series(
[self._kser_for(label).dtype for label in self._internal.column_labels],
index=pd.Index(
[label if len(label) > 1 else label[0] for label in self._internal.column_labels]
),
)
def spark_schema(self, index_col: Optional[Union[str, List[str]]] = None) -> StructType:
warnings.warn(
"DataFrame.spark_schema is deprecated as of DataFrame.spark.schema. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.schema(index_col)
spark_schema.__doc__ = SparkFrameMethods.schema.__doc__
def print_schema(self, index_col: Optional[Union[str, List[str]]] = None) -> None:
warnings.warn(
"DataFrame.print_schema is deprecated as of DataFrame.spark.print_schema. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.print_schema(index_col)
print_schema.__doc__ = SparkFrameMethods.print_schema.__doc__
def select_dtypes(self, include=None, exclude=None) -> "DataFrame":
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
ValueError: include and exclude overlap on {'a'}
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError("at least one of include or exclude must be " "nonempty")
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError(
"include and exclude overlap on {inc_ex}".format(
inc_ex=set(include).intersection(set(exclude))
)
)
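# Each entry of include/exclude may be either a Spark SQL DDL type string (e.g. 'string',
# 'date') or a NumPy/pandas dtype-like object. Both interpretations are attempted below;
# entries that do not parse under one interpretation are simply skipped for it.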
# Handle Spark types
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except BaseException:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except BaseException:
pass
# Handle pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except BaseException:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except BaseException:
pass
column_labels = []
for label in self._internal.column_labels:
if len(include) > 0:
should_include = (
infer_dtype_from_object(self._kser_for(label).dtype.name) in include_numpy_type
or self._internal.spark_type_for(label) in include_spark_type
)
else:
should_include = not (
infer_dtype_from_object(self._kser_for(label).dtype.name) in exclude_numpy_type
or self._internal.spark_type_for(label) in exclude_spark_type
)
if should_include:
column_labels.append(label)
return DataFrame(
self._internal.with_new_columns([self._kser_for(label) for label in column_labels])
)
def droplevel(self, level, axis=0) -> "DataFrame":
"""
Return DataFrame with requested index / column level(s) removed.
Parameters
----------
level: int, str, or list-like
If a string is given, it must be the name of a level. If list-like, elements must
be names or positional indexes of levels.
axis: {0 or ‘index’, 1 or ‘columns’}, default 0
Returns
-------
DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = ks.DataFrame(
... [[3, 4], [7, 8], [11, 12]],
... index=pd.MultiIndex.from_tuples([(1, 2), (5, 6), (9, 10)], names=["a", "b"]),
... )
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df # doctest: +NORMALIZE_WHITESPACE
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a') # doctest: +NORMALIZE_WHITESPACE
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1) # doctest: +NORMALIZE_WHITESPACE
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
axis = validate_axis(axis)
if axis == 0:
if not isinstance(level, (tuple, list)):  # normalize a single level into a list
level = [level]
index_names = self.index.names
nlevels = self._internal.index_level
int_level = set()
for n in level:
if isinstance(n, int):
if n < 0:
n = n + nlevels
if n < 0:
raise IndexError(
"Too many levels: Index has only {} levels, "
"{} is not a valid level number".format(nlevels, (n - nlevels))
)
if n >= nlevels:
raise IndexError(
"Too many levels: Index has only {} levels, not {}".format(
nlevels, (n + 1)
)
)
else:
if n not in index_names:
raise KeyError("Level {} not found".format(n))
n = index_names.index(n)
int_level.add(n)
if len(level) >= nlevels:
raise ValueError(
"Cannot remove {} levels from an index with {} levels: "
"at least one level must be left.".format(len(level), nlevels)
)
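# Keep only the index levels whose positions were not requested for removal.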
index_spark_columns, index_names, index_dtypes = zip(
*[
item
for i, item in enumerate(
zip(
self._internal.index_spark_columns,
self._internal.index_names,
self._internal.index_dtypes,
)
)
if i not in int_level
]
)
internal = self._internal.copy(
index_spark_columns=list(index_spark_columns),
index_names=list(index_names),
index_dtypes=list(index_dtypes),
)
return DataFrame(internal)
else:
kdf = self.copy()
kdf.columns = kdf.columns.droplevel(level)
return kdf
def drop(
self, labels=None, axis=1, columns: Union[Any, Tuple, List[Any], List[Tuple]] = None
) -> "DataFrame":
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 ('columns');
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Also support for MultiIndex
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
a b
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('a') # doctest: +NORMALIZE_WHITESPACE
b
z w
0 5 7
1 6 8
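A specific tuple label can be dropped as well:
>>> df.drop(columns=('a', 'x'))  # doctest: +SKIP
a b
y z w
0 3 5 7
1 4 6 8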
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if is_name_like_tuple(columns):
columns = [columns]
elif is_name_like_value(columns):
columns = [(columns,)]
else:
columns = [col if is_name_like_tuple(col) else (col,) for col in columns]
drop_column_labels = set(
label
for label in self._internal.column_labels
for col in columns
if label[: len(col)] == col
)
if len(drop_column_labels) == 0:
raise KeyError(columns)
cols, labels = zip(
*(
(column, label)
for column, label in zip(
self._internal.data_spark_column_names, self._internal.column_labels
)
if label not in drop_column_labels
)
)
internal = self._internal.with_new_columns([self._kser_for(label) for label in labels])
return DataFrame(internal)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def _sort(
self, by: List[Column], ascending: Union[bool, List[bool]], inplace: bool, na_position: str
):
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError(
"Length of ascending ({}) != length of by ({})".format(len(ascending), len(by))
)
if na_position not in ("first", "last"):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, "first"): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, "last"): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, "first"): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, "last"): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
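# The natural-order column is appended as the last sort key so that ties keep their
# original row order, i.e. the sort behaves like a stable sort.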
sdf = self._internal.resolved_copy.spark_frame.sort(*(by + [NATURAL_ORDER_COLUMN_NAME]))
kdf = DataFrame(self._internal.with_new_sdf(sdf)) # type: DataFrame
if inplace:
self._update_internal_frame(kdf._internal)
return None
else:
return kdf
def sort_values(
self,
by: Union[Any, List[Any], Tuple, List[Tuple]],
ascending: Union[bool, List[bool]] = True,
inplace: bool = False,
na_position: str = "last",
) -> Optional["DataFrame"]:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_name_like_value(by):
by = [by]
else:
assert is_list_like(by), type(by)
new_by = []
for colname in by:
ser = self[colname]
if not isinstance(ser, ks.Series):
raise ValueError(
"The column %s is not unique. For a multi-index, the label must be a tuple "
"with elements corresponding to each level." % name_like_string(colname)
)
new_by.append(ser.spark.column)
return self._sort(by=new_by, ascending=ascending, inplace=inplace, na_position=na_position)
def sort_index(
self,
axis: int = 0,
level: Optional[Union[int, List[int]]] = None,
ascending: bool = True,
inplace: bool = False,
kind: str = None,
na_position: str = "last",
) -> Optional["DataFrame"]:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError("No other axis than 0 are supported at the moment")
if kind is not None:
raise NotImplementedError(
"Specifying the sorting algorithm is not supported at the moment."
)
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_spark_columns
elif is_list_like(level):
by = [self._internal.index_spark_columns[l] for l in level] # type: ignore
else:
by = [self._internal.index_spark_columns[level]] # type: ignore
return self._sort(by=by, ascending=ascending, inplace=inplace, na_position=na_position)
def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame":
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise.
Returns
-------
DataFrame
DataFrame with levels swapped in MultiIndex.
Examples
--------
>>> midx = pd.MultiIndex.from_arrays(
... [['red', 'blue'], [1, 2], ['s', 'm']], names = ['color', 'number', 'size'])
>>> midx # doctest: +SKIP
MultiIndex([( 'red', 1, 's'),
('blue', 2, 'm')],
names=['color', 'number', 'size'])
Swap levels in a MultiIndex on index.
>>> kdf = ks.DataFrame({'x': [5, 6], 'y':[5, 6]}, index=midx)
>>> kdf # doctest: +NORMALIZE_WHITESPACE
x y
color number size
red 1 s 5 5
blue 2 m 6 6
>>> kdf.swaplevel() # doctest: +NORMALIZE_WHITESPACE
x y
color size number
red s 1 5 5
blue m 2 6 6
>>> kdf.swaplevel(0, 1) # doctest: +NORMALIZE_WHITESPACE
x y
number color size
1 red s 5 5
2 blue m 6 6
>>> kdf.swaplevel('number', 'size') # doctest: +NORMALIZE_WHITESPACE
x y
color size number
red s 1 5 5
blue m 2 6 6
Swap levels in a MultiIndex on columns.
>>> kdf = ks.DataFrame({'x': [5, 6], 'y':[5, 6]})
>>> kdf.columns = midx
>>> kdf
color red blue
number 1 2
size s m
0 5 5
1 6 6
>>> kdf.swaplevel(axis=1)
color red blue
size s m
number 1 2
0 5 5
1 6 6
>>> kdf.swaplevel(0, 1, axis=1)
number 1 2
color red blue
size s m
0 5 5
1 6 6
>>> kdf.swaplevel('number', 'color', axis=1)
number 1 2
color red blue
size s m
0 5 5
1 6 6
"""
axis = validate_axis(axis)
if axis == 0:
internal = self._swaplevel_index(i, j)
else:
assert axis == 1
internal = self._swaplevel_columns(i, j)
return DataFrame(internal)
def swapaxes(self, i: Union[str, int], j: Union[str, int], copy: bool = True) -> "DataFrame":
"""
Interchange axes and swap values axes appropriately.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from databricks.koalas.config import option_context
>>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE
... ks.DataFrame({'a': range(1001)}).swapaxes(i=0, j=1)
Traceback (most recent call last):
...
ValueError: Current DataFrame has more then the given limit 1000 rows.
Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option'
to retrieve to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Parameters
----------
i: {0 or 'index', 1 or 'columns'}. The axis to swap.
j: {0 or 'index', 1 or 'columns'}. The axis to swap.
copy : bool, default True.
Returns
-------
DataFrame
Examples
--------
>>> kdf = ks.DataFrame(
... [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['x', 'y', 'z'], columns=['a', 'b', 'c']
... )
>>> kdf
a b c
x 1 2 3
y 4 5 6
z 7 8 9
>>> kdf.swapaxes(i=1, j=0)
x y z
a 1 4 7
b 2 5 8
c 3 6 9
>>> kdf.swapaxes(i=1, j=1)
a b c
x 1 2 3
y 4 5 6
z 7 8 9
"""
assert copy is True
i = validate_axis(i)
j = validate_axis(j)
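# Swapping the row axis with the column axis is a transpose; swapping an axis with
# itself is a no-op, so just return a copy.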
return self.copy() if i == j else self.transpose()
def _swaplevel_columns(self, i, j) -> InternalFrame:
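# Swap two column-index levels by exchanging both the level names and the
# corresponding entries of every column label tuple.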
assert isinstance(self.columns, pd.MultiIndex)
for index in (i, j):
if not isinstance(index, int) and index not in self.columns.names:
raise KeyError("Level %s not found" % index)
i = i if isinstance(i, int) else self.columns.names.index(i)
j = j if isinstance(j, int) else self.columns.names.index(j)
for index in (i, j):
if index >= self.columns.nlevels or index < -self.columns.nlevels:
raise IndexError(
"Too many levels: Columns have only %s levels, "
"%s is not a valid level number" % (self._internal.index_level, index)
)
column_label_names = self._internal.column_label_names.copy()
column_label_names[i], column_label_names[j], = (
column_label_names[j],
column_label_names[i],
)
column_labels = self._internal._column_labels
column_label_list = [list(label) for label in column_labels]
for label_list in column_label_list:
label_list[i], label_list[j] = label_list[j], label_list[i]
column_labels = [tuple(x) for x in column_label_list]
internal = self._internal.copy(
column_label_names=list(column_label_names), column_labels=list(column_labels)
)
return internal
def _swaplevel_index(self, i, j) -> InternalFrame:
assert isinstance(self.index, ks.MultiIndex)
for index in (i, j):
if not isinstance(index, int) and index not in self.index.names:
raise KeyError("Level %s not found" % index)
i = i if isinstance(i, int) else self.index.names.index(i)
j = j if isinstance(j, int) else self.index.names.index(j)
for index in (i, j):
if index >= self._internal.index_level or index < -self._internal.index_level:
raise IndexError(
"Too many levels: Index has only %s levels, "
"%s is not a valid level number" % (self._internal.index_level, index)
)
index_map = list(
zip(
self._internal.index_spark_columns,
self._internal.index_names,
self._internal.index_dtypes,
)
)
index_map[i], index_map[j], = index_map[j], index_map[i]
index_spark_columns, index_names, index_dtypes = zip(*index_map)
internal = self._internal.copy(
index_spark_columns=list(index_spark_columns),
index_names=list(index_names),
index_dtypes=list(index_dtypes),
)
return internal
# TODO: add keep = First
def nlargest(self, n: int, columns: "Any") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
return self.sort_values(by=columns, ascending=False).head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: "Any") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
return self.sort_values(by=columns, ascending=True).head(n=n)
def isin(self, values) -> "DataFrame":
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns))
)
data_spark_columns = []
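# For a dict, columns not listed in `values` become all-False (as in pandas);
# for a list-like, every column is tested against the same set of values.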
if isinstance(values, dict):
for i, col in enumerate(self.columns):
if col in values:
item = values[col]
item = item.tolist() if isinstance(item, np.ndarray) else list(item)
data_spark_columns.append(
self._internal.spark_column_for(self._internal.column_labels[i])
.isin(item)
.alias(self._internal.data_spark_column_names[i])
)
else:
data_spark_columns.append(
F.lit(False).alias(self._internal.data_spark_column_names[i])
)
elif is_list_like(values):
values = values.tolist() if isinstance(values, np.ndarray) else list(values)
data_spark_columns += [
self._internal.spark_column_for(label)
.isin(values)
.alias(self._internal.spark_column_name_for(label))
for label in self._internal.column_labels
]
else:
raise TypeError("Values should be iterable, Series, DataFrame or dict.")
return DataFrame(self._internal.with_new_columns(data_spark_columns))
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(
self,
right: "DataFrame",
how: str = "inner",
on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
left_on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
right_on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
left_index: bool = False,
right_index: bool = False,
suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from the left frame, similar to a SQL left outer join; does not
preserve key order, unlike pandas.
right: use only keys from the right frame, similar to a SQL right outer join; does not
preserve key order, unlike pandas.
outer: use the union of keys from both frames, similar to a SQL full outer join; sorts
keys lexicographically.
inner: use the intersection of keys from both frames, similar to a SQL inner join;
does not preserve the order of the left keys, unlike pandas.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
DataFrame.join : Join columns of another DataFrame.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
def to_list(os: Optional[Union[Any, List[Any], Tuple, List[Tuple]]]) -> List[Tuple]:
if os is None:
return []
elif is_name_like_tuple(os):
return [os] # type: ignore
elif is_name_like_value(os):
return [(os,)]
else:
return [o if is_name_like_tuple(o) else (o,) for o in os]
if isinstance(right, ks.Series):
right = right.to_frame()
if on:
if left_on or right_on:
raise ValueError(
'Can only pass argument "on" OR "left_on" and "right_on", '
"not a combination of both."
)
left_key_names = list(map(self._internal.spark_column_name_for, to_list(on)))
right_key_names = list(map(right._internal.spark_column_name_for, to_list(on)))
else:
# TODO: need special handling for multi-index.
if left_index:
left_key_names = self._internal.index_spark_column_names
else:
left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on)))
if right_index:
right_key_names = right._internal.index_spark_column_names
else:
right_key_names = list(
map(right._internal.spark_column_name_for, to_list(right_on))
)
if left_key_names and not right_key_names:
raise ValueError("Must pass right_on or right_index=True")
if right_key_names and not left_key_names:
raise ValueError("Must pass left_on or left_index=True")
if not left_key_names and not right_key_names:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
"No common columns to perform merge on. Merge options: "
"left_on=None, right_on=None, left_index=False, right_index=False"
)
left_key_names = list(map(self._internal.spark_column_name_for, to_list(common)))
right_key_names = list(map(right._internal.spark_column_name_for, to_list(common)))
if len(left_key_names) != len(right_key_names):
raise ValueError("len(left_keys) must equal len(right_keys)")
# We should distinguish the name to avoid ambiguous column name after merging.
right_prefix = "__right_"
right_key_names = [right_prefix + right_key_name for right_key_name in right_key_names]
how = validate_how(how)
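# `resolve` prefixes every non-hidden column of one side with '__<side>_' so that the
# two Spark frames can be joined without ambiguous column references; only the right
# frame needs to be resolved here, matching the '__right_' prefix of the join keys.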
def resolve(internal, side):
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = sdf.select(
[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
]
+ list(HIDDEN_COLUMNS)
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
)
left_internal = self._internal.resolved_copy
right_internal = resolve(right._internal, "right")
left_table = left_internal.spark_frame.alias("left_table")
right_table = right_internal.spark_frame.alias("right_table")
left_key_columns = [scol_for(left_table, label) for label in left_key_names]
right_key_columns = [scol_for(right_table, label) for label in right_key_names]
join_condition = reduce(
lambda x, y: x & y,
[lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)],
)
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels)
exprs = []
data_columns = []
column_labels = []
left_scol_for = lambda label: scol_for(
left_table, left_internal.spark_column_name_for(label)
)
right_scol_for = lambda label: scol_for(
right_table, right_internal.spark_column_name_for(label)
)
for label in left_internal.column_labels:
col = left_internal.spark_column_name_for(label)
scol = left_scol_for(label)
if label in duplicate_columns:
spark_column_name = left_internal.spark_column_name_for(label)
if (
spark_column_name in left_key_names
and (right_prefix + spark_column_name) in right_key_names
):
right_scol = right_scol_for(label)
if how == "right":
scol = right_scol.alias(col)
elif how == "full":
scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
else:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
label = tuple([str(label[0]) + left_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
for label in right_internal.column_labels:
# recover `right_prefix` here.
col = right_internal.spark_column_name_for(label)[len(right_prefix) :]
scol = right_scol_for(label).alias(col)
if label in duplicate_columns:
spark_column_name = left_internal.spark_column_name_for(label)
if (
spark_column_name in left_key_names
and (right_prefix + spark_column_name) in right_key_names
):
continue
else:
col = col + right_suffix
scol = scol.alias(col)
label = tuple([str(label[0]) + right_suffix] + list(label[1:]))
exprs.append(scol)
data_columns.append(col)
column_labels.append(label)
left_index_scols = left_internal.index_spark_columns
right_index_scols = right_internal.index_spark_columns
# Retain indices if they are used for joining
if left_index:
if right_index:
if how in ("inner", "left"):
exprs.extend(left_index_scols)
index_spark_column_names = left_internal.index_spark_column_names
index_names = left_internal.index_names
elif how == "right":
exprs.extend(right_index_scols)
index_spark_column_names = right_internal.index_spark_column_names
index_names = right_internal.index_names
else:
index_spark_column_names = left_internal.index_spark_column_names
index_names = left_internal.index_names
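# Full outer join on both indexes: coalesce the left and right index columns so that
# the resulting index covers keys coming from either side.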
for col, left_scol, right_scol in zip(
index_spark_column_names, left_index_scols, right_index_scols
):
scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)
exprs.append(scol.alias(col))
else:
exprs.extend(right_index_scols)
index_spark_column_names = right_internal.index_spark_column_names
index_names = right_internal.index_names
elif right_index:
exprs.extend(left_index_scols)
index_spark_column_names = left_internal.index_spark_column_names
index_names = left_internal.index_names
else:
index_spark_column_names = []
index_names = []
selected_columns = joined_table.select(*exprs)
internal = InternalFrame(
spark_frame=selected_columns,
index_spark_columns=[
scol_for(selected_columns, col) for col in index_spark_column_names
],
index_names=index_names,
column_labels=column_labels,
data_spark_columns=[scol_for(selected_columns, col) for col in data_columns],
)
return DataFrame(internal)
def join(
self,
right: "DataFrame",
on: Optional[Union[Any, List[Any], Tuple, List[Tuple]]] = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
) -> "DataFrame":
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame’s index (or column if on is specified).
* right: use `right`’s index.
* outer: form union of `left` frame’s index (or column if on is specified) with
right’s index, and sort it lexicographically.
* inner: form intersection of `left` frame’s index (or column if on is specified)
with `right`’s index, preserving the order of the `left`’s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> kdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> kdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
>>> join_kdf.sort_values(by=join_kdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
>>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right’s index but we can use any column in df. This method does not preserve
the original DataFrame’s index in the result, unlike pandas.
>>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
>>> join_kdf.index
Int64Index([0, 1, 2, 3], dtype='int64')
"""
if isinstance(right, ks.Series):
common = list(self.columns.intersection([right.name]))
else:
common = list(self.columns.intersection(right.columns))
if len(common) > 0 and not lsuffix and not rsuffix:
raise ValueError(
"columns overlap but no suffix specified: " "{rename}".format(rename=common)
)
need_set_index = False
if on:
if not is_list_like(on):
on = [on] # type: ignore
if len(on) != right._internal.index_level:
raise ValueError(
'len(left_on) must equal the number of levels in the index of "right"'
)
need_set_index = len(set(on) & set(self.index.names)) == 0
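# If the join keys are not already index levels, temporarily set them as the index so
# that the index-to-index merge below can be used; reset_index() afterwards moves them
# back to columns (the original index is not preserved, as noted in the docstring).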
if need_set_index:
self = self.set_index(on)
join_kdf = self.merge(
right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix)
)
return join_kdf.reset_index() if need_set_index else join_kdf
def append(
self,
other: "DataFrame",
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> "DataFrame":
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame
    The data to append. Unlike pandas, appending a Series or a list of objects
    is not supported.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise NotImplementedError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_spark_columns
if len(index_scols) != other._internal.index_level:
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
if (
self._internal.spark_frame.select(index_scols)
.intersect(
other._internal.spark_frame.select(other._internal.index_spark_columns)
)
.count()
) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return cast(DataFrame, concat([self, other], ignore_index=ignore_index))
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
DataFrame.join : Join columns of another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
broadcast : Marks a DataFrame as small enough for use in broadcast joins.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update;
only values at matching index/column labels are updated.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df.sort_index()
A B
0 a d
1 b y
2 c e
If `other` contains None, the corresponding values are not updated in the original DataFrame.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df.sort_index()
A B
0 1 4.0
1 2 500.0
2 3 6.0
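With ``overwrite=False``, only values that are null in the original DataFrame are updated.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400.0, None, 600.0]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6]}, columns=['B'])
>>> df.update(new_df, overwrite=False)
>>> df.sort_index()  # doctest: +SKIP
A B
0 1 400.0
1 2 5.0
2 3 600.0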
"""
if join != "left":
raise NotImplementedError("Only left join is supported")
if isinstance(other, ks.Series):
other = other.to_frame()
update_columns = list(
set(self._internal.column_labels).intersection(set(other._internal.column_labels))
)
update_sdf = self.join(
other[update_columns], rsuffix="_new"
)._internal.resolved_copy.spark_frame
data_dtypes = self._internal.data_dtypes.copy()
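# For each overlapping column: with overwrite=True take the new value unless it is null,
# otherwise keep the old one; with overwrite=False only fill values that are null in the
# original column.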
for column_labels in update_columns:
column_name = self._internal.spark_column_name_for(column_labels)
old_col = scol_for(update_sdf, column_name)
new_col = scol_for(
update_sdf, other._internal.spark_column_name_for(column_labels) + "_new"
)
if overwrite:
update_sdf = update_sdf.withColumn(
column_name, F.when(new_col.isNull(), old_col).otherwise(new_col)
)
else:
update_sdf = update_sdf.withColumn(
column_name, F.when(old_col.isNull(), new_col).otherwise(old_col)
)
data_dtypes[self._internal.column_labels.index(column_labels)] = None # TODO: dtype?
sdf = update_sdf.select(
[scol_for(update_sdf, col) for col in self._internal.spark_column_names]
+ list(HIDDEN_COLUMNS)
)
internal = self._internal.with_new_sdf(sdf, data_dtypes=data_dtypes)
self._update_internal_frame(internal, requires_same_anchor=False)
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
random_state: Optional[int] = None,
) -> "DataFrame":
"""
Return a random sample of items from an axis of object.
Please call this function with the ``frac`` argument specified as a keyword argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError(
"Function sample currently does not support specifying "
"exact number of items to return. Use frac instead."
)
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._internal.resolved_copy.spark_frame.sample(
withReplacement=replace, fraction=frac, seed=random_state
)
return DataFrame(self._internal.with_new_sdf(sdf))
def astype(self, dtype) -> "DataFrame":
"""
Cast a Koalas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire Koalas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
applied = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
for col_name, col in self.items():
if col_name in dtype:
applied.append(col.astype(dtype=dtype[col_name]))
else:
applied.append(col)
else:
for col_name, col in self.items():
applied.append(col.astype(dtype=dtype))
return DataFrame(self._internal.with_new_columns(applied))
def add_prefix(self, prefix) -> "DataFrame":
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
return self._apply_series_op(
lambda kser: kser.rename(tuple([prefix + i for i in kser._column_label]))
)
def add_suffix(self, suffix) -> "DataFrame":
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
return self._apply_series_op(
lambda kser: kser.rename(tuple([i + suffix for i in kser._column_label]))
)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame":
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Currently only numeric columns are analyzed; non-numeric columns are
ignored. The output will vary depending on what is provided. Refer to
the notes below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
DataFrame
Summary statistics of the Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
For multi-index columns:
>>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')]
>>> df.describe() # doctest: +NORMALIZE_WHITESPACE
num
a b
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
>>> df[('num', 'b')].describe()
count 3.0
mean 5.0
std 1.0
min 4.0
25% 4.0
50% 5.0
75% 6.0
max 6.0
Name: (num, b), dtype: float64
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
column_labels = []
for label in self._internal.column_labels:
scol = self._internal.spark_column_for(label)
spark_type = self._internal.spark_type_for(label)
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(
F.nanvl(scol, F.lit(None)).alias(self._internal.spark_column_name_for(label))
)
column_labels.append(label)
elif isinstance(spark_type, NumericType):
exprs.append(scol)
column_labels.append(label)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._internal.spark_frame.select(*exprs).summary(stats)
sdf = sdf.replace("stddev", "std", subset="summary")
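# Illustrative sketch of the intermediate `sdf` at this point for the docstring
# example with percentiles=[0.85, 0.15] (the exact string formatting of the
# values produced by Spark's summary() may differ):
#
# +-------+--------+--------+
# |summary|numeric1|numeric2|
# +-------+--------+--------+
# |  count|       3|       3|
# |   mean|     2.0|     5.0|
# |    std|     1.0|     1.0|
# |    min|     1.0|     4.0|
# |    15%|     1.0|     4.0|
# |    50%|     2.0|     5.0|
# |    85%|     3.0|     6.0|
# |    max|     3.0|     6.0|
# +-------+--------+--------+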
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, "summary")],
column_labels=column_labels,
data_spark_columns=[
scol_for(sdf, self._internal.spark_column_name_for(label))
for label in column_labels
],
)
return DataFrame(internal).astype("float64")
def drop_duplicates(self, subset=None, keep="first", inplace=False) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy.
Returns
-------
DataFrame
DataFrame with duplicates removed or None if ``inplace=True``.
Examples
--------
>>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_index()
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep='last').sort_index()
a b
0 1 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep=False).sort_index()
a b
0 1 a
3 2 c
4 3 d
"""
inplace = validate_bool_kwarg(inplace, "inplace")
sdf, column = self._mark_duplicates(subset, keep)
sdf = sdf.where(~scol_for(sdf, column)).drop(column)
internal = self._internal.with_new_sdf(sdf)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def reindex(
self,
labels: Optional[Any] = None,
index: Optional[Any] = None,
columns: Optional[Any] = None,
axis: Optional[Union[int, str]] = None,
copy: Optional[bool] = True,
fill_value: Optional[Any] = None,
) -> "DataFrame":
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ks.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index()
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index()
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
axis = validate_axis(axis)
if axis == 0:
index = labels
elif axis == 1:
columns = labels
else:
raise ValueError(
"No axis named %s for object type %s." % (axis, type(axis).__name__)
)
if index is not None and not is_list_like(index):
raise TypeError(
"Index must be called with a collection of some kind, "
"%s was passed" % type(index)
)
if columns is not None and not is_list_like(columns):
raise TypeError(
"Columns must be called with a collection of some kind, "
"%s was passed" % type(columns)
)
df = self
if index is not None:
df = df._reindex_index(index, fill_value)
if columns is not None:
df = df._reindex_columns(columns, fill_value)
# Copy
if copy and df is self:
return df.copy()
else:
return df
def _reindex_index(self, index, fill_value):
# When axis is index, we can mimic pandas' by a right outer join.
nlevels = self._internal.index_level
assert nlevels <= 1 or (
isinstance(index, ks.MultiIndex) and nlevels == index.nlevels
), "MultiIndex DataFrame can only be reindexed with a similar Koalas MultiIndex."
index_columns = self._internal.index_spark_column_names
frame = self._internal.resolved_copy.spark_frame.drop(NATURAL_ORDER_COLUMN_NAME)
if isinstance(index, ks.Index):
if nlevels != index.nlevels:
return DataFrame(index._internal.with_new_columns([])).reindex(
columns=self.columns, fill_value=fill_value
)
index_names = index._internal.index_names
scols = index._internal.index_spark_columns
labels = index._internal.spark_frame.select(
[scol.alias(index_column) for scol, index_column in zip(scols, index_columns)]
)
else:
kser = ks.Series(list(index))
labels = kser._internal.spark_frame.select(kser.spark.column.alias(index_columns[0]))
index_names = self._internal.index_names
if fill_value is not None:
frame_index_columns = [
verify_temp_column_name(frame, "__frame_index_column_{}__".format(i))
for i in range(nlevels)
]
index_scols = [
scol_for(frame, index_col).alias(frame_index_col)
for index_col, frame_index_col in zip(index_columns, frame_index_columns)
]
scols = self._internal.resolved_copy.data_spark_columns
frame = frame.select(index_scols + scols)
temp_fill_value = verify_temp_column_name(frame, "__fill_value__")
labels = labels.withColumn(temp_fill_value, F.lit(fill_value))
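# Sketch of the fill-value path: the requested labels carry `fill_value` as an
# extra column, the original frame is right-joined on the index columns, and
# each data column is replaced by that carried value wherever the frame side of
# the join is null while the label side is not. For the docstring example with
# fill_value=0, the missing 'Iceweasel' row therefore comes back as
# http_status=0, response_time=0.0 instead of NaN.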
frame_index_scols = [scol_for(frame, col) for col in frame_index_columns]
labels_index_scols = [scol_for(labels, col) for col in index_columns]
joined_df = frame.join(
labels,
on=[fcol == lcol for fcol, lcol in zip(frame_index_scols, labels_index_scols)],
how="right",
)
joined_df = joined_df.select(
*labels_index_scols,
*[
F.when(
reduce(
lambda c1, c2: c1 & c2,
[
fcol.isNull() & lcol.isNotNull()
for fcol, lcol in zip(frame_index_scols, labels_index_scols)
],
),
scol_for(joined_df, temp_fill_value),
)
.otherwise(scol_for(joined_df, col))
.alias(col)
for col in self._internal.data_spark_column_names
],
)
else:
joined_df = frame.join(labels, on=index_columns, how="right")
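# Without a fill value, a plain right outer join against the requested labels
# is enough: labels absent from the original frame simply yield null data
# columns, which surface as NaN (e.g. the 'Comodo Dragon' row in the docstring
# example above).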
sdf = joined_df.drop(NATURAL_ORDER_COLUMN_NAME)
internal = self._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in self._internal.index_spark_column_names
],
index_names=index_names,
index_dtypes=None, # TODO: dtypes?
data_spark_columns=[
scol_for(sdf, col) for col in self._internal.data_spark_column_names
],
)
return DataFrame(internal)
def _reindex_columns(self, columns, fill_value):
level = self._internal.column_labels_level
if level > 1:
label_columns = list(columns)
for col in label_columns:
if not isinstance(col, tuple):
raise TypeError("Expected tuple, got {}".format(type(col).__name__))
else:
label_columns = [(col,) for col in columns]
for col in label_columns:
if len(col) != level:
raise ValueError(
"shape (1,{}) doesn't match the shape (1,{})".format(len(col), level)
)
fill_value = np.nan if fill_value is None else fill_value
scols_or_ksers, labels = [], []
for label in label_columns:
if label in self._internal.column_labels:
scols_or_ksers.append(self._kser_for(label))
else:
scols_or_ksers.append(F.lit(fill_value).alias(name_like_string(label)))
labels.append(label)
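# Requested columns that do not exist in the frame are materialized as literal
# columns, roughly F.lit(np.nan).alias('user_agent') for the docstring example
# that reindexes with columns=['http_status', 'user_agent'].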
if isinstance(columns, pd.Index):
column_label_names = [
name if is_name_like_tuple(name) else (name,) for name in columns.names
]
internal = self._internal.with_new_columns(
scols_or_ksers, column_labels=labels, column_label_names=column_label_names
)
else:
internal = self._internal.with_new_columns(scols_or_ksers, column_labels=labels)
return DataFrame(internal)
def reindex_like(self, other: "DataFrame", copy: bool = True) -> "DataFrame":
"""
Return a DataFrame with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : DataFrame
Its row and column indices are used to define the new indices
of this object.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
Returns
-------
DataFrame
DataFrame with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = ks.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = ks.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN None
2014-02-15 35.1 NaN medium
"""
if isinstance(other, DataFrame):
return self.reindex(index=other.index, columns=other.columns, copy=copy)
else:
raise TypeError("other must be a Koalas DataFrame")
def melt(self, id_vars=None, value_vars=None, var_name=None, value_name="value") -> "DataFrame":
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column. If None it uses `frame.columns.name` or
‘variable’.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ks.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> df.melt(value_vars='A')
variable value
0 A a
1 A b
2 A c
>>> ks.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ks.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
column_labels = self._internal.column_labels
if id_vars is None:
id_vars = []
else:
if isinstance(id_vars, tuple):
if self._internal.column_labels_level == 1:
id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]
else:
raise ValueError(
"id_vars must be a list of tuples" " when columns are a MultiIndex"
)
elif is_name_like_value(id_vars):
id_vars = [(id_vars,)]
else:
id_vars = [idv if is_name_like_tuple(idv) else (idv,) for idv in id_vars]
non_existence_col = [idv for idv in id_vars if idv not in column_labels]
if len(non_existence_col) != 0:
raveled_column_labels = np.ravel(column_labels)
missing = [
nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels
]
if len(missing) != 0:
raise KeyError(
"The following 'id_vars' are not present"
" in the DataFrame: {}".format(missing)
)
else:
raise KeyError(
"None of {} are in the {}".format(non_existence_col, column_labels)
)
if value_vars is None:
value_vars = []
else:
if isinstance(value_vars, tuple):
if self._internal.column_labels_level == 1:
value_vars = [
valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars
]
else:
raise ValueError(
"value_vars must be a list of tuples" " when columns are a MultiIndex"
)
elif is_name_like_value(value_vars):
value_vars = [(value_vars,)]
else:
value_vars = [valv if is_name_like_tuple(valv) else (valv,) for valv in value_vars]
non_existence_col = [valv for valv in value_vars if valv not in column_labels]
if len(non_existence_col) != 0:
raveled_column_labels = np.ravel(column_labels)
missing = [
nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels
]
if len(missing) != 0:
raise KeyError(
"The following 'value_vars' are not present"
" in the DataFrame: {}".format(missing)
)
else:
raise KeyError(
"None of {} are in the {}".format(non_existence_col, column_labels)
)
if len(value_vars) == 0:
value_vars = column_labels
column_labels = [label for label in column_labels if label not in id_vars]
sdf = self._internal.spark_frame
if var_name is None:
if (
self._internal.column_labels_level == 1
and self._internal.column_label_names[0] is None
):
var_name = ["variable"]
else:
var_name = [
name_like_string(name) if name is not None else "variable_{}".format(i)
for i, name in enumerate(self._internal.column_label_names)
]
elif isinstance(var_name, str):
var_name = [var_name]
pairs = F.explode(
F.array(
*[
F.struct(
*(
[F.lit(c).alias(name) for c, name in zip(label, var_name)]
+ [self._internal.spark_column_for(label).alias(value_name)]
)
)
for label in column_labels
if label in value_vars
]
)
)
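# Illustrative sketch of the explode step for the docstring frame with
# id_vars='A' (single-level columns, var_name=['variable']): each input row is
# turned into an array of structs such as
#   [{'variable': 'B', 'value': 1}, {'variable': 'C', 'value': 2}]
# and F.explode() then emits one output row per struct.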
columns = (
[
self._internal.spark_column_for(label).alias(name_like_string(label))
for label in id_vars
]
+ [F.col("pairs.`%s`" % name) for name in var_name]
+ [F.col("pairs.`%s`" % value_name)]
)
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(
InternalFrame(
spark_frame=exploded_df,
index_spark_columns=None,
column_labels=(
[label if len(label) == 1 else (name_like_string(label),) for label in id_vars]
+ [(name,) for name in var_name]
+ [(value_name,)]
),
)
)
def stack(self) -> Union["DataFrame", "Series"]:
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = ks.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack().sort_index()
cat height 1
weight 0
dog height 3
weight 2
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = ks.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack().sort_index()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = ks.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
"""
from databricks.koalas.series import first_series
if len(self._internal.column_labels) == 0:
return DataFrame(
self._internal.copy(
column_label_names=self._internal.column_label_names[:-1]
).with_filter(F.lit(False))
)
column_labels = defaultdict(dict) # type: Union[defaultdict, OrderedDict]
index_values = set()
should_returns_series = False
for label in self._internal.column_labels:
new_label = label[:-1]
if len(new_label) == 0:
new_label = None
should_returns_series = True
value = label[-1]
scol = self._internal.spark_column_for(label)
column_labels[new_label][value] = scol
index_values.add(value)
column_labels = OrderedDict(sorted(column_labels.items(), key=lambda x: x[0]))
index_name = self._internal.column_label_names[-1]
column_label_names = self._internal.column_label_names[:-1]
if len(column_label_names) == 0:
column_label_names = [None]
index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level)
data_columns = [name_like_string(label) for label in column_labels]
structs = [
F.struct(
[F.lit(value).alias(index_column)]
+ [
(
column_labels[label][value]
if value in column_labels[label]
else F.lit(None)
).alias(name)
for label, name in zip(column_labels, data_columns)
]
).alias(value)
for value in index_values
]
pairs = F.explode(F.array(structs))
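# Illustrative sketch for the single-level docstring example
# (df_single_level_cols): the 'cat' row is expanded into one struct per former
# column label, roughly one tagged 'weight' carrying 0 and one tagged 'height'
# carrying 1, and F.explode() turns those structs into the stacked rows
# ('cat', 'weight') -> 0 and ('cat', 'height') -> 1.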
sdf = self._internal.spark_frame.withColumn("pairs", pairs)
sdf = sdf.select(
self._internal.index_spark_columns
+ [sdf["pairs"][index_column].alias(index_column)]
+ [sdf["pairs"][name].alias(name) for name in data_columns]
)
internal = InternalFrame( # TODO: dtypes?
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col)
for col in (self._internal.index_spark_column_names + [index_column])
],
index_names=self._internal.index_names + [index_name],
index_dtypes=self._internal.index_dtypes + [None],
column_labels=list(column_labels),
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
column_label_names=column_label_names, # type: ignore
)
kdf = DataFrame(internal) # type: "DataFrame"
if should_returns_series:
return first_series(kdf)
else:
return kdf
def unstack(self) -> Union["DataFrame", "Series"]:
"""
Pivot the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series.
.. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and
it could cause serious performance degradation since Spark partitions it by rows.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).
Examples
--------
>>> df = ks.DataFrame({"A": {"0": "a", "1": "b", "2": "c"},
... "B": {"0": "1", "1": "3", "2": "5"},
... "C": {"0": "2", "1": "4", "2": "6"}},
... columns=["A", "B", "C"])
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> df.unstack().sort_index()
A 0 a
1 b
2 c
B 0 1
1 3
2 5
C 0 2
1 4
2 6
dtype: object
>>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')])
>>> df.unstack().sort_index()
X A 0 a
1 b
2 c
B 0 1
1 3
2 5
Y C 0 2
1 4
2 6
dtype: object
For MultiIndex case:
>>> df = ks.DataFrame({"A": ["a", "b", "c"],
... "B": [1, 3, 5],
... "C": [2, 4, 6]},
... columns=["A", "B", "C"])
>>> df = df.set_index('A', append=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
B C
A
0 a 1 2
1 b 3 4
2 c 5 6
>>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A a b c a b c
0 1.0 NaN NaN 2.0 NaN NaN
1 NaN 3.0 NaN NaN 4.0 NaN
2 NaN NaN 5.0 NaN NaN 6.0
"""
from databricks.koalas.series import first_series
if self._internal.index_level > 1:
# The index after `reset_index()` will never be used, so use "distributed" index
# as a dummy to avoid overhead.
with option_context("compute.default_index_type", "distributed"):
df = self.reset_index()
index = df._internal.column_labels[: self._internal.index_level - 1]
columns = df.columns[self._internal.index_level - 1]
df = df.pivot_table(
index=index, columns=columns, values=self._internal.column_labels, aggfunc="first"
)
internal = df._internal.copy(
index_names=self._internal.index_names[:-1],
index_dtypes=self._internal.index_dtypes[:-1],
column_label_names=(
df._internal.column_label_names[:-1]
+ [
None
if self._internal.index_names[-1] is None
else df._internal.column_label_names[-1]
]
),
)
return DataFrame(internal)
# TODO: Codes here are similar with melt. Should we deduplicate?
column_labels = self._internal.column_labels
ser_name = SPARK_DEFAULT_SERIES_NAME
sdf = self._internal.spark_frame
new_index_columns = [
SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)
]
new_index_map = list(zip(new_index_columns, self._internal.column_label_names))
pairs = F.explode(
F.array(
*[
F.struct(
*(
[F.lit(c).alias(name) for c, name in zip(idx, new_index_columns)]
+ [self._internal.spark_column_for(idx).alias(ser_name)]
)
)
for idx in column_labels
]
)
)
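# Illustrative sketch (this mirrors the melt-style explode above): for the
# first docstring frame, the row (0, 'a', '1', '2') becomes one struct per
# former column, roughly tagged 'A' -> 'a', 'B' -> '1', 'C' -> '2'; exploding
# them produces the long Series indexed by (former column label, original
# index), as shown in the docstring output.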
columns = [
F.col("pairs.%s" % name)
for name in new_index_columns[: self._internal.column_labels_level]
] + [F.col("pairs.%s" % ser_name)]
new_index_len = len(new_index_columns)
existing_index_columns = []
for i, index_name in enumerate(self._internal.index_names):
new_index_map.append((SPARK_INDEX_NAME_FORMAT(i + new_index_len), index_name))
existing_index_columns.append(
self._internal.index_spark_columns[i].alias(
SPARK_INDEX_NAME_FORMAT(i + new_index_len)
)
)
exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns)
index_spark_column_names, index_names = zip(*new_index_map)
return first_series(
DataFrame(
InternalFrame( # TODO: dtypes?
exploded_df,
index_spark_columns=[
scol_for(exploded_df, col) for col in index_spark_column_names
],
index_names=list(index_names),
column_labels=[None],
)
)
)
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> "Series":
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty)
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
dtype: bool
"""
from databricks.koalas.series import first_series
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
applied = []
column_labels = self._internal.column_labels
for label in column_labels:
scol = self._internal.spark_column_for(label)
all_col = F.min(F.coalesce(scol.cast("boolean"), F.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.any, Series.quantile. Maybe we should deduplicate it.
value_column = "value"
cols = []
for label, applied_col in zip(column_labels, applied):
cols.append(
F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)]
+ [applied_col.alias(value_column)]
)
)
sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select(
F.explode(F.col("arrays"))
)
sdf = sdf.selectExpr("col.*")
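# Illustrative sketch of the transpose-style trick above: the per-column
# aggregates are packed into a single array of structs and exploded, so the
# resulting `sdf` holds one row per original column, roughly
#   ('col1', True), ('col2', False), ('col3', False), ...
# which is exactly the Series shown in the docstring example.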
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i))
for i in range(self._internal.column_labels_level)
],
index_names=self._internal.column_label_names,
column_labels=[None],
data_spark_columns=[scol_for(sdf, value_column)],
)
return first_series(DataFrame(internal))
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> "Series":
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if there is at least one True in each column.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
dtype: bool
"""
from databricks.koalas.series import first_series
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
applied = []
column_labels = self._internal.column_labels
for label in column_labels:
scol = self._internal.spark_column_for(label)
all_col = F.max(F.coalesce(scol.cast("boolean"), F.lit(False)))
applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.all, Series.quantile. Maybe we should deduplicate it.
value_column = "value"
cols = []
for label, applied_col in zip(column_labels, applied):
cols.append(
F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)]
+ [applied_col.alias(value_column)]
)
)
sdf = self._internal.spark_frame.select(F.array(*cols).alias("arrays")).select(
F.explode(F.col("arrays"))
)
sdf = sdf.selectExpr("col.*")
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i))
for i in range(self._internal.column_labels_level)
],
index_names=self._internal.column_label_names,
column_labels=[None],
data_spark_columns=[scol_for(sdf, value_column)],
)
return first_series(DataFrame(internal))
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method="average", ascending=True) -> "DataFrame":
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
If method is set to 'dense', it leaves no gaps between groups.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
return self._apply_series_op(
lambda kser: kser._rank(method=method, ascending=ascending), should_resolve=True
)
def filter(self, items=None, like=None, regex=None, axis=None) -> "DataFrame":
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
For a Series,
>>> # select rows by name
>>> df.one.filter(items=['rabbit'])
rabbit 4
Name: one, dtype: int64
>>> # select rows by regular expression
>>> df.one.filter(regex='e$')
mouse 1
Name: one, dtype: int64
>>> # select rows containing 'bbi'
>>> df.one.filter(like='bbi')
rabbit 4
Name: one, dtype: int64
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive"
)
axis = validate_axis(axis, none_axis=1)
index_scols = self._internal.index_spark_columns
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis == 0:
if len(index_scols) == 1:
col = None
for item in items:
if col is None:
col = index_scols[0] == F.lit(item)
else:
col = col | (index_scols[0] == F.lit(item))
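# The loop above folds the requested labels into one OR predicate over the
# single index column; e.g. a hypothetical items=['mouse', 'rabbit'] with
# axis=0 becomes roughly (index == 'mouse') | (index == 'rabbit'), which is
# then applied as a row filter via `with_filter` below.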
elif len(index_scols) > 1:
# for multi-index
col = None
for item in items:
if not isinstance(item, tuple):
raise TypeError("Unsupported type {}".format(type(item).__name__))
if not item:
raise ValueError("The item should not be empty.")
midx_col = None
for i, element in enumerate(item):
if midx_col is None:
midx_col = index_scols[i] == F.lit(element)
else:
midx_col = midx_col & (index_scols[i] == F.lit(element))
if col is None:
col = midx_col
else:
col = col | midx_col
else:
raise ValueError("Single or multi index must be specified.")
return DataFrame(self._internal.with_filter(col))
else:
return self[items]
elif like is not None:
if axis == 0:
col = None
for index_scol in index_scols:
if col is None:
col = index_scol.contains(like)
else:
col = col | index_scol.contains(like)
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
output_labels = [label for label in column_labels if any(like in i for i in label)]
return self[output_labels]
elif regex is not None:
if axis == 0:
col = None
for index_scol in index_scols:
if col is None:
col = index_scol.rlike(regex)
else:
col = col | index_scol.rlike(regex)
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
matcher = re.compile(regex)
output_labels = [
label
for label in column_labels
if any(matcher.search(i) is not None for i in label)
]
return self[output_labels]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def rename(
self,
mapper=None,
index=None,
columns=None,
axis="index",
inplace=False,
level=None,
errors="ignore",
) -> Optional["DataFrame"]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series
will be left as-is. Extra labels listed don’t throw an error.
Parameters
----------
mapper : dict-like or function
Dict-like or functions transformations to apply to that axis’ values.
Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`
and `columns`.
index : dict-like or function
Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").
columns : dict-like or function
Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").
axis : int or str, default 'index'
Axis to target with mapper. Can be either the axis name ('index', 'columns') or
number (0, 1).
inplace : bool, default False
Whether to return a new DataFrame.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`
contains labels that are not present in the Index being transformed. If 'ignore',
existing keys will be renamed and extra keys will be ignored.
Returns
-------
DataFrame with the renamed axis labels.
Raises
------
`KeyError`
If any of the labels is not found in the selected axis and "errors='raise'".
Examples
--------
>>> kdf1 = ks.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> kdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE
a c
0 1 4
1 2 5
2 3 6
>>> kdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> def str_lower(s) -> str:
... return str.lower(s)
>>> kdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE
a b
0 1 4
1 2 5
2 3 6
>>> def mul10(x) -> int:
... return x * 10
>>> kdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])
>>> kdf2 = ks.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
>>> kdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE
x y
A B C D
0 1 2 3 4
1 5 6 7 8
>>> kdf3 = ks.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))
>>> kdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE
a b
x a 1 2
b 3 4
y c 5 6
d 7 8
"""
def gen_mapper_fn(mapper):
if isinstance(mapper, dict):
if len(mapper) == 0:
if errors == "raise":
raise KeyError("Index include label which is not in the `mapper`.")
else:
return DataFrame(self._internal)
type_set = set(map(lambda x: type(x), mapper.values()))
if len(type_set) > 1:
raise ValueError("Mapper dict should have the same value type.")
spark_return_type = as_spark_type(list(type_set)[0])
def mapper_fn(x):
if x in mapper:
return mapper[x]
else:
if errors == "raise":
raise KeyError("Index include value which is not in the `mapper`")
return x
elif callable(mapper):
spark_return_type = cast(ScalarType, infer_return_type(mapper)).spark_type
def mapper_fn(x):
return mapper(x)
else:
raise ValueError(
"`mapper` or `index` or `columns` should be "
"either dict-like or function type."
)
return mapper_fn, spark_return_type
index_mapper_fn = None
index_mapper_ret_stype = None
columns_mapper_fn = None
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper:
axis = validate_axis(axis)
if axis == 0:
index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper)
elif axis == 1:
columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper)
else:
raise ValueError(
"argument axis should be either the axis name "
"(‘index’, ‘columns’) or number (0, 1)"
)
else:
if index:
index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index)
if columns:
columns_mapper_fn, _ = gen_mapper_fn(columns)
if not index and not columns:
raise ValueError("Either `index` or `columns` should be provided.")
kdf = self.copy()
if index_mapper_fn:
# rename index labels, if `level` is None, rename all index columns, otherwise only
# rename the corresponding level index.
# implement this by transform the underlying spark dataframe,
# Example:
# suppose the kdf index column in underlying spark dataframe is "index_0", "index_1",
# if rename level 0 index labels, will do:
# ``kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))``
# if rename all index labels (`level` is None), then will do:
# ```
# kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))
# .withColumn("index_1", mapper_fn_udf(col("index_1"))
# ```
index_columns = kdf._internal.index_spark_column_names
num_indices = len(index_columns)
if level:
if level < 0 or level >= num_indices:
raise ValueError("level should be an integer between [0, num_indices)")
def gen_new_index_column(level):
index_col_name = index_columns[level]
index_mapper_udf = pandas_udf(
lambda s: s.map(index_mapper_fn), returnType=index_mapper_ret_stype
)
return index_mapper_udf(scol_for(kdf._internal.spark_frame, index_col_name))
sdf = kdf._internal.resolved_copy.spark_frame
index_dtypes = self._internal.index_dtypes.copy()
if level is None:
for i in range(num_indices):
sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i))
index_dtypes[i] = None # TODO: dtype?
else:
sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level))
index_dtypes[level] = None # TODO: dtype?
kdf = DataFrame(kdf._internal.with_new_sdf(sdf, index_dtypes=index_dtypes))
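# Sketch of the index rename above: the mapper is applied element-wise through
# a pandas_udf over the selected index column(s), so kdf1.rename(mul10,
# axis='index') from the docstring rewrites the underlying index column
# roughly as index_0 -> mul10(index_0), yielding the labels 0, 10, 20.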
if columns_mapper_fn:
# rename column name.
# Will modify the `_internal._column_labels` and transform underlying spark dataframe
# to the same column name with `_internal._column_labels`.
if level:
if level < 0 or level >= kdf._internal.column_labels_level:
raise ValueError("level should be an integer between [0, column_labels_level)")
def gen_new_column_labels_entry(column_labels_entry):
if isinstance(column_labels_entry, tuple):
if level is None:
# rename all level columns
return tuple(map(columns_mapper_fn, column_labels_entry))
else:
# only rename specified level column
entry_list = list(column_labels_entry)
entry_list[level] = columns_mapper_fn(entry_list[level])
return tuple(entry_list)
else:
return columns_mapper_fn(column_labels_entry)
new_column_labels = list(map(gen_new_column_labels_entry, kdf._internal.column_labels))
new_data_scols = [
kdf._kser_for(old_label).rename(new_label)
for old_label, new_label in zip(kdf._internal.column_labels, new_column_labels)
]
kdf = DataFrame(kdf._internal.with_new_columns(new_data_scols))
if inplace:
self._update_internal_frame(kdf._internal)
return None
else:
return kdf
def rename_axis(
self,
mapper: Optional[Any] = None,
index: Optional[Any] = None,
columns: Optional[Any] = None,
axis: Optional[Union[int, str]] = 0,
inplace: Optional[bool] = False,
) -> Optional["DataFrame"]:
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
A scalar, list-like, dict-like or functions transformations to
apply to the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
inplace : bool, default False
Modifies the object directly, instead of creating a new DataFrame.
Returns
-------
DataFrame, or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
The second calling convention will modify the names of the
corresponding index specified by axis.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
>>> df = ks.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... index=["dog", "cat", "monkey"],
... columns=["num_legs", "num_arms"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal").sort_index()
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
animal
cat 4 0
dog 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns").sort_index()
>>> df # doctest: +NORMALIZE_WHITESPACE
limbs num_legs num_arms
animal
cat 4 0
dog 4 0
monkey 2 2
**MultiIndex**
>>> index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df = ks.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... index=index,
... columns=["num_legs", "num_arms"])
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
class name
mammal cat 4 0
dog 4 0
monkey 2 2
>>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
TYPE NAME
mammal cat 4 0
dog 4 0
monkey 2 2
"""
def gen_names(v, curnames):
if is_scalar(v):
newnames = [v]
elif is_list_like(v) and not is_dict_like(v):
newnames = list(v)
elif is_dict_like(v):
newnames = [v[name] if name in v else name for name in curnames]
elif callable(v):
newnames = [v(name) for name in curnames]
else:
raise ValueError(
"`mapper` or `index` or `columns` should be "
"either dict-like or function type."
)
if len(newnames) != len(curnames):
raise ValueError(
"Length of new names must be {}, got {}".format(len(curnames), len(newnames))
)
return [name if is_name_like_tuple(name) else (name,) for name in newnames]
if mapper is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.")
if mapper is not None:
axis = validate_axis(axis)
if axis == 0:
index = mapper
elif axis == 1:
columns = mapper
column_label_names = (
gen_names(columns, self.columns.names)
if columns is not None
else self._internal.column_label_names
)
index_names = (
gen_names(index, self.index.names) if index is not None else self._internal.index_names
)
internal = self._internal.copy(
index_names=index_names, column_label_names=column_label_names
)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def keys(self) -> pd.Index:
"""
Return alias for columns.
Returns
-------
Index
Columns of the DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
>>> df.keys()
Index(['max_speed', 'shield'], dtype='object')
"""
return self.columns
def pct_change(self, periods=1) -> "DataFrame":
"""
Percentage change between the current and a prior element.
.. note:: the current implementation of this API uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method on very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
DataFrame
Examples
--------
Percentage change in French franc, Deutsche Mark, and Italian lira
from 1980-01-01 to 1980-03-01.
>>> df = ks.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
You can set periods to shift for forming percent change
>>> df.pct_change(2)
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 NaN NaN NaN
1980-03-01 0.067912 0.073814 0.06883
"""
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods)
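# The window selects exactly the row `periods` positions before the current
# one in natural order, so for the docstring frame with periods=1 the 'FR'
# value on 1980-02-01 is (4.0963 - 4.0405) / 4.0405, i.e. roughly 0.013810.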
def op(kser):
prev_row = F.lag(kser.spark.column, periods).over(window)
return ((kser.spark.column - prev_row) / prev_row).alias(
kser._internal.data_spark_column_names[0]
)
return self._apply_series_op(op, should_resolve=True)
# TODO: axis = 1
def idxmax(self, axis=0) -> "Series":
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
.. note:: This API collects all rows with the maximum value using `to_pandas()`
because we assume the number of rows with the max value is usually small.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmax
Examples
--------
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmax()
a 2
b 0
c 2
dtype: int64
For Multi-column Index
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> kdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmax()
a x 2
b y 0
c z 2
dtype: int64
"""
max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns)
sdf_max = self._internal.spark_frame.select(*max_cols).head()
# `sdf_max` looks like below
# +------+------+------+
# |(a, x)|(b, y)|(c, z)|
# +------+------+------+
# | 3| 4.0| 400|
# +------+------+------+
conds = (
scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max)
)
cond = reduce(lambda x, y: x | y, conds)
kdf = DataFrame(self._internal.with_filter(cond)) # type: "DataFrame"
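# `cond` keeps every row in which at least one column equals its maximum; for
# the docstring frame that leaves rows 0 and 2, and pandas' idxmax on that
# small collected subset then returns the first matching index per column
# (a -> 2, b -> 0, c -> 2).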
return cast(ks.Series, ks.from_pandas(kdf._to_internal_pandas().idxmax()))
# TODO: axis = 1
def idxmin(self, axis=0) -> "Series":
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
.. note:: This API collects all rows with the minimum value using `to_pandas()`
because we assume the number of rows with the min value is usually small.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmin
Examples
--------
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmin()
a 0
b 3
c 1
dtype: int64
For Multi-column Index
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> kdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> kdf.idxmin()
a x 0
b y 3
c z 1
dtype: int64
"""
min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns)
sdf_min = self._internal.spark_frame.select(*min_cols).head()
conds = (
scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min)
)
cond = reduce(lambda x, y: x | y, conds)
kdf = DataFrame(self._internal.with_filter(cond)) # type: "DataFrame"
return cast(ks.Series, ks.from_pandas(kdf._to_internal_pandas().idxmin()))
def info(self, verbose=None, buf=None, max_cols=None, null_counts=None) -> None:
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used.
null_counts : bool, optional
Whether to show the non-null counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = ks.DataFrame(
... {"int_col": int_values, "text_col": text_values, "float_col": float_values},
... columns=['int_col', 'text_col', 'float_col'])
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True) # doctest: +SKIP
<class 'databricks.koalas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False) # doctest: +SKIP
<class 'databricks.koalas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open('%s/info.txt' % path, "w",
... encoding="utf-8") as f:
... _ = f.write(s)
>>> with open('%s/info.txt' % path) as f:
... f.readlines() # doctest: +SKIP
["<class 'databricks.koalas.frame.DataFrame'>\\n",
'Index: 5 entries, 0 to 4\\n',
'Data columns (total 3 columns):\\n',
' # Column Non-Null Count Dtype \\n',
'--- ------ -------------- ----- \\n',
' 0 int_col 5 non-null int64 \\n',
' 1 text_col 5 non-null object \\n',
' 2 float_col 5 non-null float64\\n',
'dtypes: float64(1), int64(1), object(1)']
"""
# To avoid pandas' existing config affects Koalas.
# TODO: should we have corresponding Koalas configs?
with pd.option_context(
"display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize
):
try:
# hack to use pandas' info as is.
object.__setattr__(self, "_data", self)
count_func = self.count
self.count = lambda: count_func().to_pandas() # type: ignore
return pd.DataFrame.info(
self,
verbose=verbose,
buf=buf,
max_cols=max_cols,
memory_usage=False,
null_counts=null_counts,
)
finally:
del self._data
self.count = count_func # type: ignore
# TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas'
def quantile(
self,
q: Union[float, Iterable[float]] = 0.5,
axis: Union[int, str] = 0,
numeric_only: bool = True,
accuracy: int = 10000,
) -> Union["DataFrame", "Series"]:
"""
Return value at the given quantile.
.. note:: Unlike pandas', the quantile in Koalas is an approximated quantile based upon
approximate percentile computation because computing quantile across a large dataset
is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
axis : int or str, default 0 or 'index'
Can only be set to 0 at the moment.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be computed as well.
Can only be set to True at the moment.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> kdf = ks.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})
>>> kdf
a b
0 1 6
1 2 7
2 3 8
3 4 9
4 5 0
>>> kdf.quantile(.5)
a 3.0
b 7.0
Name: 0.5, dtype: float64
>>> kdf.quantile([.25, .5, .75])
a b
0.25 2.0 6.0
0.50 3.0 7.0
0.75 4.0 8.0
"""
axis = validate_axis(axis)
if axis != 0:
raise NotImplementedError('axis should be either 0 or "index" currently.')
if not isinstance(accuracy, int):
raise ValueError(
"accuracy must be an integer; however, got [%s]" % type(accuracy).__name__
)
if isinstance(q, Iterable):
q = list(q)
for v in q if isinstance(q, list) else [q]:
if not isinstance(v, float):
raise ValueError(
"q must be a float or an array of floats; however, [%s] found." % type(v)
)
if v < 0.0 or v > 1.0:
raise ValueError("percentiles should all be in the interval [0, 1].")
def quantile(spark_column, spark_type):
if isinstance(spark_type, (BooleanType, NumericType)):
return SF.percentile_approx(spark_column.cast(DoubleType()), q, accuracy)
else:
raise TypeError(
"Could not convert {} ({}) to numeric".format(
spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()
)
)
if isinstance(q, list):
# First calculate the percentiles from all columns and map it to each `quantiles`
# by creating each entry as a struct. So, it becomes an array of structs as below:
#
# +-----------------------------------------+
# | arrays|
# +-----------------------------------------+
# |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]|
# +-----------------------------------------+
percentile_cols = []
percentile_col_names = []
column_labels = []
for label, column in zip(
self._internal.column_labels, self._internal.data_spark_column_names
):
spark_type = self._internal.spark_type_for(label)
is_numeric_or_boolean = isinstance(spark_type, (NumericType, BooleanType))
keep_column = not numeric_only or is_numeric_or_boolean
if keep_column:
percentile_col = quantile(self._internal.spark_column_for(label), spark_type)
percentile_cols.append(percentile_col.alias(column))
percentile_col_names.append(column)
column_labels.append(label)
if len(percentile_cols) == 0:
return DataFrame(index=q)
sdf = self._internal.spark_frame.select(percentile_cols)
# Here, after select percentile cols, a spark_frame looks like below:
# +---------+---------+
# | a| b|
# +---------+---------+
# |[2, 3, 4]|[6, 7, 8]|
# +---------+---------+
cols_dict = OrderedDict() # type: OrderedDict
for column in percentile_col_names:
cols_dict[column] = list()
for i in range(len(q)):
cols_dict[column].append(scol_for(sdf, column).getItem(i).alias(column))
internal_index_column = SPARK_DEFAULT_INDEX_NAME
cols = []
for i, col in enumerate(zip(*cols_dict.values())):
cols.append(F.struct(F.lit(q[i]).alias(internal_index_column), *col))
sdf = sdf.select(F.array(*cols).alias("arrays"))
# And then, explode it and manually set the index.
# +-----------------+---+---+
# |__index_level_0__| a| b|
# +-----------------+---+---+
# | 0.25| 2| 6|
# | 0.5| 3| 7|
# | 0.75| 4| 8|
# +-----------------+---+---+
sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*")
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, internal_index_column)],
column_labels=column_labels,
data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names],
)
return DataFrame(internal)
else:
return self._reduce_for_stat_function(
quantile, name="quantile", numeric_only=numeric_only
).rename(q)
def query(self, expr, inplace=False) -> Optional["DataFrame"]:
"""
Query the columns of a DataFrame with a boolean expression.
.. note:: Internal columns that start with a '__' prefix can be accessed; however,
they are not supposed to be accessed.
.. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the
pandas specific syntax such as `@` is not supported. If you want the pandas syntax,
you can work around with :meth:`DataFrame.koalas.apply_batch`, but you should
be aware that `query_func` will be executed at different nodes in a distributed manner.
So, for example, to use `@` syntax, make sure the variable is serialized by, for
example, putting it within the closure as below.
>>> df = ks.DataFrame({'A': range(2000), 'B': range(2000)})
>>> def query_func(pdf):
... num = 1995
... return pdf.query('A > @num')
>>> df.koalas.apply_batch(query_func)
A B
1996 1996 1996
1997 1997 1997
1998 1998 1998
1999 1999 1999
Parameters
----------
expr : str
The query string to evaluate.
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
Examples
--------
>>> df = ks.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
if isinstance(self.columns, pd.MultiIndex):
raise ValueError("Doesn't support for MultiIndex columns")
if not isinstance(expr, str):
raise ValueError(
"expr must be a string to be evaluated, {} given".format(type(expr).__name__)
)
inplace = validate_bool_kwarg(inplace, "inplace")
data_columns = [label[0] for label in self._internal.column_labels]
sdf = self._internal.spark_frame.select(
self._internal.index_spark_columns
+ [
scol.alias(col)
for scol, col in zip(self._internal.data_spark_columns, data_columns)
]
).filter(expr)
internal = self._internal.with_new_sdf(sdf, data_columns=data_columns)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal)
def explain(self, extended: Optional[bool] = None, mode: Optional[str] = None) -> None:
warnings.warn(
"DataFrame.explain is deprecated as of DataFrame.spark.explain. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.explain(extended, mode)
explain.__doc__ = SparkFrameMethods.explain.__doc__
def take(self, indices, axis=0, **kwargs) -> "DataFrame":
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3]).sort_index()
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2]).sort_index()
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
axis = validate_axis(axis)
if not is_list_like(indices) or isinstance(indices, (dict, set)):
raise ValueError("`indices` must be a list-like except dict or set")
if axis == 0:
return cast(DataFrame, self.iloc[indices, :])
else:
return cast(DataFrame, self.iloc[:, indices])
def eval(self, expr, inplace=False) -> Optional[Union["DataFrame", "Series"]]:
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
Returns
-------
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Examples
--------
>>> df = ks.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from databricks.koalas.series import first_series
if isinstance(self.columns, pd.MultiIndex):
raise ValueError("`eval` is not supported for multi-index columns")
inplace = validate_bool_kwarg(inplace, "inplace")
should_return_series = False
series_name = None
should_return_scalar = False
# Since `eval_func` doesn't have a type hint, inferring the schema is always performed
# in the `apply_batch`. Hence, the variables `should_return_series`, `series_name`,
# and `should_return_scalar` can be updated.
def eval_func(pdf):
nonlocal should_return_series
nonlocal series_name
nonlocal should_return_scalar
result_inner = pdf.eval(expr, inplace=inplace)
if inplace:
result_inner = pdf
if isinstance(result_inner, pd.Series):
should_return_series = True
series_name = result_inner.name
result_inner = result_inner.to_frame()
elif is_scalar(result_inner):
should_return_scalar = True
result_inner = pd.Series(result_inner).to_frame()
return result_inner
result = self.koalas.apply_batch(eval_func)
if inplace:
# Here, the result is always a frame because the error is thrown during schema inference
# from pandas.
self._update_internal_frame(result._internal, requires_same_anchor=False)
return None
elif should_return_series:
return first_series(result).rename(series_name)
elif should_return_scalar:
return first_series(result)[0]
else:
# Returns a frame
return result
def explode(self, column) -> "DataFrame":
"""
Transform each element of a list-like to a row, replicating index values.
Parameters
----------
column : str or tuple
Column to explode.
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Examples
--------
>>> df = ks.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 [] 1
2 [3, 4] 1
>>> df.explode('A')
A B
0 1.0 1
0 2.0 1
0 3.0 1
1 NaN 1
2 3.0 1
2 4.0 1
"""
from databricks.koalas.series import Series
if not is_name_like_value(column):
raise ValueError("column must be a scalar")
kdf = DataFrame(self._internal.resolved_copy) # type: "DataFrame"
kser = kdf[column]
if not isinstance(kser, Series):
raise ValueError(
"The column %s is not unique. For a multi-index, the label must be a tuple "
"with elements corresponding to each level." % name_like_string(column)
)
if not isinstance(kser.spark.data_type, ArrayType):
return self.copy()
sdf = kdf._internal.spark_frame.withColumn(
kser._internal.data_spark_column_names[0], F.explode_outer(kser.spark.column)
)
data_dtypes = kdf._internal.data_dtypes.copy()
data_dtypes[kdf._internal.column_labels.index(kser._column_label)] = None # TODO: dtype?
internal = kdf._internal.with_new_sdf(sdf, data_dtypes=data_dtypes)
return DataFrame(internal)
def mad(self, axis=0) -> "Series":
"""
Return the mean absolute deviation of values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
>>> df.mad()
a 0.666667
b 0.066667
dtype: float64
>>> df.mad(axis=1)
0 0.45
1 0.90
2 1.35
3 NaN
dtype: float64
"""
from databricks.koalas.series import first_series
axis = validate_axis(axis)
if axis == 0:
def get_spark_column(kdf, label):
scol = kdf._internal.spark_column_for(label)
col_type = kdf._internal.spark_type_for(label)
if isinstance(col_type, BooleanType):
scol = scol.cast("integer")
return scol
new_column_labels = []
for label in self._internal.column_labels:
# Keep only columns of numeric and boolean types.
dtype = self._kser_for(label).spark.data_type
if isinstance(dtype, (NumericType, BooleanType)):
new_column_labels.append(label)
new_columns = [
F.avg(get_spark_column(self, label)).alias(name_like_string(label))
for label in new_column_labels
]
mean_data = self._internal.spark_frame.select(new_columns).first()
new_columns = [
F.avg(
F.abs(get_spark_column(self, label) - mean_data[name_like_string(label)])
).alias(name_like_string(label))
for label in new_column_labels
]
sdf = self._internal.spark_frame.select(
[F.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)] + new_columns
)
# The data is expected to be small so it's fine to transpose/use default index.
with ks.option_context("compute.max_rows", 1):
internal = InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)],
column_labels=new_column_labels,
column_label_names=self._internal.column_label_names,
)
return first_series(DataFrame(internal).transpose())
else:
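# Row-wise MAD: a pandas UDF concatenates the per-batch column Series into a pandas
# DataFrame and applies pandas' mad(axis=1).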
@pandas_udf(returnType=DoubleType())
def calculate_columns_axis(*cols):
return pd.concat(cols, axis=1).mad(axis=1)
internal = self._internal.copy(
column_labels=[None],
data_spark_columns=[
calculate_columns_axis(*self._internal.data_spark_columns).alias(
SPARK_DEFAULT_SERIES_NAME
)
],
data_dtypes=[None],
column_label_names=None,
)
return first_series(DataFrame(internal))
def tail(self, n=5) -> "DataFrame":
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail() # doctest: +SKIP
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3) # doctest: +SKIP
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3) # doctest: +SKIP
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
"""
if LooseVersion(pyspark.__version__) < LooseVersion("3.0"):
raise RuntimeError("tail can be used in PySpark >= 3.0")
if not isinstance(n, int):
raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__))
if n < 0:
n = len(self) + n
if n <= 0:
return ks.DataFrame(self._internal.with_filter(F.lit(False)))
# Should use `resolved_copy` here for the case like `(kdf + 1).tail()`
sdf = self._internal.resolved_copy.spark_frame
rows = sdf.tail(n)
new_sdf = default_session().createDataFrame(rows, sdf.schema)
return DataFrame(self._internal.with_new_sdf(new_sdf))
def align(
self,
other: Union["DataFrame", "Series"],
join: str = "outer",
axis: Optional[Union[int, str]] = None,
copy: bool = True,
) -> Tuple["DataFrame", Union["DataFrame", "Series"]]:
"""
Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
Returns
-------
(left, right) : (DataFrame, type of other)
Aligned objects.
Examples
--------
>>> ks.set_option("compute.ops_on_diff_frames", True)
>>> df1 = ks.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
>>> df2 = ks.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
Align both axis:
>>> aligned_l, aligned_r = df1.align(df2)
>>> aligned_l.sort_index()
a b c
10 1.0 a NaN
11 NaN None NaN
12 NaN None NaN
20 2.0 b NaN
30 3.0 c NaN
>>> aligned_r.sort_index()
a b c
10 4.0 NaN d
11 5.0 NaN e
12 6.0 NaN f
20 NaN NaN None
30 NaN NaN None
Align only axis=0 (index):
>>> aligned_l, aligned_r = df1.align(df2, axis=0)
>>> aligned_l.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> aligned_r.sort_index()
a c
10 4.0 d
11 5.0 e
12 6.0 f
20 NaN None
30 NaN None
Align only axis=1 (column):
>>> aligned_l, aligned_r = df1.align(df2, axis=1)
>>> aligned_l.sort_index()
a b c
10 1 a NaN
20 2 b NaN
30 3 c NaN
>>> aligned_r.sort_index()
a b c
10 4 NaN d
11 5 NaN e
12 6 NaN f
Align with the join type "inner":
>>> aligned_l, aligned_r = df1.align(df2, join="inner")
>>> aligned_l.sort_index()
a
10 1
>>> aligned_r.sort_index()
a
10 4
Align with a Series:
>>> s = ks.Series([7, 8, 9], index=[10, 11, 12])
>>> aligned_l, aligned_r = df1.align(s, axis=0)
>>> aligned_l.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> aligned_r.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> ks.reset_option("compute.ops_on_diff_frames")
"""
from databricks.koalas.series import Series, first_series
if not isinstance(other, (DataFrame, Series)):
raise TypeError("unsupported type: {}".format(type(other).__name__))
how = validate_how(join)
axis = validate_axis(axis, None)
right_is_series = isinstance(other, Series)
if right_is_series:
if axis is None:
raise ValueError("Must specify axis=0 or 1")
elif axis != 0:
raise NotImplementedError(
"align currently only works for axis=0 when right is Series"
)
left = self
right = other
if (axis is None or axis == 0) and not same_anchor(left, right):
combined = combine_frames(left, right, how=how)
left = combined["this"]
right = combined["that"]
if right_is_series:
right = first_series(right).rename(other.name)
if (
axis is None or axis == 1
) and left._internal.column_labels != right._internal.column_labels:
if left._internal.column_labels_level != right._internal.column_labels_level:
raise ValueError("cannot join with no overlapping index names")
left = left.copy()
right = right.copy()
if how == "full":
column_labels = sorted(
list(set(left._internal.column_labels) | set(right._internal.column_labels))
)
elif how == "inner":
column_labels = sorted(
list(set(left._internal.column_labels) & set(right._internal.column_labels))
)
elif how == "left":
column_labels = left._internal.column_labels
else:
column_labels = right._internal.column_labels
for label in column_labels:
if label not in left._internal.column_labels:
left[label] = F.lit(None).cast(DoubleType())
left = left[column_labels]
for label in column_labels:
if label not in right._internal.column_labels:
right[label] = F.lit(None).cast(DoubleType())
right = right[column_labels]
return (left.copy(), right.copy()) if copy else (left, right)
@staticmethod
def from_dict(data, orient="columns", dtype=None, columns=None) -> "DataFrame":
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]}
>>> ks.DataFrame.from_dict(data)
col_1 col_2
0 3 10
1 2 20
2 1 30
3 0 40
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]}
>>> ks.DataFrame.from_dict(data, orient='index').sort_index()
0 1 2 3
row_1 3 2 1 0
row_2 10 20 30 40
When using the 'index' orientation, the column names can be
specified manually:
>>> ks.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D']).sort_index()
A B C D
row_1 3 2 1 0
row_2 10 20 30 40
"""
return DataFrame(pd.DataFrame.from_dict(data, orient=orient, dtype=dtype, columns=columns))
def _to_internal_pandas(self):
"""
Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._internal.to_pandas_frame
def _get_or_create_repr_pandas_cache(self, n):
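# Cache the first n + 1 rows as pandas so __repr__/_repr_html_ can detect truncation
# (more rows than displayed) without triggering another Spark job.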
if not hasattr(self, "_repr_pandas_cache") or n not in self._repr_pandas_cache:
object.__setattr__(
self, "_repr_pandas_cache", {n: self.head(n + 1)._to_internal_pandas()}
)
return self._repr_pandas_cache[n]
def __repr__(self):
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string()
pdf = self._get_or_create_repr_pandas_cache(max_display_count)
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_string = pdf.to_string(show_dimensions=True)
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format(
nrows=nrows, ncols=ncols
)
return REPR_PATTERN.sub(footer, repr_string)
return pdf.to_string()
def _repr_html_(self):
max_display_count = get_option("display.max_rows")
# pandas 0.25.1 has a regression about HTML representation so 'bold_rows'
# has to be set as False explicitly. See https://github.com/pandas-dev/pandas/issues/28204
bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__))
if max_display_count is None:
return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows)
pdf = self._get_or_create_repr_pandas_cache(max_display_count)
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_html = pdf.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows)
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = (
"\n<p>Showing only the first {rows} rows "
"{by} {cols} columns</p>\n</div>".format(rows=nrows, by=by, cols=ncols)
)
return REPR_HTML_PATTERN.sub(footer, repr_html)
return pdf.to_html(notebook=True, bold_rows=bold_rows)
def __getitem__(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
elif isinstance(key, Series):
return self.loc[key.astype(bool)]
elif isinstance(key, slice):
if any(type(n) == int or None for n in [key.start, key.stop]):
# Seems like pandas Frame always uses int as positional search when slicing
# with ints.
return self.iloc[key]
return self.loc[key]
elif is_name_like_value(key):
return self.loc[:, key]
elif is_list_like(key):
return self.loc[:, list(key)]
raise NotImplementedError(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
if isinstance(value, (DataFrame, Series)) and not same_anchor(value, self):
# Different Series or DataFrames
level = self._internal.column_labels_level
key = DataFrame._index_normalized_label(level, key)
value = DataFrame._index_normalized_frame(level, value)
def assign_columns(kdf, this_column_labels, that_column_labels):
assert len(key) == len(that_column_labels)
# Note: `zip_longest` is used intentionally here so that every entry in `key` and
# `that_column_labels` is yielded even when `this_column_labels` is shorter
# (i.e. when new columns are being assigned).
for k, this_label, that_label in zip_longest(
key, this_column_labels, that_column_labels
):
yield (kdf._kser_for(that_label), tuple(["that", *k]))
if this_label is not None and this_label[1:] != k:
yield (kdf._kser_for(this_label), this_label)
kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
elif isinstance(value, list):
if len(self) != len(value):
raise ValueError("Length of values does not match length of index")
# TODO: avoid using default index?
with option_context(
"compute.default_index_type",
"distributed-sequence",
"compute.ops_on_diff_frames",
True,
):
kdf = self.reset_index()
kdf[key] = ks.DataFrame(value)
kdf = kdf.set_index(kdf.columns[: self._internal.index_level])
kdf.index.names = self.index.names
elif isinstance(key, list):
assert isinstance(value, DataFrame)
# Same DataFrames.
field_names = value.columns
kdf = self._assign({k: value[c] for k, c in zip(key, field_names)})
else:
# Same Series.
kdf = self._assign({key: value})
self._update_internal_frame(kdf._internal)
@staticmethod
def _index_normalized_label(level, labels):
"""
Returns a label that is normalized against the current column index level.
For example, the key "abc" can be ("abc", "", "") if the current Frame has
a multi-index for its columns.
"""
if is_name_like_tuple(labels):
labels = [labels]
elif is_name_like_value(labels):
labels = [(labels,)]
else:
labels = [k if is_name_like_tuple(k) else (k,) for k in labels]
if any(len(label) > level for label in labels):
raise KeyError(
"Key length ({}) exceeds index depth ({})".format(
max(len(label) for label in labels), level
)
)
return [tuple(list(label) + ([""] * (level - len(label)))) for label in labels]
@staticmethod
def _index_normalized_frame(level, kser_or_kdf):
"""
Returns a frame that is normalized against the current column index level.
For example, the name in `pd.Series([...], name="abc")` can be
("abc", "", "") if the current DataFrame has a multi-index for its columns.
"""
from databricks.koalas.series import Series
if isinstance(kser_or_kdf, Series):
kdf = kser_or_kdf.to_frame()
else:
assert isinstance(kser_or_kdf, DataFrame), type(kser_or_kdf)
kdf = kser_or_kdf.copy()
kdf.columns = pd.MultiIndex.from_tuples(
[
tuple([name_like_string(label)] + ([""] * (level - 1)))
for label in kdf._internal.column_labels
],
)
return kdf
def __getattr__(self, key: str) -> Any:
if key.startswith("__"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
try:
return self.loc[:, key]
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key)
)
def __setattr__(self, key: str, value) -> None:
try:
object.__getattribute__(self, key)
return object.__setattr__(self, key, value)
except AttributeError:
pass
if (key,) in self._internal.column_labels:
self[key] = value
else:
msg = "Koalas doesn't allow columns to be created via a new attribute name"
if is_testing():
raise AssertionError(msg)
else:
warnings.warn(msg, UserWarning)
def __len__(self):
return self._internal.resolved_copy.spark_frame.count()
def __dir__(self):
fields = [
f for f in self._internal.resolved_copy.spark_frame.schema.fieldNames() if " " not in f
]
return super().__dir__() + fields
def __iter__(self):
return iter(self.columns)
# NDArray Compat
def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):
# TODO: is it possible to deduplicate it with '_map_series_op'?
if all(isinstance(inp, DataFrame) for inp in inputs) and any(
not same_anchor(inp, inputs[0]) for inp in inputs
):
# binary only
assert len(inputs) == 2
this = inputs[0]
that = inputs[1]
if this._internal.column_labels_level != that._internal.column_labels_level:
raise ValueError("cannot join with no overlapping index names")
# Different DataFrames
def apply_op(kdf, this_column_labels, that_column_labels):
for this_label, that_label in zip(this_column_labels, that_column_labels):
yield (
ufunc(
kdf._kser_for(this_label), kdf._kser_for(that_label), **kwargs
).rename(this_label),
this_label,
)
return align_diff_frames(apply_op, this, that, fillna=True, how="full")
else:
# DataFrame and Series
applied = []
this = inputs[0]
assert all(inp is this for inp in inputs if isinstance(inp, DataFrame))
for label in this._internal.column_labels:
arguments = []
for inp in inputs:
arguments.append(inp[label] if isinstance(inp, DataFrame) else inp)
# both binary and unary.
applied.append(ufunc(*arguments, **kwargs).rename(label))
internal = this._internal.with_new_columns(applied)
return DataFrame(internal)
if sys.version_info >= (3, 7):
def __class_getitem__(cls, params):
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
# We always wrap the given type hints in a tuple to mimic the variadic generic.
return _create_tuple_for_frame_type(params)
elif (3, 5) <= sys.version_info < (3, 7):
# This is a workaround to support variadic generic in DataFrame in Python 3.5+
# The implementation is in its metaclass so this flag is needed to distinguish
# Koalas DataFrame.
is_dataframe = None
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a Spark DataFrame; the aggregations must be known SQL aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.limit(2).toPandas()
assert len(l) == 1, (sdf, l)
row = l.iloc[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
class CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal, storage_level=None):
if storage_level is None:
object.__setattr__(self, "_cached", internal.spark_frame.cache())
elif isinstance(storage_level, StorageLevel):
object.__setattr__(self, "_cached", internal.spark_frame.persist(storage_level))
else:
raise TypeError(
"Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`"
)
super().__init__(internal)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.spark.unpersist()
# create accessor for Spark related methods.
spark = CachedAccessor("spark", CachedSparkFrameMethods)
@property
def storage_level(self) -> StorageLevel:
warnings.warn(
"DataFrame.storage_level is deprecated as of DataFrame.spark.storage_level. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.storage_level
storage_level.__doc__ = CachedSparkFrameMethods.storage_level.__doc__
def unpersist(self) -> None:
warnings.warn(
"DataFrame.unpersist is deprecated as of DataFrame.spark.unpersist. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.unpersist()
unpersist.__doc__ = CachedSparkFrameMethods.unpersist.__doc__
| 1 | 18,119 | Could you also update it in `at_time`? | databricks-koalas | py |
@@ -143,8 +143,9 @@ class Reader implements DataSourceReader, SupportsScanColumnarBatch, SupportsPus
} catch (IOException ioe) {
LOG.warn("Failed to get Hadoop Filesystem", ioe);
}
+ Boolean localityFallback = LOCALITY_WHITELIST_FS.contains(scheme);
this.localityPreferred = options.get("locality").map(Boolean::parseBoolean)
- .orElse(LOCALITY_WHITELIST_FS.contains(scheme));
+ .orElse(localityFallback);
} else {
this.localityPreferred = false;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.IOException;
import java.io.Serializable;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.Schema;
import org.apache.iceberg.SchemaParser;
import org.apache.iceberg.SnapshotSummary;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.TableScan;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.hadoop.Util;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.spark.SparkFilters;
import org.apache.iceberg.spark.SparkSchemaUtil;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.sources.Filter;
import org.apache.spark.sql.sources.v2.DataSourceOptions;
import org.apache.spark.sql.sources.v2.reader.DataSourceReader;
import org.apache.spark.sql.sources.v2.reader.InputPartition;
import org.apache.spark.sql.sources.v2.reader.InputPartitionReader;
import org.apache.spark.sql.sources.v2.reader.Statistics;
import org.apache.spark.sql.sources.v2.reader.SupportsPushDownFilters;
import org.apache.spark.sql.sources.v2.reader.SupportsPushDownRequiredColumns;
import org.apache.spark.sql.sources.v2.reader.SupportsReportStatistics;
import org.apache.spark.sql.sources.v2.reader.SupportsScanColumnarBatch;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.vectorized.ColumnarBatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.TableProperties.DEFAULT_NAME_MAPPING;
class Reader implements DataSourceReader, SupportsScanColumnarBatch, SupportsPushDownFilters,
SupportsPushDownRequiredColumns, SupportsReportStatistics {
private static final Logger LOG = LoggerFactory.getLogger(Reader.class);
private static final Filter[] NO_FILTERS = new Filter[0];
private static final ImmutableSet<String> LOCALITY_WHITELIST_FS = ImmutableSet.of("hdfs");
private final Table table;
private final Long snapshotId;
private final Long startSnapshotId;
private final Long endSnapshotId;
private final Long asOfTimestamp;
private final Long splitSize;
private final Integer splitLookback;
private final Long splitOpenFileCost;
private final Broadcast<FileIO> io;
private final Broadcast<EncryptionManager> encryptionManager;
private final boolean caseSensitive;
private StructType requestedSchema = null;
private List<Expression> filterExpressions = null;
private Filter[] pushedFilters = NO_FILTERS;
private final boolean localityPreferred;
private final boolean batchReadsEnabled;
private final int batchSize;
// lazy variables
private Schema schema = null;
private StructType type = null; // cached because Spark accesses it multiple times
private List<CombinedScanTask> tasks = null; // lazy cache of tasks
private Boolean readUsingBatch = null;
Reader(Table table, Broadcast<FileIO> io, Broadcast<EncryptionManager> encryptionManager,
boolean caseSensitive, DataSourceOptions options) {
this.table = table;
this.snapshotId = options.get("snapshot-id").map(Long::parseLong).orElse(null);
this.asOfTimestamp = options.get("as-of-timestamp").map(Long::parseLong).orElse(null);
if (snapshotId != null && asOfTimestamp != null) {
throw new IllegalArgumentException(
"Cannot scan using both snapshot-id and as-of-timestamp to select the table snapshot");
}
this.startSnapshotId = options.get("start-snapshot-id").map(Long::parseLong).orElse(null);
this.endSnapshotId = options.get("end-snapshot-id").map(Long::parseLong).orElse(null);
if (snapshotId != null || asOfTimestamp != null) {
if (startSnapshotId != null || endSnapshotId != null) {
throw new IllegalArgumentException(
"Cannot specify start-snapshot-id and end-snapshot-id to do incremental scan when either snapshot-id or " +
"as-of-timestamp is specified");
}
} else {
if (startSnapshotId == null && endSnapshotId != null) {
throw new IllegalArgumentException("Cannot only specify option end-snapshot-id to do incremental scan");
}
}
// look for split behavior overrides in options
this.splitSize = options.get("split-size").map(Long::parseLong).orElse(null);
this.splitLookback = options.get("lookback").map(Integer::parseInt).orElse(null);
this.splitOpenFileCost = options.get("file-open-cost").map(Long::parseLong).orElse(null);
if (io.getValue() instanceof HadoopFileIO) {
String scheme = "no_exist";
try {
Configuration conf = SparkSession.active().sessionState().newHadoopConf();
// merge hadoop config set on table
mergeIcebergHadoopConfs(conf, table.properties());
// merge hadoop config passed as options and overwrite the one on table
mergeIcebergHadoopConfs(conf, options.asMap());
FileSystem fs = new Path(table.location()).getFileSystem(conf);
scheme = fs.getScheme().toLowerCase(Locale.ENGLISH);
} catch (IOException ioe) {
LOG.warn("Failed to get Hadoop Filesystem", ioe);
}
this.localityPreferred = options.get("locality").map(Boolean::parseBoolean)
.orElse(LOCALITY_WHITELIST_FS.contains(scheme));
} else {
this.localityPreferred = false;
}
this.schema = table.schema();
this.io = io;
this.encryptionManager = encryptionManager;
this.caseSensitive = caseSensitive;
this.batchReadsEnabled = options.get("vectorization-enabled").map(Boolean::parseBoolean).orElse(
PropertyUtil.propertyAsBoolean(table.properties(),
TableProperties.PARQUET_VECTORIZATION_ENABLED, TableProperties.PARQUET_VECTORIZATION_ENABLED_DEFAULT));
this.batchSize = options.get("batch-size").map(Integer::parseInt).orElse(
PropertyUtil.propertyAsInt(table.properties(),
TableProperties.PARQUET_BATCH_SIZE, TableProperties.PARQUET_BATCH_SIZE_DEFAULT));
}
private Schema lazySchema() {
if (schema == null) {
if (requestedSchema != null) {
// the projection should include all columns that will be returned, including those only used in filters
this.schema = SparkSchemaUtil.prune(table.schema(), requestedSchema, filterExpression(), caseSensitive);
} else {
this.schema = table.schema();
}
}
return schema;
}
private Expression filterExpression() {
if (filterExpressions != null) {
return filterExpressions.stream().reduce(Expressions.alwaysTrue(), Expressions::and);
}
return Expressions.alwaysTrue();
}
private StructType lazyType() {
if (type == null) {
this.type = SparkSchemaUtil.convert(lazySchema());
}
return type;
}
@Override
public StructType readSchema() {
return lazyType();
}
/**
* This is called in the Spark Driver when data is to be materialized into {@link ColumnarBatch}
*/
@Override
public List<InputPartition<ColumnarBatch>> planBatchInputPartitions() {
Preconditions.checkState(enableBatchRead(), "Batched reads not enabled");
Preconditions.checkState(batchSize > 0, "Invalid batch size");
String tableSchemaString = SchemaParser.toJson(table.schema());
String expectedSchemaString = SchemaParser.toJson(lazySchema());
String nameMappingString = table.properties().get(DEFAULT_NAME_MAPPING);
List<InputPartition<ColumnarBatch>> readTasks = Lists.newArrayList();
for (CombinedScanTask task : tasks()) {
readTasks.add(new ReadTask<>(
task, tableSchemaString, expectedSchemaString, nameMappingString, io, encryptionManager, caseSensitive,
localityPreferred, new BatchReaderFactory(batchSize)));
}
LOG.info("Batching input partitions with {} tasks.", readTasks.size());
return readTasks;
}
/**
* This is called in the Spark Driver when data is to be materialized into {@link InternalRow}
*/
@Override
public List<InputPartition<InternalRow>> planInputPartitions() {
String tableSchemaString = SchemaParser.toJson(table.schema());
String expectedSchemaString = SchemaParser.toJson(lazySchema());
String nameMappingString = table.properties().get(DEFAULT_NAME_MAPPING);
List<InputPartition<InternalRow>> readTasks = Lists.newArrayList();
for (CombinedScanTask task : tasks()) {
readTasks.add(new ReadTask<>(
task, tableSchemaString, expectedSchemaString, nameMappingString, io, encryptionManager, caseSensitive,
localityPreferred, InternalRowReaderFactory.INSTANCE));
}
return readTasks;
}
@Override
public Filter[] pushFilters(Filter[] filters) {
this.tasks = null; // invalidate cached tasks, if present
List<Expression> expressions = Lists.newArrayListWithExpectedSize(filters.length);
List<Filter> pushed = Lists.newArrayListWithExpectedSize(filters.length);
for (Filter filter : filters) {
Expression expr = SparkFilters.convert(filter);
if (expr != null) {
expressions.add(expr);
pushed.add(filter);
}
}
this.filterExpressions = expressions;
this.pushedFilters = pushed.toArray(new Filter[0]);
// invalidate the schema that will be projected
this.schema = null;
this.type = null;
// Spark doesn't support residuals per task, so return all filters
// to get Spark to handle record-level filtering
return filters;
}
@Override
public Filter[] pushedFilters() {
return pushedFilters;
}
@Override
public void pruneColumns(StructType newRequestedSchema) {
this.requestedSchema = newRequestedSchema;
// invalidate the schema that will be projected
this.schema = null;
this.type = null;
}
@Override
public Statistics estimateStatistics() {
if (filterExpressions == null || filterExpressions == Expressions.alwaysTrue()) {
long totalRecords = PropertyUtil.propertyAsLong(table.currentSnapshot().summary(),
SnapshotSummary.TOTAL_RECORDS_PROP, Long.MAX_VALUE);
return new Stats(SparkSchemaUtil.estimateSize(lazyType(), totalRecords), totalRecords);
}
long sizeInBytes = 0L;
long numRows = 0L;
for (CombinedScanTask task : tasks()) {
for (FileScanTask file : task.files()) {
sizeInBytes += file.length();
numRows += file.file().recordCount();
}
}
return new Stats(sizeInBytes, numRows);
}
@Override
public boolean enableBatchRead() {
if (readUsingBatch == null) {
boolean allParquetFileScanTasks =
tasks().stream()
.allMatch(combinedScanTask -> !combinedScanTask.isDataTask() && combinedScanTask.files()
.stream()
.allMatch(fileScanTask -> fileScanTask.file().format().equals(
FileFormat.PARQUET)));
boolean allOrcFileScanTasks =
tasks().stream()
.allMatch(combinedScanTask -> !combinedScanTask.isDataTask() && combinedScanTask.files()
.stream()
.allMatch(fileScanTask -> fileScanTask.file().format().equals(
FileFormat.ORC)));
boolean atLeastOneColumn = lazySchema().columns().size() > 0;
boolean onlyPrimitives = lazySchema().columns().stream().allMatch(c -> c.type().isPrimitiveType());
this.readUsingBatch = batchReadsEnabled && (allOrcFileScanTasks ||
(allParquetFileScanTasks && atLeastOneColumn && onlyPrimitives));
}
return readUsingBatch;
}
private static void mergeIcebergHadoopConfs(
Configuration baseConf, Map<String, String> options) {
options.keySet().stream()
.filter(key -> key.startsWith("hadoop."))
.forEach(key -> baseConf.set(key.replaceFirst("hadoop.", ""), options.get(key)));
}
private List<CombinedScanTask> tasks() {
if (tasks == null) {
TableScan scan = table
.newScan()
.caseSensitive(caseSensitive)
.project(lazySchema());
if (snapshotId != null) {
scan = scan.useSnapshot(snapshotId);
}
if (asOfTimestamp != null) {
scan = scan.asOfTime(asOfTimestamp);
}
if (startSnapshotId != null) {
if (endSnapshotId != null) {
scan = scan.appendsBetween(startSnapshotId, endSnapshotId);
} else {
scan = scan.appendsAfter(startSnapshotId);
}
}
if (splitSize != null) {
scan = scan.option(TableProperties.SPLIT_SIZE, splitSize.toString());
}
if (splitLookback != null) {
scan = scan.option(TableProperties.SPLIT_LOOKBACK, splitLookback.toString());
}
if (splitOpenFileCost != null) {
scan = scan.option(TableProperties.SPLIT_OPEN_FILE_COST, splitOpenFileCost.toString());
}
if (filterExpressions != null) {
for (Expression filter : filterExpressions) {
scan = scan.filter(filter);
}
}
try (CloseableIterable<CombinedScanTask> tasksIterable = scan.planTasks()) {
this.tasks = Lists.newArrayList(tasksIterable);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to close table scan: %s", scan);
}
}
return tasks;
}
@Override
public String toString() {
return String.format(
"IcebergScan(table=%s, type=%s, filters=%s, caseSensitive=%s, batchedReads=%s)",
table, lazySchema().asStruct(), filterExpressions, caseSensitive, enableBatchRead());
}
private static class ReadTask<T> implements Serializable, InputPartition<T> {
private final CombinedScanTask task;
private final String tableSchemaString;
private final String expectedSchemaString;
private final String nameMappingString;
private final Broadcast<FileIO> io;
private final Broadcast<EncryptionManager> encryptionManager;
private final boolean caseSensitive;
private final boolean localityPreferred;
private final ReaderFactory<T> readerFactory;
private transient Schema tableSchema = null;
private transient Schema expectedSchema = null;
private transient String[] preferredLocations = null;
private ReadTask(CombinedScanTask task, String tableSchemaString, String expectedSchemaString,
String nameMappingString, Broadcast<FileIO> io, Broadcast<EncryptionManager> encryptionManager,
boolean caseSensitive, boolean localityPreferred, ReaderFactory<T> readerFactory) {
this.task = task;
this.tableSchemaString = tableSchemaString;
this.expectedSchemaString = expectedSchemaString;
this.io = io;
this.encryptionManager = encryptionManager;
this.caseSensitive = caseSensitive;
this.localityPreferred = localityPreferred;
this.preferredLocations = getPreferredLocations();
this.readerFactory = readerFactory;
this.nameMappingString = nameMappingString;
}
@Override
public InputPartitionReader<T> createPartitionReader() {
return readerFactory.create(task, lazyTableSchema(), lazyExpectedSchema(), nameMappingString, io.value(),
encryptionManager.value(), caseSensitive);
}
@Override
public String[] preferredLocations() {
return preferredLocations;
}
private Schema lazyTableSchema() {
if (tableSchema == null) {
this.tableSchema = SchemaParser.fromJson(tableSchemaString);
}
return tableSchema;
}
private Schema lazyExpectedSchema() {
if (expectedSchema == null) {
this.expectedSchema = SchemaParser.fromJson(expectedSchemaString);
}
return expectedSchema;
}
@SuppressWarnings("checkstyle:RegexpSingleline")
private String[] getPreferredLocations() {
if (!localityPreferred) {
return new String[0];
}
Configuration conf = SparkSession.active().sparkContext().hadoopConfiguration();
return Util.blockLocations(task, conf);
}
}
private interface ReaderFactory<T> extends Serializable {
InputPartitionReader<T> create(CombinedScanTask task, Schema tableSchema, Schema expectedSchema,
String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive);
}
private static class InternalRowReaderFactory implements ReaderFactory<InternalRow> {
private static final InternalRowReaderFactory INSTANCE = new InternalRowReaderFactory();
private InternalRowReaderFactory() {
}
@Override
public InputPartitionReader<InternalRow> create(CombinedScanTask task, Schema tableSchema, Schema expectedSchema,
String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive) {
return new RowReader(task, tableSchema, expectedSchema, nameMapping, io, encryptionManager, caseSensitive);
}
}
private static class BatchReaderFactory implements ReaderFactory<ColumnarBatch> {
private final int batchSize;
BatchReaderFactory(int batchSize) {
this.batchSize = batchSize;
}
@Override
public InputPartitionReader<ColumnarBatch> create(CombinedScanTask task, Schema tableSchema, Schema expectedSchema,
String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive) {
return new BatchReader(task, expectedSchema, nameMapping, io, encryptionManager, caseSensitive, batchSize);
}
}
private static class RowReader extends RowDataReader implements InputPartitionReader<InternalRow> {
RowReader(CombinedScanTask task, Schema tableSchema, Schema expectedSchema, String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive) {
super(task, tableSchema, expectedSchema, nameMapping, io, encryptionManager, caseSensitive);
}
}
private static class BatchReader extends BatchDataReader implements InputPartitionReader<ColumnarBatch> {
BatchReader(CombinedScanTask task, Schema expectedSchema, String nameMapping, FileIO io,
EncryptionManager encryptionManager, boolean caseSensitive, int size) {
super(task, expectedSchema, nameMapping, io, encryptionManager, caseSensitive, size);
}
}
}
| 1 | 23,372 | Because schema is non-final we can't just switch this to a lambda | apache-iceberg | java |
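The review note above turns on a Java language rule: a lambda can only capture local variables that are final or effectively final, and in the patched constructor the local `scheme` is reassigned inside the try/catch, so the eager `orElse(...)` cannot simply be replaced with a lazy `orElseGet(() -> ...)`. A minimal, hypothetical sketch of that restriction follows (class and variable names are illustrative, not Iceberg code):
// Hypothetical illustration of the effectively-final rule mentioned in the review comment.
import java.util.Optional;

class EffectivelyFinalDemo {
  static boolean localityPreferred(Optional<String> localityOption) {
    String scheme = "no_exist";
    try {
      scheme = "hdfs"; // reassigned, so 'scheme' is no longer effectively final
    } catch (RuntimeException e) {
      // ignored for the purpose of the example
    }

    // This would not compile: lambdas may only capture final or effectively-final locals.
    // return localityOption.map(Boolean::parseBoolean).orElseGet(() -> "hdfs".equals(scheme));

    // Workaround: copy the value into a fresh local that is never reassigned.
    boolean fallback = "hdfs".equals(scheme);
    return localityOption.map(Boolean::parseBoolean).orElseGet(() -> fallback);
  }
}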
@@ -136,7 +136,7 @@ namespace NLog.Conditions
}
catch (Exception exception)
{
- if (exception.MustBeRethrown())
+ if (exception.MustBeRethrown("While resolving function '{0}' following exception occurred: {1}", functionName, exception))
{
throw;
} | 1 | //
// Copyright (c) 2004-2011 Jaroslaw Kowalski <[email protected]>
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
namespace NLog.Conditions
{
using System;
using System.Collections.Generic;
using System.Globalization;
using NLog.Config;
using NLog.Internal;
using NLog.Layouts;
/// <summary>
/// Condition parser. Turns a string representation of condition expression
/// into an expression tree.
/// </summary>
public class ConditionParser
{
private readonly ConditionTokenizer tokenizer;
private readonly ConfigurationItemFactory configurationItemFactory;
/// <summary>
/// Initializes a new instance of the <see cref="ConditionParser"/> class.
/// </summary>
/// <param name="stringReader">The string reader.</param>
/// <param name="configurationItemFactory">Instance of <see cref="ConfigurationItemFactory"/> used to resolve references to condition methods and layout renderers.</param>
private ConditionParser(SimpleStringReader stringReader, ConfigurationItemFactory configurationItemFactory)
{
this.configurationItemFactory = configurationItemFactory;
this.tokenizer = new ConditionTokenizer(stringReader);
}
/// <summary>
/// Parses the specified condition string and turns it into
/// <see cref="ConditionExpression"/> tree.
/// </summary>
/// <param name="expressionText">The expression to be parsed.</param>
/// <returns>The root of the expression syntax tree which can be used to get the value of the condition in a specified context.</returns>
public static ConditionExpression ParseExpression(string expressionText)
{
return ParseExpression(expressionText, ConfigurationItemFactory.Default);
}
/// <summary>
/// Parses the specified condition string and turns it into
/// <see cref="ConditionExpression"/> tree.
/// </summary>
/// <param name="expressionText">The expression to be parsed.</param>
/// <param name="configurationItemFactories">Instance of <see cref="ConfigurationItemFactory"/> used to resolve references to condition methods and layout renderers.</param>
/// <returns>The root of the expression syntax tree which can be used to get the value of the condition in a specified context.</returns>
public static ConditionExpression ParseExpression(string expressionText, ConfigurationItemFactory configurationItemFactories)
{
if (expressionText == null)
{
return null;
}
var parser = new ConditionParser(new SimpleStringReader(expressionText), configurationItemFactories);
ConditionExpression expression = parser.ParseExpression();
if (!parser.tokenizer.IsEOF())
{
throw new ConditionParseException("Unexpected token: " + parser.tokenizer.TokenValue);
}
return expression;
}
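        // Illustrative usage (editor's sketch, not part of the original source; assumes the
        // standard NLog condition functions such as contains() and the usual
        // ConditionExpression.Evaluate(LogEventInfo) entry point):
        //
        //   ConditionExpression cond =
        //       ConditionParser.ParseExpression("level >= LogLevel.Warn and contains(message, 'timeout')");
        //   object isMatch = cond.Evaluate(logEvent); // true only for Warn+ events whose message contains 'timeout'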
/// <summary>
/// Parses the specified condition string and turns it into
        /// a <see cref="ConditionExpression"/> tree.
/// </summary>
/// <param name="stringReader">The string reader.</param>
/// <param name="configurationItemFactories">Instance of <see cref="ConfigurationItemFactory"/> used to resolve references to condition methods and layout renderers.</param>
/// <returns>
/// The root of the expression syntax tree which can be used to get the value of the condition in a specified context.
/// </returns>
internal static ConditionExpression ParseExpression(SimpleStringReader stringReader, ConfigurationItemFactory configurationItemFactories)
{
var parser = new ConditionParser(stringReader, configurationItemFactories);
ConditionExpression expression = parser.ParseExpression();
return expression;
}
private ConditionMethodExpression ParsePredicate(string functionName)
{
var par = new List<ConditionExpression>();
while (!this.tokenizer.IsEOF() && this.tokenizer.TokenType != ConditionTokenType.RightParen)
{
par.Add(ParseExpression());
if (this.tokenizer.TokenType != ConditionTokenType.Comma)
{
break;
}
this.tokenizer.GetNextToken();
}
this.tokenizer.Expect(ConditionTokenType.RightParen);
try
{
var methodInfo = this.configurationItemFactory.ConditionMethods.CreateInstance(functionName);
return new ConditionMethodExpression(functionName, methodInfo, par);
}
catch (Exception exception)
{
if (exception.MustBeRethrown())
{
throw;
}
throw new ConditionParseException("Cannot resolve function '" + functionName + "'", exception);
}
}
private ConditionExpression ParseLiteralExpression()
{
if (this.tokenizer.IsToken(ConditionTokenType.LeftParen))
{
this.tokenizer.GetNextToken();
ConditionExpression e = this.ParseExpression();
this.tokenizer.Expect(ConditionTokenType.RightParen);
return e;
}
if (this.tokenizer.IsToken(ConditionTokenType.Minus))
{
this.tokenizer.GetNextToken();
if (!this.tokenizer.IsNumber())
{
throw new ConditionParseException("Number expected, got " + this.tokenizer.TokenType);
}
string numberString = this.tokenizer.TokenValue;
this.tokenizer.GetNextToken();
if (numberString.IndexOf('.') >= 0)
{
return new ConditionLiteralExpression(-double.Parse(numberString, CultureInfo.InvariantCulture));
}
return new ConditionLiteralExpression(-int.Parse(numberString, CultureInfo.InvariantCulture));
}
if (this.tokenizer.IsNumber())
{
string numberString = this.tokenizer.TokenValue;
this.tokenizer.GetNextToken();
if (numberString.IndexOf('.') >= 0)
{
return new ConditionLiteralExpression(double.Parse(numberString, CultureInfo.InvariantCulture));
}
return new ConditionLiteralExpression(int.Parse(numberString, CultureInfo.InvariantCulture));
}
if (this.tokenizer.TokenType == ConditionTokenType.String)
{
ConditionExpression e = new ConditionLayoutExpression(Layout.FromString(this.tokenizer.StringTokenValue, this.configurationItemFactory));
this.tokenizer.GetNextToken();
return e;
}
if (this.tokenizer.TokenType == ConditionTokenType.Keyword)
{
string keyword = this.tokenizer.EatKeyword();
if (0 == string.Compare(keyword, "level", StringComparison.OrdinalIgnoreCase))
{
return new ConditionLevelExpression();
}
if (0 == string.Compare(keyword, "logger", StringComparison.OrdinalIgnoreCase))
{
return new ConditionLoggerNameExpression();
}
if (0 == string.Compare(keyword, "message", StringComparison.OrdinalIgnoreCase))
{
return new ConditionMessageExpression();
}
if (0 == string.Compare(keyword, "loglevel", StringComparison.OrdinalIgnoreCase))
{
this.tokenizer.Expect(ConditionTokenType.Dot);
return new ConditionLiteralExpression(LogLevel.FromString(this.tokenizer.EatKeyword()));
}
if (0 == string.Compare(keyword, "true", StringComparison.OrdinalIgnoreCase))
{
return new ConditionLiteralExpression(true);
}
if (0 == string.Compare(keyword, "false", StringComparison.OrdinalIgnoreCase))
{
return new ConditionLiteralExpression(false);
}
if (0 == string.Compare(keyword, "null", StringComparison.OrdinalIgnoreCase))
{
return new ConditionLiteralExpression(null);
}
if (this.tokenizer.TokenType == ConditionTokenType.LeftParen)
{
this.tokenizer.GetNextToken();
ConditionMethodExpression predicateExpression = this.ParsePredicate(keyword);
return predicateExpression;
}
}
throw new ConditionParseException("Unexpected token: " + this.tokenizer.TokenValue);
}
private ConditionExpression ParseBooleanRelation()
{
ConditionExpression e = this.ParseLiteralExpression();
if (this.tokenizer.IsToken(ConditionTokenType.EqualTo))
{
this.tokenizer.GetNextToken();
return new ConditionRelationalExpression(e, this.ParseLiteralExpression(), ConditionRelationalOperator.Equal);
}
if (this.tokenizer.IsToken(ConditionTokenType.NotEqual))
{
this.tokenizer.GetNextToken();
return new ConditionRelationalExpression(e, this.ParseLiteralExpression(), ConditionRelationalOperator.NotEqual);
}
if (this.tokenizer.IsToken(ConditionTokenType.LessThan))
{
this.tokenizer.GetNextToken();
return new ConditionRelationalExpression(e, this.ParseLiteralExpression(), ConditionRelationalOperator.Less);
}
if (this.tokenizer.IsToken(ConditionTokenType.GreaterThan))
{
this.tokenizer.GetNextToken();
return new ConditionRelationalExpression(e, this.ParseLiteralExpression(), ConditionRelationalOperator.Greater);
}
if (this.tokenizer.IsToken(ConditionTokenType.LessThanOrEqualTo))
{
this.tokenizer.GetNextToken();
return new ConditionRelationalExpression(e, this.ParseLiteralExpression(), ConditionRelationalOperator.LessOrEqual);
}
if (this.tokenizer.IsToken(ConditionTokenType.GreaterThanOrEqualTo))
{
this.tokenizer.GetNextToken();
return new ConditionRelationalExpression(e, this.ParseLiteralExpression(), ConditionRelationalOperator.GreaterOrEqual);
}
return e;
}
private ConditionExpression ParseBooleanPredicate()
{
if (this.tokenizer.IsKeyword("not") || this.tokenizer.IsToken(ConditionTokenType.Not))
{
this.tokenizer.GetNextToken();
return new ConditionNotExpression(this.ParseBooleanPredicate());
}
return this.ParseBooleanRelation();
}
private ConditionExpression ParseBooleanAnd()
{
ConditionExpression expression = this.ParseBooleanPredicate();
while (this.tokenizer.IsKeyword("and") || this.tokenizer.IsToken(ConditionTokenType.And))
{
this.tokenizer.GetNextToken();
expression = new ConditionAndExpression(expression, this.ParseBooleanPredicate());
}
return expression;
}
private ConditionExpression ParseBooleanOr()
{
ConditionExpression expression = this.ParseBooleanAnd();
while (this.tokenizer.IsKeyword("or") || this.tokenizer.IsToken(ConditionTokenType.Or))
{
this.tokenizer.GetNextToken();
expression = new ConditionOrExpression(expression, this.ParseBooleanAnd());
}
return expression;
}
private ConditionExpression ParseBooleanExpression()
{
return this.ParseBooleanOr();
}
private ConditionExpression ParseExpression()
{
return this.ParseBooleanExpression();
}
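        // Editor's note (illustrative, not part of the original source): the call chain
        // ParseExpression -> ParseBooleanOr -> ParseBooleanAnd -> ParseBooleanPredicate
        // -> ParseBooleanRelation -> ParseLiteralExpression means "or" binds loosest,
        // then "and", then "not", while relational operators bind tighter than all three,
        // so an input such as
        //
        //   not level == LogLevel.Debug and message == 'x' or logger == 'y'
        //
        // parses as ((not (level == LogLevel.Debug)) and (message == 'x')) or (logger == 'y').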
}
} | 1 | 11,658 | proposal: **always** add the exception to the end of the message. So no need to to pass exception twice (implicit and explicit). Need a lot of changes. | NLog-NLog | .cs |
@@ -41,9 +41,14 @@ public class InvocationStartProcessingEventListener implements EventListener {
public void process(Event data) {
InvocationStartProcessingEvent event = (InvocationStartProcessingEvent) data;
InvocationMonitor monitor = registryMonitor.getInvocationMonitor(event.getOperationName());
+    //TODO: the current Java chassis cannot know the invocation type before processing starts, so WaitInQueue is decremented (increment(-1)) for every invocation type
monitor.getWaitInQueue().increment(-1);
+ monitor.setInvocationMonitorType(event.getInvocationType());
if (InvocationType.PRODUCER.equals(event.getInvocationType())) {
monitor.getLifeTimeInQueue().update(event.getInQueueNanoTime());
+ monitor.getProducerCall().increment();
+ } else {
+ monitor.getConsumerCall().increment();
}
}
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.metrics.core.event;
import io.servicecomb.core.metrics.InvocationStartProcessingEvent;
import io.servicecomb.foundation.common.event.Event;
import io.servicecomb.foundation.common.event.EventListener;
import io.servicecomb.metrics.core.monitor.InvocationMonitor;
import io.servicecomb.metrics.core.monitor.RegistryMonitor;
import io.servicecomb.swagger.invocation.InvocationType;
public class InvocationStartProcessingEventListener implements EventListener {
private final RegistryMonitor registryMonitor;
public InvocationStartProcessingEventListener(RegistryMonitor registryMonitor) {
this.registryMonitor = registryMonitor;
}
@Override
public Class<? extends Event> getConcernedEvent() {
return InvocationStartProcessingEvent.class;
}
@Override
public void process(Event data) {
InvocationStartProcessingEvent event = (InvocationStartProcessingEvent) data;
InvocationMonitor monitor = registryMonitor.getInvocationMonitor(event.getOperationName());
monitor.getWaitInQueue().increment(-1);
if (InvocationType.PRODUCER.equals(event.getInvocationType())) {
monitor.getLifeTimeInQueue().update(event.getInQueueNanoTime());
}
}
}
| 1 | 8,123 | why? event.getInvocationType is not you want? | apache-servicecomb-java-chassis | java |
@@ -29,6 +29,7 @@ import (
type Info struct {
ID string `json:"server_id"`
Version string `json:"version"`
+ GitHash string `json:"git_hash"`
GoVersion string `json:"go"`
Host string `json:"host"`
Port int `json:"port"` | 1 | // Copyright 2012-2016 Apcera Inc. All rights reserved.
package server
import (
"bufio"
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"strconv"
"strings"
"sync"
"time"
// Allow dynamic profiling.
_ "net/http/pprof"
"github.com/nats-io/gnatsd/util"
)
// Info is the information sent to clients to help them understand
// this server.
type Info struct {
ID string `json:"server_id"`
Version string `json:"version"`
GoVersion string `json:"go"`
Host string `json:"host"`
Port int `json:"port"`
AuthRequired bool `json:"auth_required"`
SSLRequired bool `json:"ssl_required"` // DEPRECATED: ssl json used for older clients
TLSRequired bool `json:"tls_required"`
TLSVerify bool `json:"tls_verify"`
MaxPayload int `json:"max_payload"`
IP string `json:"ip,omitempty"`
ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to.
// Used internally for quick look-ups.
clientConnectURLs map[string]struct{}
}
// Server is our main struct.
type Server struct {
gcid uint64
stats
mu sync.Mutex
info Info
infoJSON []byte
sl *Sublist
configFile string
optsMu sync.RWMutex
opts *Options
running bool
shutdown bool
listener net.Listener
clients map[uint64]*client
routes map[uint64]*client
remotes map[string]*client
users map[string]*User
totalClients uint64
done chan bool
start time.Time
http net.Listener
httpHandler http.Handler
profiler net.Listener
httpReqStats map[string]uint64
routeListener net.Listener
routeInfo Info
routeInfoJSON []byte
rcQuit chan bool
grMu sync.Mutex
grTmpClients map[uint64]*client
grRunning bool
grWG sync.WaitGroup // to wait on various go routines
cproto int64 // number of clients supporting async INFO
configTime time.Time // last time config was loaded
logging struct {
sync.RWMutex
logger Logger
trace int32
debug int32
}
// Used by tests to check that http.Servers do
// not set any timeout.
monitoringServer *http.Server
profilingServer *http.Server
}
// Make sure all are 64bits for atomic use
type stats struct {
inMsgs int64
outMsgs int64
inBytes int64
outBytes int64
slowConsumers int64
}
// New will setup a new server struct after parsing the options.
func New(opts *Options) *Server {
processOptions(opts)
// Process TLS options, including whether we require client certificates.
tlsReq := opts.TLSConfig != nil
verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert)
info := Info{
ID: genID(),
Version: VERSION,
GoVersion: runtime.Version(),
Host: opts.Host,
Port: opts.Port,
AuthRequired: false,
TLSRequired: tlsReq,
SSLRequired: tlsReq,
TLSVerify: verify,
MaxPayload: opts.MaxPayload,
clientConnectURLs: make(map[string]struct{}),
}
now := time.Now()
s := &Server{
configFile: opts.ConfigFile,
info: info,
sl: NewSublist(),
opts: opts,
done: make(chan bool, 1),
start: now,
configTime: now,
}
s.mu.Lock()
defer s.mu.Unlock()
// For tracking clients
s.clients = make(map[uint64]*client)
// For tracking connections that are not yet registered
// in s.routes, but for which readLoop has started.
s.grTmpClients = make(map[uint64]*client)
// For tracking routes and their remote ids
s.routes = make(map[uint64]*client)
s.remotes = make(map[string]*client)
// Used to kick out all of the route
// connect Go routines.
s.rcQuit = make(chan bool)
// Used to setup Authorization.
s.configureAuthorization()
s.generateServerInfoJSON()
s.handleSignals()
return s
}
func (s *Server) getOpts() *Options {
s.optsMu.RLock()
opts := s.opts
s.optsMu.RUnlock()
return opts
}
func (s *Server) setOpts(opts *Options) {
s.optsMu.Lock()
s.opts = opts
s.optsMu.Unlock()
}
func (s *Server) generateServerInfoJSON() {
// Generate the info json
b, err := json.Marshal(s.info)
if err != nil {
s.Fatalf("Error marshaling INFO JSON: %+v\n", err)
return
}
s.infoJSON = []byte(fmt.Sprintf("INFO %s %s", b, CR_LF))
}
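// Illustrative (editor's sketch, not part of the original source): the line built above is
// what clients receive on connect, roughly of the form
//
//	INFO {"server_id":"...","version":"...","go":"go1.x","host":"0.0.0.0","port":4222,
//	      "auth_required":false,"ssl_required":false,"tls_required":false,"tls_verify":false,
//	      "max_payload":1048576}\r\n
//
// with optional "ip" and "connect_urls" fields when they are set.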
func (s *Server) generateRouteInfoJSON() {
b, err := json.Marshal(s.routeInfo)
if err != nil {
s.Fatalf("Error marshaling route INFO JSON: %+v\n", err)
return
}
s.routeInfoJSON = []byte(fmt.Sprintf(InfoProto, b))
}
// PrintAndDie is exported for access in other packages.
func PrintAndDie(msg string) {
fmt.Fprintf(os.Stderr, "%s\n", msg)
os.Exit(1)
}
// PrintServerAndExit will print our version and exit.
func PrintServerAndExit() {
fmt.Printf("nats-server version %s\n", VERSION)
os.Exit(0)
}
// ProcessCommandLineArgs takes the command line arguments
// validating and setting flags for handling in case any
// sub command was present.
func ProcessCommandLineArgs(cmd *flag.FlagSet) (showVersion bool, showHelp bool, err error) {
if len(cmd.Args()) > 0 {
arg := cmd.Args()[0]
switch strings.ToLower(arg) {
case "version":
return true, false, nil
case "help":
return false, true, nil
default:
return false, false, fmt.Errorf("unrecognized command: %q", arg)
}
}
return false, false, nil
}
// Protected check on running state
func (s *Server) isRunning() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.running
}
func (s *Server) logPid() error {
pidStr := strconv.Itoa(os.Getpid())
return ioutil.WriteFile(s.getOpts().PidFile, []byte(pidStr), 0660)
}
// Start up the server; this will block.
// Start via a Go routine if needed.
func (s *Server) Start() {
s.Noticef("Starting nats-server version %s", VERSION)
s.Debugf("Go build version %s", s.info.GoVersion)
// Avoid RACE between Start() and Shutdown()
s.mu.Lock()
s.running = true
s.mu.Unlock()
s.grMu.Lock()
s.grRunning = true
s.grMu.Unlock()
// Snapshot server options.
opts := s.getOpts()
// Log the pid to a file
if opts.PidFile != _EMPTY_ {
if err := s.logPid(); err != nil {
PrintAndDie(fmt.Sprintf("Could not write pidfile: %v\n", err))
}
}
// Start monitoring if needed
if err := s.StartMonitoring(); err != nil {
s.Fatalf("Can't start monitoring: %v", err)
return
}
// The Routing routine needs to wait for the client listen
// port to be opened and potential ephemeral port selected.
clientListenReady := make(chan struct{})
// Start up routing as well if needed.
if opts.Cluster.Port != 0 {
s.startGoRoutine(func() {
s.StartRouting(clientListenReady)
})
}
// Pprof http endpoint for the profiler.
if opts.ProfPort != 0 {
s.StartProfiler()
}
// Wait for clients.
s.AcceptLoop(clientListenReady)
}
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
func (s *Server) Shutdown() {
s.mu.Lock()
// Prevent issues with multiple calls.
if s.shutdown {
s.mu.Unlock()
return
}
s.shutdown = true
s.running = false
s.grMu.Lock()
s.grRunning = false
s.grMu.Unlock()
conns := make(map[uint64]*client)
// Copy off the clients
for i, c := range s.clients {
conns[i] = c
}
// Copy off the connections that are not yet registered
// in s.routes, but for which the readLoop has started
s.grMu.Lock()
for i, c := range s.grTmpClients {
conns[i] = c
}
s.grMu.Unlock()
// Copy off the routes
for i, r := range s.routes {
r.setRouteNoReconnectOnClose()
conns[i] = r
}
// Number of done channel responses we expect.
doneExpected := 0
// Kick client AcceptLoop()
if s.listener != nil {
doneExpected++
s.listener.Close()
s.listener = nil
}
// Kick route AcceptLoop()
if s.routeListener != nil {
doneExpected++
s.routeListener.Close()
s.routeListener = nil
}
// Kick HTTP monitoring if its running
if s.http != nil {
doneExpected++
s.http.Close()
s.http = nil
}
// Kick Profiling if its running
if s.profiler != nil {
doneExpected++
s.profiler.Close()
}
// Release the solicited routes connect go routines.
close(s.rcQuit)
s.mu.Unlock()
// Close client and route connections
for _, c := range conns {
c.closeConnection()
}
// Block until the accept loops exit
for doneExpected > 0 {
<-s.done
doneExpected--
}
// Wait for go routines to be done.
s.grWG.Wait()
}
// AcceptLoop is exported for easier testing.
func (s *Server) AcceptLoop(clr chan struct{}) {
// If we were to exit before the listener is setup properly,
// make sure we close the channel.
defer func() {
if clr != nil {
close(clr)
}
}()
// Snapshot server options.
opts := s.getOpts()
hp := net.JoinHostPort(opts.Host, strconv.Itoa(opts.Port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on port: %s, %q", hp, e)
return
}
s.Noticef("Listening for client connections on %s",
net.JoinHostPort(opts.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
// Alert of TLS enabled.
if opts.TLSConfig != nil {
s.Noticef("TLS required for client connections")
}
s.Debugf("Server id is %s", s.info.ID)
s.Noticef("Server is ready")
// Setup state that can enable shutdown
s.mu.Lock()
s.listener = l
	// If the server was started with RANDOM_PORT (-1), opts.Port would be equal
	// to 0 at the beginning of this function, so we need to get the actual port.
if opts.Port == 0 {
// Write resolved port back to options.
_, port, err := net.SplitHostPort(l.Addr().String())
if err != nil {
s.Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
s.mu.Unlock()
return
}
portNum, err := strconv.Atoi(port)
if err != nil {
s.Fatalf("Error parsing server address (%s): %s", l.Addr().String(), e)
s.mu.Unlock()
return
}
opts.Port = portNum
}
s.mu.Unlock()
// Let the caller know that we are ready
close(clr)
clr = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
s.Errorf("Temporary Client Accept Error (%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else if s.isRunning() {
s.Errorf("Client Accept Error: %v", err)
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createClient(conn)
s.grWG.Done()
})
}
s.Noticef("Server Exiting..")
s.done <- true
}
// StartProfiler is called to enable dynamic profiling.
func (s *Server) StartProfiler() {
// Snapshot server options.
opts := s.getOpts()
port := opts.ProfPort
// Check for Random Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.Host, strconv.Itoa(port))
l, err := net.Listen("tcp", hp)
s.Noticef("profiling port: %d", l.Addr().(*net.TCPAddr).Port)
if err != nil {
s.Fatalf("error starting profiler: %s", err)
}
srv := &http.Server{
Addr: hp,
Handler: http.DefaultServeMux,
MaxHeaderBytes: 1 << 20,
}
s.mu.Lock()
s.profiler = l
s.profilingServer = srv
s.mu.Unlock()
go func() {
		// if this errors out, it's probably because the server is being shut down
err := srv.Serve(l)
if err != nil {
s.mu.Lock()
shutdown := s.shutdown
s.mu.Unlock()
if !shutdown {
s.Fatalf("error starting profiler: %s", err)
}
}
s.done <- true
}()
}
// StartHTTPMonitoring will enable the HTTP monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPMonitoring() {
s.startMonitoring(false)
}
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPSMonitoring() {
s.startMonitoring(true)
}
// StartMonitoring starts the HTTP or HTTPs server if needed.
func (s *Server) StartMonitoring() error {
// Snapshot server options.
opts := s.getOpts()
// Specifying both HTTP and HTTPS ports is a misconfiguration
if opts.HTTPPort != 0 && opts.HTTPSPort != 0 {
return fmt.Errorf("can't specify both HTTP (%v) and HTTPs (%v) ports", opts.HTTPPort, opts.HTTPSPort)
}
var err error
if opts.HTTPPort != 0 {
err = s.startMonitoring(false)
} else if opts.HTTPSPort != 0 {
if opts.TLSConfig == nil {
return fmt.Errorf("TLS cert and key required for HTTPS")
}
err = s.startMonitoring(true)
}
return err
}
// HTTP endpoints
const (
RootPath = "/"
VarzPath = "/varz"
ConnzPath = "/connz"
RoutezPath = "/routez"
SubszPath = "/subsz"
StackszPath = "/stacksz"
)
// Start the monitoring server
func (s *Server) startMonitoring(secure bool) error {
// Snapshot server options.
opts := s.getOpts()
// Used to track HTTP requests
s.httpReqStats = map[string]uint64{
RootPath: 0,
VarzPath: 0,
ConnzPath: 0,
RoutezPath: 0,
SubszPath: 0,
}
var (
hp string
err error
httpListener net.Listener
port int
)
monitorProtocol := "http"
if secure {
monitorProtocol += "s"
port = opts.HTTPSPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
config := util.CloneTLSConfig(opts.TLSConfig)
config.ClientAuth = tls.NoClientCert
httpListener, err = tls.Listen("tcp", hp, config)
} else {
port = opts.HTTPPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
httpListener, err = net.Listen("tcp", hp)
}
if err != nil {
return fmt.Errorf("can't listen to the monitor port: %v", err)
}
s.Noticef("Starting %s monitor on %s", monitorProtocol,
net.JoinHostPort(opts.HTTPHost, strconv.Itoa(httpListener.Addr().(*net.TCPAddr).Port)))
mux := http.NewServeMux()
// Root
mux.HandleFunc(RootPath, s.HandleRoot)
// Varz
mux.HandleFunc(VarzPath, s.HandleVarz)
// Connz
mux.HandleFunc(ConnzPath, s.HandleConnz)
// Routez
mux.HandleFunc(RoutezPath, s.HandleRoutez)
// Subz
mux.HandleFunc(SubszPath, s.HandleSubsz)
// Subz alias for backwards compatibility
mux.HandleFunc("/subscriptionsz", s.HandleSubsz)
// Stacksz
mux.HandleFunc(StackszPath, s.HandleStacksz)
// Do not set a WriteTimeout because it could cause cURL/browser
// to return empty response or unable to display page if the
// server needs more time to build the response.
srv := &http.Server{
Addr: hp,
Handler: mux,
MaxHeaderBytes: 1 << 20,
}
s.mu.Lock()
s.http = httpListener
s.httpHandler = mux
s.monitoringServer = srv
s.mu.Unlock()
go func() {
srv.Serve(httpListener)
srv.Handler = nil
s.mu.Lock()
s.httpHandler = nil
s.mu.Unlock()
s.done <- true
}()
return nil
}
// HTTPHandler returns the http.Handler object used to handle monitoring
// endpoints. It will return nil if the server is not configured for
// monitoring, or if the server has not been started yet (Server.Start()).
func (s *Server) HTTPHandler() http.Handler {
s.mu.Lock()
defer s.mu.Unlock()
return s.httpHandler
}
func (s *Server) createClient(conn net.Conn) *client {
// Snapshot server options.
opts := s.getOpts()
c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: int64(opts.MaxPayload), start: time.Now()}
// Grab JSON info string
s.mu.Lock()
info := s.infoJSON
authRequired := s.info.AuthRequired
tlsRequired := s.info.TLSRequired
s.totalClients++
s.mu.Unlock()
// Grab lock
c.mu.Lock()
// Initialize
c.initClient()
c.Debugf("Client connection created")
// Send our information.
c.sendInfo(info)
// Unlock to register
c.mu.Unlock()
// Register with the server.
s.mu.Lock()
// If server is not running, Shutdown() may have already gathered the
// list of connections to close. It won't contain this one, so we need
// to bail out now otherwise the readLoop started down there would not
// be interrupted.
if !s.running {
s.mu.Unlock()
return c
}
// If there is a max connections specified, check that adding
// this new client would not push us over the max
if opts.MaxConn > 0 && len(s.clients) >= opts.MaxConn {
s.mu.Unlock()
c.maxConnExceeded()
return nil
}
s.clients[c.cid] = c
s.mu.Unlock()
// Re-Grab lock
c.mu.Lock()
// Check for TLS
if tlsRequired {
c.Debugf("Starting TLS client connection handshake")
c.nc = tls.Server(c.nc, opts.TLSConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
// Force handshake
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Debugf("TLS handshake error: %v", err)
c.sendErr("Secure Connection - TLS Required")
c.closeConnection()
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Indicate that handshake is complete (used in monitoring)
c.flags.set(handshakeComplete)
}
// The connection may have been closed
if c.nc == nil {
c.mu.Unlock()
return c
}
// Check for Auth. We schedule this timer after the TLS handshake to avoid
// the race where the timer fires during the handshake and causes the
// server to write bad data to the socket. See issue #432.
if authRequired {
c.setAuthTimer(secondsToDuration(opts.AuthTimeout))
}
if tlsRequired {
// Rewrap bw
c.bw = bufio.NewWriterSize(c.nc, startBufSize)
}
// Do final client initialization
// Set the Ping timer
c.setPingTimer()
// Spin up the read loop.
s.startGoRoutine(func() { c.readLoop() })
if tlsRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
c.mu.Unlock()
return c
}
// updateServerINFO updates the server's Info object with the given
// array of URLs and re-generate the infoJSON byte array, only if the
// given URLs were not already recorded.
func (s *Server) updateServerINFO(urls []string) {
s.mu.Lock()
defer s.mu.Unlock()
// Will be set to true if we alter the server's Info object.
wasUpdated := false
for _, url := range urls {
if _, present := s.info.clientConnectURLs[url]; !present {
s.info.clientConnectURLs[url] = struct{}{}
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, url)
wasUpdated = true
}
}
if wasUpdated {
s.generateServerInfoJSON()
}
}
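// Illustrative (editor's sketch, not part of the original source): when route gossip reports
// another server's client URLs, e.g. updateServerINFO([]string{"10.0.0.5:4222"}), the
// regenerated INFO JSON gains "connect_urls":["10.0.0.5:4222"], which clients that speak the
// async-INFO protocol can use to discover additional servers.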
// Handle closing down a connection when the handshake has timed out.
func tlsTimeout(c *client, conn *tls.Conn) {
c.mu.Lock()
nc := c.nc
c.mu.Unlock()
// Check if already closed
if nc == nil {
return
}
cs := conn.ConnectionState()
if !cs.HandshakeComplete {
c.Debugf("TLS handshake timeout")
c.sendErr("Secure Connection - TLS Required")
c.closeConnection()
}
}
// Seems silly we have to write these
func tlsVersion(ver uint16) string {
switch ver {
case tls.VersionTLS10:
return "1.0"
case tls.VersionTLS11:
return "1.1"
case tls.VersionTLS12:
return "1.2"
}
return fmt.Sprintf("Unknown [%x]", ver)
}
// We use hex here so we don't need multiple versions
func tlsCipher(cs uint16) string {
name, present := cipherMapByID[cs]
if present {
return name
}
return fmt.Sprintf("Unknown [%x]", cs)
}
// Remove a client or route from our internal accounting.
func (s *Server) removeClient(c *client) {
var rID string
c.mu.Lock()
cid := c.cid
typ := c.typ
r := c.route
if r != nil {
rID = r.remoteID
}
updateProtoInfoCount := false
if typ == CLIENT && c.opts.Protocol >= ClientProtoInfo {
updateProtoInfoCount = true
}
c.mu.Unlock()
s.mu.Lock()
switch typ {
case CLIENT:
delete(s.clients, cid)
if updateProtoInfoCount {
s.cproto--
}
case ROUTER:
delete(s.routes, cid)
if r != nil {
rc, ok := s.remotes[rID]
// Only delete it if it is us..
if ok && c == rc {
delete(s.remotes, rID)
}
}
}
s.mu.Unlock()
}
/////////////////////////////////////////////////////////////////
// These are some helpers for accounting in functional tests.
/////////////////////////////////////////////////////////////////
// NumRoutes will report the number of registered routes.
func (s *Server) NumRoutes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.routes)
}
// NumRemotes will report number of registered remotes.
func (s *Server) NumRemotes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.remotes)
}
// NumClients will report the number of registered clients.
func (s *Server) NumClients() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.clients)
}
// NumSubscriptions will report how many subscriptions are active.
func (s *Server) NumSubscriptions() uint32 {
s.mu.Lock()
subs := s.sl.Count()
s.mu.Unlock()
return subs
}
// ConfigTime will report the last time the server configuration was loaded.
func (s *Server) ConfigTime() time.Time {
s.mu.Lock()
defer s.mu.Unlock()
return s.configTime
}
// Addr will return the net.Addr object for the current listener.
func (s *Server) Addr() net.Addr {
s.mu.Lock()
defer s.mu.Unlock()
if s.listener == nil {
return nil
}
return s.listener.Addr()
}
// MonitorAddr will return the net.Addr object for the monitoring listener.
func (s *Server) MonitorAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.http == nil {
return nil
}
return s.http.Addr().(*net.TCPAddr)
}
// ClusterAddr returns the net.Addr object for the route listener.
func (s *Server) ClusterAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.routeListener == nil {
return nil
}
return s.routeListener.Addr().(*net.TCPAddr)
}
// ProfilerAddr returns the net.Addr object for the route listener.
func (s *Server) ProfilerAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.profiler == nil {
return nil
}
return s.profiler.Addr().(*net.TCPAddr)
}
// ReadyForConnections returns `true` if the server is ready to accept client
// and, if routing is enabled, route connections. If after the duration
// `dur` the server is still not ready, returns `false`.
func (s *Server) ReadyForConnections(dur time.Duration) bool {
// Snapshot server options.
opts := s.getOpts()
end := time.Now().Add(dur)
for time.Now().Before(end) {
s.mu.Lock()
ok := s.listener != nil && (opts.Cluster.Port == 0 || s.routeListener != nil)
s.mu.Unlock()
if ok {
return true
}
time.Sleep(25 * time.Millisecond)
}
return false
}
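// Illustrative usage (editor's sketch, not part of the original source): embedding callers
// and tests typically pair Start() with this readiness check:
//
//	s := New(&Options{Host: "127.0.0.1", Port: -1}) // Port -1 selects a random available port
//	go s.Start()
//	if !s.ReadyForConnections(10 * time.Second) {
//	    log.Fatal("nats-server did not become ready in time")
//	}
//	defer s.Shutdown()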
// ID returns the server's ID
func (s *Server) ID() string {
s.mu.Lock()
defer s.mu.Unlock()
return s.info.ID
}
func (s *Server) startGoRoutine(f func()) {
s.grMu.Lock()
if s.grRunning {
s.grWG.Add(1)
go f()
}
s.grMu.Unlock()
}
// getClientConnectURLs returns suitable URLs for clients to connect to the listen
// port based on the server options' Host and Port. If the Host corresponds to
// "any" interfaces, this call returns the list of resolved IP addresses.
func (s *Server) getClientConnectURLs() []string {
// Snapshot server options.
opts := s.getOpts()
s.mu.Lock()
defer s.mu.Unlock()
sPort := strconv.Itoa(opts.Port)
urls := make([]string, 0, 1)
ipAddr, err := net.ResolveIPAddr("ip", opts.Host)
// If the host is "any" (0.0.0.0 or ::), get specific IPs from available
// interfaces.
if err == nil && ipAddr.IP.IsUnspecified() {
var ip net.IP
ifaces, _ := net.Interfaces()
for _, i := range ifaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
// Skip non global unicast addresses
if !ip.IsGlobalUnicast() || ip.IsUnspecified() {
ip = nil
continue
}
urls = append(urls, net.JoinHostPort(ip.String(), sPort))
}
}
}
if err != nil || len(urls) == 0 {
// We are here if s.opts.Host is not "0.0.0.0" nor "::", or if for some
// reason we could not add any URL in the loop above.
// We had a case where a Windows VM was hosed and would have err == nil
// and not add any address in the array in the loop above, and we
		// ended up returning 0.0.0.0, which is problematic for Windows clients.
// Check for 0.0.0.0 or :: specifically, and ignore if that's the case.
if opts.Host == "0.0.0.0" || opts.Host == "::" {
s.Errorf("Address %q can not be resolved properly", opts.Host)
} else {
urls = append(urls, net.JoinHostPort(opts.Host, sPort))
}
}
return urls
}
| 1 | 7,444 | nit: how about `GitSHA`? | nats-io-nats-server | go |
@@ -93,6 +93,7 @@ an example.
*/
#define MAX_REMOTE_JOBS_DEFAULT 100
+#define FAIL_DIR "makeflow.failed.%d"
static sig_atomic_t makeflow_abort_flag = 0;
static int makeflow_failed_flag = 0; | 1 | /*
Copyright (C) 2008- The University of Notre Dame
This software is distributed under the GNU General Public License.
See the file COPYING for details.
*/
#include "auth_all.h"
#include "auth_ticket.h"
#include "batch_job.h"
#include "cctools.h"
#include "copy_stream.h"
#include "create_dir.h"
#include "debug.h"
#include "getopt_aux.h"
#include "hash_table.h"
#include "int_sizes.h"
#include "itable.h"
#include "link.h"
#include "list.h"
#include "load_average.h"
#include "macros.h"
#include "path.h"
#include "random.h"
#include "rmonitor.h"
#include "stringtools.h"
#include "work_queue.h"
#include "work_queue_catalog.h"
#include "xxmalloc.h"
#include "jx.h"
#include "jx_print.h"
#include "jx_parse.h"
#include "jx_eval.h"
#include "create_dir.h"
#include "sha1.h"
#include "dag.h"
#include "dag_visitors.h"
#include "parser.h"
#include "parser_jx.h"
#include "makeflow_summary.h"
#include "makeflow_gc.h"
#include "makeflow_log.h"
#include "makeflow_wrapper.h"
#include "makeflow_wrapper_docker.h"
#include "makeflow_wrapper_monitor.h"
#include "makeflow_wrapper_umbrella.h"
#include "makeflow_mounts.h"
#include "makeflow_wrapper_enforcement.h"
#include "makeflow_wrapper_singularity.h"
#include "makeflow_archive.h"
#include "makeflow_catalog_reporter.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <libgen.h>
#include <assert.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
/*
Code organization notes:
- The modules dag/dag_node/dag_file etc contain the data structures that
represent the dag structure by itself. Functions named dag_*() create
and manipulate those data structures, but do not execute the dag itself.
These are shared between makeflow and other tools that read and manipulate
the dag, like makeflow_viz, makeflow_linker, and so forth.
- The modules makeflow/makeflow_log/makeflow_gc etc contain the functions
that execute the dag by invoking batch operations, processing the log, etc.
These are all functions named makeflow_*() to distinguish them from dag_*().
- The separation between dag structure and execution state is imperfect,
because some of the execution state (node states, node counts, etc)
is stored in struct dag and struct dag_node. Perhaps this can be improved.
- All operations on files should use the batch_fs_*() functions, rather
than invoking Unix I/O directly. This is because some batch systems
(Hadoop, Confuga, etc) also include the storage where the files to be
accessed are located.
- APIs like work_queue_* should be indirectly accessed by setting options
in Batch Job using batch_queue_set_option. See batch_job_work_queue.c for
an example.
*/
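/*
Illustrative example (editor's sketch, not part of the original source): the convention
described above means options are routed through the batch_queue layer instead of calling
work_queue_* directly, as done later in this file:

	batch_queue_set_option(queue, "batch-options", batch_options);
	batch_queue_set_int_option(queue, "task-id", n->nodeid);
*/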
#define MAX_REMOTE_JOBS_DEFAULT 100
static sig_atomic_t makeflow_abort_flag = 0;
static int makeflow_failed_flag = 0;
static int makeflow_submit_timeout = 3600;
static int makeflow_retry_flag = 0;
static int makeflow_retry_max = 5;
/* makeflow_gc_method indicates the type of garbage collection
* indicated by the user. Refer to makeflow_gc.h for specifics */
static makeflow_gc_method_t makeflow_gc_method = MAKEFLOW_GC_NONE;
/* Disk size at which point GC is run */
static uint64_t makeflow_gc_size = 0;
/* # of files after which GC is run */
static int makeflow_gc_count = -1;
/* Iterations of wait loop prior to GC check */
static int makeflow_gc_barrier = 1;
/* Determines next gc_barrier to make checks less frequent with large number of tasks */
static double makeflow_gc_task_ratio = 0.05;
static batch_queue_type_t batch_queue_type = BATCH_QUEUE_TYPE_LOCAL;
static struct batch_queue *local_queue = 0;
static struct batch_queue *remote_queue = 0;
static int local_jobs_max = 1;
static int remote_jobs_max = MAX_REMOTE_JOBS_DEFAULT;
static char *project = NULL;
static int port = 0;
static int output_len_check = 0;
static int skip_file_check = 0;
static int cache_mode = 1;
static container_mode_t container_mode = CONTAINER_MODE_NONE;
static char *container_image = NULL;
static char *container_image_tar = NULL;
static char *parrot_path = "./parrot_run";
/*
Wait up to this many seconds for an output file of a successful task
to appear on the local filesystem (e.g., to deal with NFS
semantics).
*/
static int file_creation_patience_wait_time = 0;
/*
Write a verbose transaction log with SYMBOL tags.
SYMBOLs are category labels (SYMBOLs should be deprecated
once weaver/pbui tools are updated.)
*/
static int log_verbose_mode = 0;
static struct makeflow_wrapper *wrapper = 0;
static struct makeflow_monitor *monitor = 0;
static struct makeflow_wrapper *enforcer = 0;
static struct makeflow_wrapper_umbrella *umbrella = 0;
static int catalog_reporting_on = 0;
static char *mountfile = NULL;
static char *mount_cache = NULL;
static int use_mountfile = 0;
static struct list *shared_fs_list = NULL;
static int did_find_archived_job = 0;
/*
Generates file list for node based on node files, wrapper
input files, and monitor input files. Relies on %% nodeid
replacement for monitor file names.
*/
static struct list *makeflow_generate_input_files( struct dag_node *n )
{
struct list *result = list_duplicate(n->source_files);
if(wrapper) result = makeflow_wrapper_generate_files(result, wrapper->input_files, n, wrapper);
if(enforcer) result = makeflow_wrapper_generate_files(result, enforcer->input_files, n, enforcer);
if(umbrella) result = makeflow_wrapper_generate_files(result, umbrella->wrapper->input_files, n, umbrella->wrapper);
if(monitor) result = makeflow_wrapper_generate_files(result, monitor->wrapper->input_files, n, monitor->wrapper);
return result;
}
static struct list *makeflow_generate_output_files( struct dag_node *n )
{
struct list *result = list_duplicate(n->target_files);
if(wrapper) result = makeflow_wrapper_generate_files(result, wrapper->output_files, n, wrapper);
if(enforcer) result = makeflow_wrapper_generate_files(result, enforcer->output_files, n, enforcer);
if(umbrella) result = makeflow_wrapper_generate_files(result, umbrella->wrapper->output_files, n, umbrella->wrapper);
if(monitor) result = makeflow_wrapper_generate_files(result, monitor->wrapper->output_files, n, monitor->wrapper);
return result;
}
/*
Abort one job in a given batch queue.
*/
static void makeflow_abort_job( struct dag *d, struct dag_node *n, struct batch_queue *q, UINT64_T jobid, const char *name )
{
printf("aborting %s job %" PRIu64 "\n", name, jobid);
batch_job_remove(q, jobid);
makeflow_log_state_change(d, n, DAG_NODE_STATE_ABORTED);
struct list *outputs = makeflow_generate_output_files(n);
struct dag_file *f;
list_first_item(outputs);
while((f = list_next_item(outputs)))
makeflow_clean_file(d, q, f, 0);
makeflow_clean_node(d, q, n, 1);
}
/*
Abort the dag by removing all batch jobs from all queues.
*/
static void makeflow_abort_all(struct dag *d)
{
UINT64_T jobid;
struct dag_node *n;
printf("got abort signal...\n");
itable_firstkey(d->local_job_table);
while(itable_nextkey(d->local_job_table, &jobid, (void **) &n)) {
makeflow_abort_job(d,n,local_queue,jobid,"local");
}
itable_firstkey(d->remote_job_table);
while(itable_nextkey(d->remote_job_table, &jobid, (void **) &n)) {
makeflow_abort_job(d,n,remote_queue,jobid,"remote");
}
}
static void makeflow_node_force_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n);
/*
Decide whether to rerun a node based on batch and file system status. The silent
option was added to prevent confusing debug output when in clean mode. When
clean_mode is not NONE we silence the node resetting output.
*/
void makeflow_node_decide_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n, int silent)
{
struct dag_file *f;
if(itable_lookup(rerun_table, n->nodeid))
return;
// Below are a bunch of situations when a node has to be rerun.
// If a job was submitted to Condor, then just reconnect to it.
if(n->state == DAG_NODE_STATE_RUNNING && !(n->local_job && local_queue) && batch_queue_type == BATCH_QUEUE_TYPE_CONDOR) {
// Reconnect the Condor jobs
if(!silent) fprintf(stderr, "rule still running: %s\n", n->command);
itable_insert(d->remote_job_table, n->jobid, n);
// Otherwise, we cannot reconnect to the job, so rerun it
} else if(n->state == DAG_NODE_STATE_RUNNING || n->state == DAG_NODE_STATE_FAILED || n->state == DAG_NODE_STATE_ABORTED) {
if(!silent) fprintf(stderr, "will retry failed rule: %s\n", n->command);
goto rerun;
}
// Rerun if an input file has been updated since the last execution.
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
if(dag_file_should_exist(f)) {
continue;
} else {
if(!f->created_by) {
if(!silent) fprintf(stderr, "makeflow: input file %s does not exist and is not created by any rule.\n", f->filename);
exit(1);
} else {
/* If input file is missing, but node completed and file was garbage, then avoid rerunning. */
if(n->state == DAG_NODE_STATE_COMPLETE && f->state == DAG_FILE_STATE_DELETE) {
continue;
}
goto rerun;
}
}
}
// Rerun if an output file is missing.
list_first_item(n->target_files);
while((f = list_next_item(n->target_files))) {
if(dag_file_should_exist(f))
continue;
/* If output file is missing, but node completed and file was gc'ed, then avoid rerunning. */
if(n->state == DAG_NODE_STATE_COMPLETE && f->state == DAG_FILE_STATE_DELETE)
continue;
goto rerun;
}
// Do not rerun this node
return;
rerun:
makeflow_node_force_rerun(rerun_table, d, n);
}
/*
Reset all state to cause a node to be re-run.
*/
void makeflow_node_force_rerun(struct itable *rerun_table, struct dag *d, struct dag_node *n)
{
struct dag_node *p;
struct dag_file *f1;
struct dag_file *f2;
int child_node_found;
if(itable_lookup(rerun_table, n->nodeid))
return;
// Mark this node as having been rerun already
itable_insert(rerun_table, n->nodeid, n);
// Remove running batch jobs
if(n->state == DAG_NODE_STATE_RUNNING) {
if(n->local_job && local_queue) {
batch_job_remove(local_queue, n->jobid);
itable_remove(d->local_job_table, n->jobid);
} else {
batch_job_remove(remote_queue, n->jobid);
itable_remove(d->remote_job_table, n->jobid);
}
}
// Clean up things associated with this node
struct list *outputs = makeflow_generate_output_files(n);
list_first_item(outputs);
while((f1 = list_next_item(outputs)))
makeflow_clean_file(d, remote_queue, f1, 0);
makeflow_clean_node(d, remote_queue, n, 0);
makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING);
// For each parent node, rerun it if input file was garbage collected
list_first_item(n->source_files);
while((f1 = list_next_item(n->source_files))) {
if(dag_file_should_exist(f1))
continue;
p = f1->created_by;
if(p) {
makeflow_node_force_rerun(rerun_table, d, p);
f1->reference_count += 1;
}
}
// For each child node, rerun it
list_first_item(n->target_files);
while((f1 = list_next_item(n->target_files))) {
for(p = d->nodes; p; p = p->next) {
child_node_found = 0;
list_first_item(p->source_files);
while((f2 = list_next_item(n->source_files))) {
if(!strcmp(f1->filename, f2->filename)) {
child_node_found = 1;
break;
}
}
if(child_node_found) {
makeflow_node_force_rerun(rerun_table, d, p);
}
}
}
}
/*
Update nested jobs with appropriate number of local jobs
(total local jobs max / maximum number of concurrent nests).
*/
static void makeflow_prepare_nested_jobs(struct dag *d)
{
int dag_nested_width = dag_width(d, 1);
int update_dag_nests = 1;
char *s = getenv("MAKEFLOW_UPDATE_NESTED_JOBS");
if(s)
update_dag_nests = atoi(s);
if(dag_nested_width > 0 && update_dag_nests) {
dag_nested_width = MIN(dag_nested_width, local_jobs_max);
struct dag_node *n;
for(n = d->nodes; n; n = n->next) {
if(n->nested_job && ((n->local_job && local_queue) || batch_queue_type == BATCH_QUEUE_TYPE_LOCAL)) {
char *command = xxmalloc(strlen(n->command) + 20);
sprintf(command, "%s -j %d", n->command, local_jobs_max / dag_nested_width);
free((char *) n->command);
n->command = command;
}
}
}
}
/*
Match a filename (/home/fred) to a path stem (/home).
Returns 0 on match, non-zero otherwise.
*/
static int prefix_match(void *stem, const void *filename) {
assert(stem);
assert(filename);
return strncmp(stem, filename, strlen(stem));
}
/*
Returns true if the given filename is located in
a shared filesystem, as given by the shared_fs_list.
*/
static int makeflow_file_on_sharedfs( const char *filename )
{
return !list_iterate(shared_fs_list,prefix_match,filename);
}
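/*
Illustrative example (editor's sketch, not part of the original source): if shared_fs_list
contains the stem "/hdfs", then makeflow_file_on_sharedfs("/hdfs/data/in.txt") returns true,
and makeflow_file_list_format() below will skip that file when building batch transfer lists.
*/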
/*
Given a file, return the string that identifies it appropriately
for the given batch system, combining the local and remote name
and making substitutions according to the node.
*/
static char * makeflow_file_format( struct dag_node *n, struct dag_file *f, struct batch_queue *queue )
{
const char *remotename = dag_node_get_remote_name(n, f->filename);
if(!remotename && wrapper) remotename = makeflow_wrapper_get_remote_name(wrapper, n->d, f->filename);
if(!remotename && enforcer) remotename = makeflow_wrapper_get_remote_name(enforcer, n->d, f->filename);
if(!remotename && monitor) remotename = makeflow_wrapper_get_remote_name(monitor->wrapper, n->d, f->filename);
if(!remotename && umbrella) remotename = makeflow_wrapper_get_remote_name(umbrella->wrapper, n->d, f->filename);
if(!remotename) remotename = f->filename;
switch (batch_queue_get_type(queue)) {
case BATCH_QUEUE_TYPE_WORK_QUEUE:
return string_format("%s=%s,", f->filename, remotename);
default:
return string_format("%s,", f->filename);
}
}
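/*
Illustrative example (editor's sketch, not part of the original source): for a rule that
remotely renames "data/in.txt" to "in.txt", the fragment produced above is
"data/in.txt=in.txt," under Work Queue, and just "data/in.txt," for batch systems that do
not support remote renaming.
*/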
/*
Given a list of files, add the files to the given string.
Returns the original string, realloced if necessary
*/
static char * makeflow_file_list_format( struct dag_node *node, char *file_str, struct list *file_list, struct batch_queue *queue )
{
struct dag_file *file;
if(!file_str) file_str = strdup("");
if(!file_list) return file_str;
list_first_item(file_list);
while((file=list_next_item(file_list))) {
if (makeflow_file_on_sharedfs(file->filename)) {
debug(D_MAKEFLOW_RUN, "Skipping file %s on shared fs\n",
file->filename);
continue;
}
char *f = makeflow_file_format(node,file,queue);
file_str = string_combine(file_str,f);
free(f);
}
return file_str;
}
/*
Submit one fully formed job, retrying failures up to the makeflow_submit_timeout.
This is necessary because busy batch systems occasionally do not accept a job submission.
*/
static batch_job_id_t makeflow_node_submit_retry( struct batch_queue *queue, const char *command, const char *input_files, const char *output_files, struct jx *envlist, const struct rmsummary *resources)
{
time_t stoptime = time(0) + makeflow_submit_timeout;
int waittime = 1;
batch_job_id_t jobid = 0;
/* Display the fully elaborated command, just like Make does. */
printf("submitting job: %s\n", command);
while(1) {
jobid = batch_job_submit(queue, command, input_files, output_files, envlist, resources);
if(jobid >= 0) {
printf("submitted job %"PRIbjid"\n", jobid);
return jobid;
}
fprintf(stderr, "couldn't submit batch job, still trying...\n");
if(makeflow_abort_flag) break;
if(time(0) > stoptime) {
fprintf(stderr, "unable to submit job after %d seconds!\n", makeflow_submit_timeout);
break;
}
sleep(waittime);
waittime *= 2;
if(waittime > 60) waittime = 60;
}
return 0;
}
/*
Expand a dag_node into a text list of input files,
output files, and a command, by applying all wrappers
and settings. Used at both job submission and completion
to obtain identical strings.
*/
static void makeflow_node_expand( struct dag_node *n, struct batch_queue *queue, struct list **input_list, struct list **output_list, char **input_files, char **output_files, char **command )
{
makeflow_wrapper_umbrella_set_input_files(umbrella, queue, n);
if (*input_list == NULL) {
*input_list = makeflow_generate_input_files(n);
}
if (*output_list == NULL) {
*output_list = makeflow_generate_output_files(n);
}
/* Create strings for all the files mentioned by this node. */
*input_files = makeflow_file_list_format(n, 0, *input_list, queue);
*output_files = makeflow_file_list_format(n, 0, *output_list, queue);
/* Expand the command according to each of the wrappers */
*command = strdup(n->command);
*command = makeflow_wrap_wrapper(*command, n, wrapper);
*command = makeflow_wrap_enforcer(*command, n, enforcer, *input_list, *output_list);
*command = makeflow_wrap_umbrella(*command, n, umbrella, queue, *input_files, *output_files);
*command = makeflow_wrap_monitor(*command, n, queue, monitor);
}
/*
Submit a node to the appropriate batch system, after materializing
the necessary list of input and output files, and applying all
wrappers and options.
*/
static void makeflow_node_submit(struct dag *d, struct dag_node *n)
{
struct batch_queue *queue;
struct dag_file *f;
struct list *input_list = NULL, *output_list = NULL;
char *input_files = NULL, *output_files = NULL, *command = NULL;
if(n->local_job && local_queue) {
queue = local_queue;
} else {
queue = remote_queue;
}
makeflow_node_expand(n, queue, &input_list, &output_list, &input_files, &output_files, &command);
/* Before setting the batch job options (stored in the "BATCH_OPTIONS"
* variable), we must save the previous global queue value, and then
* restore it after we submit. */
struct dag_variable_lookup_set s = { d, n->category, n, NULL };
char *batch_options = dag_variable_lookup_string("BATCH_OPTIONS", &s);
char *previous_batch_options = NULL;
if(batch_queue_get_option(queue, "batch-options"))
previous_batch_options = xxstrdup(batch_queue_get_option(queue, "batch-options"));
if(batch_options) {
debug(D_MAKEFLOW_RUN, "Batch options: %s\n", batch_options);
batch_queue_set_option(queue, "batch-options", batch_options);
free(batch_options);
}
batch_queue_set_int_option(queue, "task-id", n->nodeid);
/* Generate the environment vars specific to this node. */
struct jx *envlist = dag_node_env_create(d,n);
/* Logs the creation of output files. */
makeflow_log_file_list_state_change(d,output_list,DAG_FILE_STATE_EXPECT);
/* check archiving directory to see if node has already been preserved */
if (d->should_read_archive && makeflow_archive_is_preserved(d, n, command, input_list, output_list)) {
printf("node %d already exists in archive, replicating output files\n", n->nodeid);
/* copy archived files to working directory and update state for node and dag_files */
makeflow_archive_copy_preserved_files(d, n, output_list);
n->state = DAG_NODE_STATE_RUNNING;
list_first_item(n->target_files);
while((f = list_next_item(n->target_files))) {
makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXISTS);
}
makeflow_log_state_change(d, n, DAG_NODE_STATE_COMPLETE);
did_find_archived_job = 1;
} else {
/* Now submit the actual job, retrying failures as needed. */
n->jobid = makeflow_node_submit_retry(queue,command,input_files,output_files,envlist, dag_node_dynamic_label(n));
/* Update all of the necessary data structures. */
if(n->jobid >= 0) {
makeflow_log_state_change(d, n, DAG_NODE_STATE_RUNNING);
if(n->local_job && local_queue) {
itable_insert(d->local_job_table, n->jobid, n);
} else {
itable_insert(d->remote_job_table, n->jobid, n);
}
} else {
makeflow_log_state_change(d, n, DAG_NODE_STATE_FAILED);
makeflow_failed_flag = 1;
}
}
/* Restore old batch job options. */
if(previous_batch_options) {
batch_queue_set_option(queue, "batch-options", previous_batch_options);
free(previous_batch_options);
}
free(command);
list_delete(input_list);
list_delete(output_list);
free(input_files);
free(output_files);
jx_delete(envlist);
}
static int makeflow_node_ready(struct dag *d, struct dag_node *n)
{
struct dag_file *f;
if(n->state != DAG_NODE_STATE_WAITING)
return 0;
if(n->local_job && local_queue) {
if(dag_local_jobs_running(d) >= local_jobs_max)
return 0;
} else {
if(dag_remote_jobs_running(d) >= remote_jobs_max)
return 0;
}
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
if(dag_file_should_exist(f)) {
continue;
} else {
return 0;
}
}
return 1;
}
/*
Find all jobs ready to be run, then submit them.
*/
static void makeflow_dispatch_ready_jobs(struct dag *d)
{
struct dag_node *n;
for(n = d->nodes; n; n = n->next) {
if(dag_remote_jobs_running(d) >= remote_jobs_max && dag_local_jobs_running(d) >= local_jobs_max) {
break;
}
if(makeflow_node_ready(d, n)) {
makeflow_node_submit(d, n);
}
}
}
/*
Check that the indicated file was created and log, error, or retry as appropriate.
*/
int makeflow_node_check_file_was_created(struct dag_node *n, struct dag_file *f)
{
struct stat buf;
int file_created = 0;
int64_t start_check = time(0);
while(!file_created) {
if(batch_fs_stat(remote_queue, f->filename, &buf) < 0) {
fprintf(stderr, "%s did not create file %s\n", n->command, f->filename);
}
else if(output_len_check && buf.st_size <= 0) {
debug(D_MAKEFLOW_RUN, "%s created a file of length %ld\n", n->command, (long) buf.st_size);
}
else {
/* File was created and has length larger than zero. */
debug(D_MAKEFLOW_RUN, "File %s created by rule %d.\n", f->filename, n->nodeid);
f->actual_size = buf.st_size;
makeflow_log_file_state_change(n->d, f, DAG_FILE_STATE_EXISTS);
file_created = 1;
break;
}
if(file_creation_patience_wait_time > 0 && time(0) - start_check < file_creation_patience_wait_time) {
/* Failed to see the file. Sleep and try again. */
debug(D_MAKEFLOW_RUN, "Checking again for file %s.\n", f->filename);
sleep(1);
} else {
			/* The file was not seen by makeflow in the allotted tries. */
debug(D_MAKEFLOW_RUN, "File %s was not created by rule %d.\n", f->filename, n->nodeid);
file_created = 0;
break;
}
}
return file_created;
}
/*
Mark the given task as completing, using the batch_job_info completion structure provided by batch_job.
*/
static void makeflow_node_complete(struct dag *d, struct dag_node *n, struct batch_queue *queue, struct batch_job_info *info)
{
struct dag_file *f;
int job_failed = 0;
int monitor_retried = 0;
if(n->state != DAG_NODE_STATE_RUNNING)
return;
if(monitor) {
char *nodeid = string_format("%d",n->nodeid);
char *output_prefix = NULL;
if(batch_queue_supports_feature(queue, "output_directories") || n->local_job) {
output_prefix = xxstrdup(monitor->log_prefix);
} else {
output_prefix = xxstrdup(path_basename(monitor->log_prefix));
}
char *log_name_prefix = string_replace_percents(output_prefix, nodeid);
char *summary_name = string_format("%s.summary", log_name_prefix);
if(n->resources_measured)
rmsummary_delete(n->resources_measured);
n->resources_measured = rmsummary_parse_file_single(summary_name);
category_accumulate_summary(n->category, n->resources_measured, NULL);
makeflow_monitor_move_output_if_needed(n, queue, monitor);
free(nodeid);
free(log_name_prefix);
free(summary_name);
}
struct list *outputs = makeflow_generate_output_files(n);
if(info->disk_allocation_exhausted) {
job_failed = 1;
}
else if(info->exited_normally && info->exit_code == 0) {
list_first_item(outputs);
while((f = list_next_item(outputs))) {
if(!makeflow_node_check_file_was_created(n, f))
{
job_failed = 1;
}
}
} else {
if(info->exited_normally) {
fprintf(stderr, "%s failed with exit code %d\n", n->command, info->exit_code);
} else {
fprintf(stderr, "%s crashed with signal %d (%s)\n", n->command, info->exit_signal, strsignal(info->exit_signal));
}
job_failed = 1;
}
if(job_failed) {
makeflow_log_state_change(d, n, DAG_NODE_STATE_FAILED);
/* Clean files created in node. Clean existing and expected and record deletion. */
list_first_item(outputs);
while((f = list_next_item(outputs))) {
if(f->state == DAG_FILE_STATE_EXPECT) {
makeflow_clean_file(d, remote_queue, f, 1);
} else {
makeflow_clean_file(d, remote_queue, f, 0);
}
}
if(info->disk_allocation_exhausted) {
fprintf(stderr, "\nrule %d failed because it exceeded its loop device allocation capacity.\n", n->nodeid);
if(n->resources_measured)
{
rmsummary_print(stderr, n->resources_measured, /* pprint */ 0, /* extra fields */ NULL);
fprintf(stderr, "\n");
}
category_allocation_t next = category_next_label(n->category, n->resource_request, /* resource overflow */ 1, n->resources_requested, n->resources_measured);
if(next != CATEGORY_ALLOCATION_ERROR) {
debug(D_MAKEFLOW_RUN, "Rule %d resubmitted using new resource allocation.\n", n->nodeid);
n->resource_request = next;
fprintf(stderr, "\nrule %d resubmitting with maximum resources.\n", n->nodeid);
makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING);
if(monitor) { monitor_retried = 1; }
}
}
if(monitor && info->exit_code == RM_OVERFLOW)
{
debug(D_MAKEFLOW_RUN, "rule %d failed because it exceeded the resources limits.\n", n->nodeid);
if(n->resources_measured && n->resources_measured->limits_exceeded)
{
char *str = rmsummary_print_string(n->resources_measured->limits_exceeded, 1);
debug(D_MAKEFLOW_RUN, "%s", str);
free(str);
}
category_allocation_t next = category_next_label(n->category, n->resource_request, /* resource overflow */ 1, n->resources_requested, n->resources_measured);
if(next != CATEGORY_ALLOCATION_ERROR) {
debug(D_MAKEFLOW_RUN, "Rule %d resubmitted using new resource allocation.\n", n->nodeid);
n->resource_request = next;
makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING);
monitor_retried = 1;
}
}
if(!monitor_retried) {
if(makeflow_retry_flag || info->exit_code == 101) {
n->failure_count++;
if(n->failure_count > makeflow_retry_max) {
notice(D_MAKEFLOW_RUN, "job %s failed too many times.", n->command);
makeflow_failed_flag = 1;
} else {
notice(D_MAKEFLOW_RUN, "will retry failed job %s", n->command);
makeflow_log_state_change(d, n, DAG_NODE_STATE_WAITING);
}
}
else
{
makeflow_failed_flag = 1;
}
}
else
{
makeflow_failed_flag = 1;
}
} else {
/* Mark source files that have been used by this node */
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
f->reference_count+= -1;
if(f->reference_count == 0 && f->state == DAG_FILE_STATE_EXISTS)
makeflow_log_file_state_change(d, f, DAG_FILE_STATE_COMPLETE);
}
/* store node into archiving directory */
if (d->should_write_to_archive) {
printf("archiving node within archiving directory\n");
struct list *input_list = NULL;
char *input_files = NULL, *output_files = NULL, *command = NULL;
makeflow_node_expand(n, queue, &input_list, &outputs, &input_files, &output_files, &command);
makeflow_archive_populate(d, n, command, input_list, outputs, info);
free(command);
free(input_files);
free(output_files);
list_delete(input_list);
}
makeflow_log_state_change(d, n, DAG_NODE_STATE_COMPLETE);
}
list_delete(outputs);
}
/*
Check the dag for consistency, and emit errors if input dependencies, etc are missing.
*/
static int makeflow_check(struct dag *d)
{
struct stat buf;
struct dag_node *n;
struct dag_file *f;
int error = 0;
debug(D_MAKEFLOW_RUN, "checking rules for consistency...\n");
for(n = d->nodes; n; n = n->next) {
list_first_item(n->source_files);
while((f = list_next_item(n->source_files))) {
if(f->created_by) {
continue;
}
if(skip_file_check || batch_fs_stat(remote_queue, f->filename, &buf) >= 0) {
continue;
}
if(f->source) {
continue;
}
fprintf(stderr, "makeflow: %s does not exist, and is not created by any rule.\n", f->filename);
error++;
}
}
if(error) {
fprintf(stderr, "makeflow: found %d errors during consistency check.\n", error);
return 0;
} else {
return 1;
}
}
/*
Used to check that the features used are supported by the batch system.
This is where we would add checking of selected options to verify that they
are supported by the batch system, such as Work Queue specific options.
*/
static int makeflow_check_batch_consistency(struct dag *d)
{
struct dag_node *n;
struct dag_file *f;
int error = 0;
debug(D_MAKEFLOW_RUN, "checking for consistency of batch system support...\n");
for(n = d->nodes; n; n = n->next) {
if(itable_size(n->remote_names) > 0 || (wrapper && wrapper->uses_remote_rename)){
if(n->local_job) {
debug(D_ERROR, "Remote renaming is not supported with -Tlocal or LOCAL execution. Rule %d (line %d).\n", n->nodeid, n->linenum);
error = 1;
break;
} else if (!batch_queue_supports_feature(remote_queue, "remote_rename")) {
debug(D_ERROR, "Remote renaming is not supported on selected batch system. Rule %d (line %d).\n", n->nodeid, n->linenum);
error = 1;
break;
}
}
if(!batch_queue_supports_feature(remote_queue, "absolute_path") && !n->local_job){
list_first_item(n->source_files);
while((f = list_next_item(n->source_files)) && !error) {
const char *remotename = dag_node_get_remote_name(n, f->filename);
if (makeflow_file_on_sharedfs(f->filename)) {
if (remotename)
fatal("Remote renaming for %s is not supported on a shared filesystem",
f->filename);
continue;
}
if((remotename && *remotename == '/') || (*f->filename == '/' && !remotename)) {
debug(D_ERROR, "Absolute paths are not supported on selected batch system. Rule %d (line %d).\n", n->nodeid, n->linenum);
error = 1;
break;
}
}
list_first_item(n->target_files);
while((f = list_next_item(n->target_files)) && !error) {
const char *remotename = dag_node_get_remote_name(n, f->filename);
if (makeflow_file_on_sharedfs(f->filename)) {
if (remotename)
fatal("Remote renaming for %s is not supported on a shared filesystem",
f->filename);
continue;
}
if((remotename && *remotename == '/') || (*f->filename == '/' && !remotename)) {
debug(D_ERROR, "Absolute paths are not supported on selected batch system. Rule %d (line %d).\n", n->nodeid, n->linenum);
error = 1;
break;
}
}
}
}
if(error) {
return 0;
} else {
return 1;
}
}
/*
Main loop for running a makeflow: submit jobs, wait for completion, and keep going until everything is done.
*/
static void makeflow_run( struct dag *d )
{
struct dag_node *n;
batch_job_id_t jobid;
struct batch_job_info info;
timestamp_t last_time = timestamp_get();
timestamp_t start = timestamp_get();
int first_report = 1;
//reporting to catalog
if(catalog_reporting_on){
makeflow_catalog_summary(d, project, batch_queue_type, start);
}
while(!makeflow_abort_flag) {
did_find_archived_job = 0;
makeflow_dispatch_ready_jobs(d);
/*
Because archived jobs are never "run", no local or remote jobs are added
to the remote or local job table if all ready jobs were found within the archive.
Thus makeflow_dispatch_ready_jobs must run at least once more if an archived job was found.
*/
if(dag_local_jobs_running(d)==0 && dag_remote_jobs_running(d)==0 && did_find_archived_job == 0 )
break;
if(dag_remote_jobs_running(d)) {
int tmp_timeout = 5;
jobid = batch_job_wait_timeout(remote_queue, &info, time(0) + tmp_timeout);
if(jobid > 0) {
printf("job %"PRIbjid" completed\n",jobid);
debug(D_MAKEFLOW_RUN, "Job %" PRIbjid " has returned.\n", jobid);
n = itable_remove(d->remote_job_table, jobid);
if(n)
makeflow_node_complete(d, n, remote_queue, &info);
}
}
if(dag_local_jobs_running(d)) {
time_t stoptime;
int tmp_timeout = 5;
if(dag_remote_jobs_running(d)) {
stoptime = time(0);
} else {
stoptime = time(0) + tmp_timeout;
}
jobid = batch_job_wait_timeout(local_queue, &info, stoptime);
if(jobid > 0) {
debug(D_MAKEFLOW_RUN, "Job %" PRIbjid " has returned.\n", jobid);
n = itable_remove(d->local_job_table, jobid);
if(n)
makeflow_node_complete(d, n, local_queue, &info);
}
}
/* Make periodic report to catalog. */
timestamp_t now = timestamp_get();
if(catalog_reporting_on && (((now-last_time) > (60 * 1000 * 1000)) || first_report==1)){ //if we are in reporting mode, and if either it's our first report, or 1 min has transpired
makeflow_catalog_summary(d, project,batch_queue_type,start);
last_time = now;
first_report = 0;
}
/* Rather than try to garbage collect after each time in this
* wait loop, perform garbage collection after a proportional
* amount of tasks have passed. */
makeflow_gc_barrier--;
if(makeflow_gc_method != MAKEFLOW_GC_NONE && makeflow_gc_barrier == 0) {
makeflow_gc(d, remote_queue, makeflow_gc_method, makeflow_gc_size, makeflow_gc_count);
makeflow_gc_barrier = MAX(d->nodeid_counter * makeflow_gc_task_ratio, 1);
}
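/*
Worked example added for clarity (the numbers are illustrative, not the
defaults): with makeflow_gc_task_ratio = 0.05 and a DAG of 200 rules, the
barrier resets to MAX(200 * 0.05, 1) = 10, so garbage collection runs once
every 10 passes through this wait loop instead of after every pass.
*/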
}
/* Always make final report to catalog when workflow ends. */
if(catalog_reporting_on){
makeflow_catalog_summary(d, project,batch_queue_type,start);
}
if(makeflow_abort_flag) {
makeflow_abort_all(d);
} else {
if(!makeflow_failed_flag && makeflow_gc_method != MAKEFLOW_GC_NONE) {
makeflow_gc(d,remote_queue,MAKEFLOW_GC_ALL,0,0);
}
}
}
/*
Signal handler to catch abort signals. Note that permissible actions in signal handlers are very limited, so we emit a message to the terminal and update a global variable noticed by makeflow_run.
*/
static void handle_abort(int sig)
{
static int abort_count_to_exit = 5;
abort_count_to_exit -= 1;
int fd = open("/dev/tty", O_WRONLY);
if (fd >= 0) {
char buf[256];
snprintf(buf, sizeof(buf), "Received signal %d, will try to clean up remote resources. Send signal %d more times to force exit.\n", sig, abort_count_to_exit);
write(fd, buf, strlen(buf));
close(fd);
}
if (abort_count_to_exit == 1)
signal(sig, SIG_DFL);
makeflow_abort_flag = 1;
}
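/*
Editor's sketch (hedged): the comment above notes that signal handlers may only
perform a very limited set of actions, so handle_abort just records the request
and lets the main loop react. The minimal form of that pattern is shown below,
assuming <signal.h> is already included, as the calls to signal() elsewhere in
this file imply. The example_ names are hypothetical and not part of makeflow;
in new code the flag would ideally be declared volatile sig_atomic_t so the
compiler cannot cache it across the wait loop.
*/
static volatile sig_atomic_t example_abort_flag = 0;

static void example_handle_abort_minimal(int sig)
{
	(void) sig;                /* unused */
	example_abort_flag = 1;    /* async-signal-safe: just record the request */
}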
static void set_archive_directory_string(char **archive_directory, char *option_arg)
{
if (*archive_directory != NULL) {
// need to free archive directory to avoid memory leak since it has already been set once
free(*archive_directory);
}
if (option_arg) {
*archive_directory = xxstrdup(option_arg);
} else {
char *uid = xxmalloc(16);
sprintf(uid, "%u", (unsigned) getuid());
*archive_directory = xxmalloc(sizeof(MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY) + 20 * sizeof(char));
sprintf(*archive_directory, "%s%s", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY, uid);
free(uid);
}
}
static void show_help_run(const char *cmd)
{
printf("Use: %s [options] <dagfile>\n", cmd);
printf("Frequently used options:\n\n");
printf(" %-30s Clean up: remove logfile and all targets. Optional specification [intermediates, outputs, cache] removes only the indicated files.\n", "-c,--clean=<type>");
printf(" %-30s Batch system type: (default is local)\n", "-T,--batch-type=<type>");
printf(" %-30s %s\n\n", "", batch_queue_type_string());
printf("Other options are:\n");
printf(" %-30s Advertise the master information to a catalog server.\n", "-a,--advertise");
printf(" %-30s Specify path to Amazon credentials (for use with -T amazon)\n", "--amazon-credentials");
printf(" %-30s Specify amazon-ami (for use with -T amazon)\n", "--amazon-ami");
printf(" %-30s Disable the check for AFS. (experts only.)\n", "-A,--disable-afs-check");
printf(" %-30s Add these options to all batch submit files.\n", "-B,--batch-options=<options>");
printf(" %-30s Set catalog server to <catalog>. Format: HOSTNAME:PORT \n", "-C,--catalog-server=<catalog>");
printf(" %-30s Enable debugging for this subsystem\n", "-d,--debug=<subsystem>");
printf(" %-30s Write summary of workflow to this file upon success or failure.\n", "-f,--summary-log=<file>");
printf(" %-30s Work Queue fast abort multiplier. (default is deactivated)\n", "-F,--wq-fast-abort=<#>");
printf(" %-30s Show this help screen.\n", "-h,--help");
printf(" %-30s Max number of local jobs to run at once. (default is # of cores)\n", "-j,--max-local=<#>");
printf(" %-30s Max number of remote jobs to run at once.\n", "-J,--max-remote=<#>");
printf(" (default %d for -Twq, %d otherwise.)\n", 10*MAX_REMOTE_JOBS_DEFAULT, MAX_REMOTE_JOBS_DEFAULT );
printf(" %-30s Use this file for the makeflow log. (default is X.makeflowlog)\n", "-l,--makeflow-log=<logfile>");
printf(" %-30s Use this file for the batch system log. (default is X.<type>log)\n", "-L,--batch-log=<logfile>");
printf(" %-30s Send summary of workflow to this email address upon success or failure.\n", "-m,--email=<email>");
printf(" %-30s Use this file as a mountlist.\n", " --mounts=<mountfile>");
printf(" %-30s Use this dir as the cache for file dependencies.\n", " --cache=<cache_dir>");
printf(" %-30s Set the project name to <project>\n", "-N,--project-name=<project>");
printf(" %-30s Send debugging to this file. (can also be :stderr, :stdout, :syslog, or :journal)\n", "-o,--debug-file=<file>");
printf(" %-30s Rotate debug file once it reaches this size.\n", " --debug-rotate-max=<bytes>");
printf(" %-30s Password file for authenticating workers.\n", " --password");
printf(" %-30s Port number to use with Work Queue. (default is %d, 0=arbitrary)\n", "-p,--port=<port>", WORK_QUEUE_DEFAULT_PORT);
printf(" %-30s Priority. Higher the value, higher the priority.\n", "-P,--priority=<integer>");
printf(" %-30s Automatically retry failed batch jobs up to %d times.\n", "-R,--retry", makeflow_retry_max);
printf(" %-30s Automatically retry failed batch jobs up to n times.\n", "-r,--retry-count=<n>");
printf(" %-30s Wait for output files to be created upto n seconds (e.g., to deal with NFS semantics).\n", " --wait-for-files-upto=<n>");
printf(" %-30s Time to retry failed batch job submission. (default is %ds)\n", "-S,--submission-timeout=<#>", makeflow_submit_timeout);
printf(" %-30s Work Queue keepalive timeout. (default is %ds)\n", "-t,--wq-keepalive-timeout=<#>", WORK_QUEUE_DEFAULT_KEEPALIVE_TIMEOUT);
printf(" %-30s Work Queue keepalive interval. (default is %ds)\n", "-u,--wq-keepalive-interval=<#>", WORK_QUEUE_DEFAULT_KEEPALIVE_INTERVAL);
printf(" %-30s Umbrella binary for running every rule in a makeflow.\n", " --umbrella-binary=<file>");
printf(" %-30s Umbrella log file prefix for running every rule in a makeflow. (default is <makefilename>.umbrella.log)\n", " --umbrella-log-prefix=<string>");
printf(" %-30s Umbrella execution mode for running every rule in a makeflow. (default is local)\n", " --umbrella-mode=<mode>");
printf(" %-30s Umbrella spec for running every rule in a makeflow.\n", " --umbrella-spec=<file>");
printf(" %-30s Show version string\n", "-v,--version");
printf(" %-30s Work Queue scheduling algorithm. (time|files|fcfs)\n", "-W,--wq-schedule=<mode>");
printf(" %-30s Working directory for the batch system.\n", " --working-dir=<dir|url>");
printf(" %-30s Wrap all commands with this prefix.\n", " --wrapper=<cmd>");
printf(" %-30s Wrapper command requires this input file.\n", " --wrapper-input=<cmd>");
printf(" %-30s Wrapper command produces this output file.\n", " --wrapper-output=<cmd>");
printf(" %-30s Change directory: chdir to enable executing the Makefile in other directory.\n", "-X,--change-directory");
printf(" %-30s Force failure on zero-length output files \n", "-z,--zero-length-error");
printf(" %-30s Select port at random and write it to this file.\n", "-Z,--port-file=<file>");
printf(" %-30s Disable batch system caching. (default is false)\n", " --disable-cache");
printf(" %-30s Add node id symbol tags in the makeflow log. (default is false)\n", " --log-verbose");
printf(" %-30s Run each task with a container based on this docker image.\n", "--docker=<image>");
printf(" %-30s Load docker image from the tar file.\n", "--docker-tar=<tar file>");
printf(" %-30s Indicate user trusts inputs exist.\n", "--skip-file-check");
printf(" %-30s Use Parrot to restrict access to the given inputs/outputs.\n", "--enforcement");
printf(" %-30s Path to parrot_run (defaults to current directory).\n", "--parrot-path=<path>");
printf(" %-30s Indicate preferred master connection. Choose one of by_ip or by_hostname. (default is by_ip)\n", "--work-queue-preferred-connection");
printf(" %-30s Use JSON format rather than Make-style format for the input file.\n", "--json");
printf(" %-30s Evaluate JX input. Implies --json\n", "--jx");
printf(" %-30s Evaluate the JX input in the given context.\n", "--jx-context");
printf(" %-30s Wrap execution of all rules in a singularity container.\n","--singularity=<image>");
printf(" %-30s Assume the given directory is a shared filesystem accessible to all workers.\n", "--shared-fs");
printf(" %-30s Archive results of makeflow in specified directory (default directory is %s)\n", "--archive=<dir>", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY);
printf(" %-30s Read/Use archived results of makeflow in specified directory, will not write to archive (default directory is %s)\n", "--archive-read=<dir>", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY);
printf(" %-30s Write archived results of makeflow in specified directory, will not read/use archived data (default directory is %s)\n", "--archive-write=<dir>", MAKEFLOW_ARCHIVE_DEFAULT_DIRECTORY);
printf(" %-30s Indicate the host name of preferred mesos master.\n", "--mesos-master=<hostname:port>");
printf(" %-30s Indicate the path to mesos python2 site-packages.\n", "--mesos-path=<path>");
printf(" %-30s Indicate the linking libraries for running mesos.\n", "--mesos-preload=<path>");
printf("\n*Monitor Options:\n\n");
printf(" %-30s Enable the resource monitor, and write the monitor logs to <dir>.\n", "--monitor=<dir>");
printf(" %-30s Set monitor interval to <#> seconds. (default is 1 second)\n", " --monitor-interval=<#>");
printf(" %-30s Enable monitor time series. (default is disabled)\n", " --monitor-with-time-series");
printf(" %-30s Enable monitoring of openened files. (default is disabled)\n", " --monitor-with-opened-files");
printf(" %-30s Format for monitor logs. (default %s)\n", " --monitor-log-fmt=<fmt>", DEFAULT_MONITOR_LOG_FORMAT);
}
int main(int argc, char *argv[])
{
int c;
const char *dagfile;
char *change_dir = NULL;
char *batchlogfilename = NULL;
const char *batch_submit_options = getenv("BATCH_OPTIONS");
makeflow_clean_depth clean_mode = MAKEFLOW_CLEAN_NONE;
char *email_summary_to = NULL;
int explicit_remote_jobs_max = 0;
int explicit_local_jobs_max = 0;
char *logfilename = NULL;
int port_set = 0;
timestamp_t runtime = 0;
int skip_afs_check = 0;
int should_read_archive = 0;
int should_write_to_archive = 0;
timestamp_t time_completed = 0;
const char *work_queue_keepalive_interval = NULL;
const char *work_queue_keepalive_timeout = NULL;
const char *work_queue_master_mode = "standalone";
const char *work_queue_port_file = NULL;
double wq_option_fast_abort_multiplier = -1.0;
const char *amazon_credentials = NULL;
const char *amazon_ami = NULL;
const char *priority = NULL;
char *work_queue_password = NULL;
char *wq_wait_queue_size = 0;
int did_explicit_auth = 0;
char *chirp_tickets = NULL;
char *working_dir = NULL;
char *work_queue_preferred_connection = NULL;
char *write_summary_to = NULL;
char *s;
char *log_dir = NULL;
char *log_format = NULL;
char *archive_directory = NULL;
category_mode_t allocation_mode = CATEGORY_ALLOCATION_MODE_FIXED;
shared_fs_list = list_create();
char *mesos_master = "127.0.0.1:5050/";
char *mesos_path = NULL;
char *mesos_preload = NULL;
int json_input = 0;
int jx_input = 0;
char *jx_context = NULL;
random_init();
debug_config(argv[0]);
s = getenv("MAKEFLOW_BATCH_QUEUE_TYPE");
if(s) {
batch_queue_type = batch_queue_type_from_string(s);
if(batch_queue_type == BATCH_QUEUE_TYPE_UNKNOWN) {
fprintf(stderr, "makeflow: unknown batch queue type: %s (from $MAKEFLOW_BATCH_QUEUE_TYPE)\n", s);
return 1;
}
}
s = getenv("WORK_QUEUE_MASTER_MODE");
if(s) {
work_queue_master_mode = s;
}
s = getenv("WORK_QUEUE_NAME");
if(s) {
project = xxstrdup(s);
}
s = getenv("WORK_QUEUE_FAST_ABORT_MULTIPLIER");
if(s) {
wq_option_fast_abort_multiplier = atof(s);
}
enum {
LONG_OPT_AUTH = UCHAR_MAX+1,
LONG_OPT_CACHE,
LONG_OPT_DEBUG_ROTATE_MAX,
LONG_OPT_DISABLE_BATCH_CACHE,
LONG_OPT_DOT_CONDENSE,
LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME,
LONG_OPT_GC_SIZE,
LONG_OPT_MONITOR,
LONG_OPT_MONITOR_INTERVAL,
LONG_OPT_MONITOR_LOG_NAME,
LONG_OPT_MONITOR_OPENED_FILES,
LONG_OPT_MONITOR_TIME_SERIES,
LONG_OPT_MOUNTS,
LONG_OPT_PASSWORD,
LONG_OPT_TICKETS,
LONG_OPT_VERBOSE_PARSING,
LONG_OPT_LOG_VERBOSE_MODE,
LONG_OPT_WORKING_DIR,
LONG_OPT_PREFERRED_CONNECTION,
LONG_OPT_WQ_WAIT_FOR_WORKERS,
LONG_OPT_WRAPPER,
LONG_OPT_WRAPPER_INPUT,
LONG_OPT_WRAPPER_OUTPUT,
LONG_OPT_DOCKER,
LONG_OPT_DOCKER_TAR,
LONG_OPT_AMAZON_CREDENTIALS,
LONG_OPT_AMAZON_AMI,
LONG_OPT_JSON,
LONG_OPT_JX,
LONG_OPT_JX_CONTEXT,
LONG_OPT_SKIP_FILE_CHECK,
LONG_OPT_UMBRELLA_BINARY,
LONG_OPT_UMBRELLA_LOG_PREFIX,
LONG_OPT_UMBRELLA_MODE,
LONG_OPT_UMBRELLA_SPEC,
LONG_OPT_ALLOCATION_MODE,
LONG_OPT_ENFORCEMENT,
LONG_OPT_PARROT_PATH,
LONG_OPT_SINGULARITY,
LONG_OPT_SHARED_FS,
LONG_OPT_ARCHIVE,
LONG_OPT_ARCHIVE_READ_ONLY,
LONG_OPT_ARCHIVE_WRITE_ONLY,
LONG_OPT_MESOS_MASTER,
LONG_OPT_MESOS_PATH,
LONG_OPT_MESOS_PRELOAD
};
static const struct option long_options_run[] = {
{"advertise", no_argument, 0, 'a'},
{"allocation", required_argument, 0, LONG_OPT_ALLOCATION_MODE},
{"auth", required_argument, 0, LONG_OPT_AUTH},
{"batch-log", required_argument, 0, 'L'},
{"batch-options", required_argument, 0, 'B'},
{"batch-type", required_argument, 0, 'T'},
{"cache", required_argument, 0, LONG_OPT_CACHE},
{"catalog-server", required_argument, 0, 'C'},
{"clean", optional_argument, 0, 'c'},
{"debug", required_argument, 0, 'd'},
{"debug-file", required_argument, 0, 'o'},
{"debug-rotate-max", required_argument, 0, LONG_OPT_DEBUG_ROTATE_MAX},
{"disable-afs-check", no_argument, 0, 'A'},
{"disable-cache", no_argument, 0, LONG_OPT_DISABLE_BATCH_CACHE},
{"email", required_argument, 0, 'm'},
{"wait-for-files-upto", required_argument, 0, LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME},
{"gc", required_argument, 0, 'g'},
{"gc-size", required_argument, 0, LONG_OPT_GC_SIZE},
{"gc-count", required_argument, 0, 'G'},
{"wait-for-files-upto", required_argument, 0, LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME},
{"help", no_argument, 0, 'h'},
{"makeflow-log", required_argument, 0, 'l'},
{"max-local", required_argument, 0, 'j'},
{"max-remote", required_argument, 0, 'J'},
{"monitor", required_argument, 0, LONG_OPT_MONITOR},
{"monitor-interval", required_argument, 0, LONG_OPT_MONITOR_INTERVAL},
{"monitor-log-name", required_argument, 0, LONG_OPT_MONITOR_LOG_NAME},
{"monitor-with-opened-files", no_argument, 0, LONG_OPT_MONITOR_OPENED_FILES},
{"monitor-with-time-series", no_argument, 0, LONG_OPT_MONITOR_TIME_SERIES},
{"mounts", required_argument, 0, LONG_OPT_MOUNTS},
{"password", required_argument, 0, LONG_OPT_PASSWORD},
{"port", required_argument, 0, 'p'},
{"port-file", required_argument, 0, 'Z'},
{"priority", required_argument, 0, 'P'},
{"project-name", required_argument, 0, 'N'},
{"retry", no_argument, 0, 'R'},
{"retry-count", required_argument, 0, 'r'},
{"shared-fs", required_argument, 0, LONG_OPT_SHARED_FS},
{"show-output", no_argument, 0, 'O'},
{"submission-timeout", required_argument, 0, 'S'},
{"summary-log", required_argument, 0, 'f'},
{"tickets", required_argument, 0, LONG_OPT_TICKETS},
{"version", no_argument, 0, 'v'},
{"log-verbose", no_argument, 0, LONG_OPT_LOG_VERBOSE_MODE},
{"working-dir", required_argument, 0, LONG_OPT_WORKING_DIR},
{"skip-file-check", no_argument, 0, LONG_OPT_SKIP_FILE_CHECK},
{"umbrella-binary", required_argument, 0, LONG_OPT_UMBRELLA_BINARY},
{"umbrella-log-prefix", required_argument, 0, LONG_OPT_UMBRELLA_LOG_PREFIX},
{"umbrella-mode", required_argument, 0, LONG_OPT_UMBRELLA_MODE},
{"umbrella-spec", required_argument, 0, LONG_OPT_UMBRELLA_SPEC},
{"work-queue-preferred-connection", required_argument, 0, LONG_OPT_PREFERRED_CONNECTION},
{"wq-estimate-capacity", no_argument, 0, 'E'},
{"wq-fast-abort", required_argument, 0, 'F'},
{"wq-keepalive-interval", required_argument, 0, 'u'},
{"wq-keepalive-timeout", required_argument, 0, 't'},
{"wq-schedule", required_argument, 0, 'W'},
{"wq-wait-queue-size", required_argument, 0, LONG_OPT_WQ_WAIT_FOR_WORKERS},
{"wrapper", required_argument, 0, LONG_OPT_WRAPPER},
{"wrapper-input", required_argument, 0, LONG_OPT_WRAPPER_INPUT},
{"wrapper-output", required_argument, 0, LONG_OPT_WRAPPER_OUTPUT},
{"zero-length-error", no_argument, 0, 'z'},
{"change-directory", required_argument, 0, 'X'},
{"docker", required_argument, 0, LONG_OPT_DOCKER},
{"docker-tar", required_argument, 0, LONG_OPT_DOCKER_TAR},
{"amazon-credentials", required_argument, 0, LONG_OPT_AMAZON_CREDENTIALS},
{"amazon-ami", required_argument, 0, LONG_OPT_AMAZON_AMI},
{"json", no_argument, 0, LONG_OPT_JSON},
{"jx", no_argument, 0, LONG_OPT_JX},
{"jx-context", required_argument, 0, LONG_OPT_JX_CONTEXT},
{"enforcement", no_argument, 0, LONG_OPT_ENFORCEMENT},
{"parrot-path", required_argument, 0, LONG_OPT_PARROT_PATH},
{"singularity", required_argument, 0, LONG_OPT_SINGULARITY},
{"archive", optional_argument, 0, LONG_OPT_ARCHIVE},
{"archive-read", optional_argument, 0, LONG_OPT_ARCHIVE_READ_ONLY},
{"archive-write", optional_argument, 0, LONG_OPT_ARCHIVE_WRITE_ONLY},
{"mesos-master", required_argument, 0, LONG_OPT_MESOS_MASTER},
{"mesos-path", required_argument, 0, LONG_OPT_MESOS_PATH},
{"mesos-preload", required_argument, 0, LONG_OPT_MESOS_PRELOAD},
{0, 0, 0, 0}
};
static const char option_string_run[] = "aAB:c::C:d:Ef:F:g:G:hj:J:l:L:m:M:N:o:Op:P:r:RS:t:T:u:vW:X:zZ:";
while((c = getopt_long(argc, argv, option_string_run, long_options_run, NULL)) >= 0) {
switch (c) {
case 'a':
work_queue_master_mode = "catalog";
break;
case 'A':
skip_afs_check = 1;
break;
case 'B':
batch_submit_options = optarg;
break;
case 'c':
clean_mode = MAKEFLOW_CLEAN_ALL;
if(optarg){
if(strcasecmp(optarg, "intermediates") == 0){
clean_mode = MAKEFLOW_CLEAN_INTERMEDIATES;
} else if(strcasecmp(optarg, "outputs") == 0){
clean_mode = MAKEFLOW_CLEAN_OUTPUTS;
} else if(strcasecmp(optarg, "cache") == 0){
clean_mode = MAKEFLOW_CLEAN_CACHE;
} else if(strcasecmp(optarg, "all") != 0){
fprintf(stderr, "makeflow: unknown clean option %s", optarg);
exit(1);
}
}
break;
case 'C':
setenv("CATALOG_HOST", optarg, 1);
break;
case 'd':
debug_flags_set(optarg);
break;
case 'E':
// This option is deprecated. Capacity estimation is now on by default.
break;
case LONG_OPT_AUTH:
if (!auth_register_byname(optarg))
fatal("could not register authentication method `%s': %s", optarg, strerror(errno));
did_explicit_auth = 1;
break;
case LONG_OPT_TICKETS:
chirp_tickets = strdup(optarg);
break;
case 'f':
write_summary_to = xxstrdup(optarg);
break;
case 'F':
wq_option_fast_abort_multiplier = atof(optarg);
break;
case 'g':
if(strcasecmp(optarg, "none") == 0) {
makeflow_gc_method = MAKEFLOW_GC_NONE;
} else if(strcasecmp(optarg, "ref_count") == 0) {
makeflow_gc_method = MAKEFLOW_GC_COUNT;
if(makeflow_gc_count < 0)
makeflow_gc_count = 16; /* Try to collect at most 16 files. */
} else if(strcasecmp(optarg, "on_demand") == 0) {
makeflow_gc_method = MAKEFLOW_GC_ON_DEMAND;
if(makeflow_gc_count < 0)
makeflow_gc_count = 16; /* Try to collect at most 16 files. */
} else if(strcasecmp(optarg, "all") == 0) {
makeflow_gc_method = MAKEFLOW_GC_ALL;
if(makeflow_gc_count < 0)
makeflow_gc_count = 1 << 14; /* Inode threshold of 2^14. */
} else {
fprintf(stderr, "makeflow: invalid garbage collection method: %s\n", optarg);
exit(1);
}
break;
case LONG_OPT_GC_SIZE:
makeflow_gc_size = string_metric_parse(optarg);
break;
case 'G':
makeflow_gc_count = atoi(optarg);
break;
case LONG_OPT_FILE_CREATION_PATIENCE_WAIT_TIME:
file_creation_patience_wait_time = MAX(0,atoi(optarg));
break;
case 'h':
show_help_run(argv[0]);
return 0;
case 'j':
explicit_local_jobs_max = atoi(optarg);
break;
case 'J':
explicit_remote_jobs_max = atoi(optarg);
break;
case 'l':
logfilename = xxstrdup(optarg);
break;
case 'L':
batchlogfilename = xxstrdup(optarg);
break;
case 'm':
email_summary_to = xxstrdup(optarg);
break;
case LONG_OPT_MONITOR:
if (!monitor) monitor = makeflow_monitor_create();
if(log_dir) free(log_dir);
log_dir = xxstrdup(optarg);
break;
case LONG_OPT_MONITOR_INTERVAL:
if (!monitor) monitor = makeflow_monitor_create();
monitor->interval = atoi(optarg);
break;
case LONG_OPT_MONITOR_TIME_SERIES:
if (!monitor) monitor = makeflow_monitor_create();
monitor->enable_time_series = 1;
break;
case LONG_OPT_MONITOR_OPENED_FILES:
if (!monitor) monitor = makeflow_monitor_create();
monitor->enable_list_files = 1;
break;
case LONG_OPT_MONITOR_LOG_NAME:
if (!monitor) monitor = makeflow_monitor_create();
if(log_format) free(log_format);
log_format = xxstrdup(optarg);
break;
case LONG_OPT_CACHE:
mount_cache = xxstrdup(optarg);
break;
case LONG_OPT_MOUNTS:
mountfile = xxstrdup(optarg);
break;
case LONG_OPT_AMAZON_CREDENTIALS:
amazon_credentials = xxstrdup(optarg);
break;
case LONG_OPT_AMAZON_AMI:
amazon_ami = xxstrdup(optarg);
break;
case 'M':
case 'N':
free(project);
project = xxstrdup(optarg);
work_queue_master_mode = "catalog";
catalog_reporting_on = 1; //set to true
break;
case 'o':
debug_config_file(optarg);
break;
case 'p':
port_set = 1;
port = atoi(optarg);
break;
case 'P':
priority = optarg;
break;
case 'r':
makeflow_retry_flag = 1;
makeflow_retry_max = atoi(optarg);
break;
case 'R':
makeflow_retry_flag = 1;
break;
case 'S':
makeflow_submit_timeout = atoi(optarg);
break;
case 't':
work_queue_keepalive_timeout = optarg;
break;
case 'T':
batch_queue_type = batch_queue_type_from_string(optarg);
if(batch_queue_type == BATCH_QUEUE_TYPE_UNKNOWN) {
fprintf(stderr, "makeflow: unknown batch queue type: %s\n", optarg);
return 1;
}
break;
case 'u':
work_queue_keepalive_interval = optarg;
break;
case 'v':
cctools_version_print(stdout, argv[0]);
return 0;
case 'W':
if(!strcmp(optarg, "files")) {
wq_option_scheduler = WORK_QUEUE_SCHEDULE_FILES;
} else if(!strcmp(optarg, "time")) {
wq_option_scheduler = WORK_QUEUE_SCHEDULE_TIME;
} else if(!strcmp(optarg, "fcfs")) {
wq_option_scheduler = WORK_QUEUE_SCHEDULE_FCFS;
} else {
fprintf(stderr, "makeflow: unknown scheduling mode %s\n", optarg);
return 1;
}
break;
case 'z':
output_len_check = 1;
break;
case 'Z':
work_queue_port_file = optarg;
port = 0;
port_set = 1; //WQ is going to set the port, so we continue as if already set.
break;
case LONG_OPT_PASSWORD:
if(copy_file_to_buffer(optarg, &work_queue_password, NULL) < 0) {
fprintf(stderr, "makeflow: couldn't open %s: %s\n", optarg, strerror(errno));
return 1;
}
break;
case LONG_OPT_DISABLE_BATCH_CACHE:
cache_mode = 0;
break;
case LONG_OPT_WQ_WAIT_FOR_WORKERS:
wq_wait_queue_size = optarg;
break;
case LONG_OPT_WORKING_DIR:
free(working_dir);
working_dir = xxstrdup(optarg);
break;
case LONG_OPT_PREFERRED_CONNECTION:
free(work_queue_preferred_connection);
work_queue_preferred_connection = xxstrdup(optarg);
break;
case LONG_OPT_DEBUG_ROTATE_MAX:
debug_config_file_size(string_metric_parse(optarg));
break;
case LONG_OPT_LOG_VERBOSE_MODE:
log_verbose_mode = 1;
break;
case LONG_OPT_WRAPPER:
if(!wrapper) wrapper = makeflow_wrapper_create();
makeflow_wrapper_add_command(wrapper, optarg);
break;
case LONG_OPT_WRAPPER_INPUT:
if(!wrapper) wrapper = makeflow_wrapper_create();
makeflow_wrapper_add_input_file(wrapper, optarg);
break;
case LONG_OPT_WRAPPER_OUTPUT:
if(!wrapper) wrapper = makeflow_wrapper_create();
makeflow_wrapper_add_output_file(wrapper, optarg);
break;
case LONG_OPT_SHARED_FS:
assert(shared_fs_list);
if (optarg[0] != '/') fatal("Shared fs must be specified as an absolute path");
list_push_head(shared_fs_list, xxstrdup(optarg));
break;
case LONG_OPT_DOCKER:
if(!wrapper) wrapper = makeflow_wrapper_create();
container_mode = CONTAINER_MODE_DOCKER;
container_image = xxstrdup(optarg);
break;
case LONG_OPT_SKIP_FILE_CHECK:
skip_file_check = 1;
break;
case LONG_OPT_DOCKER_TAR:
container_image_tar = xxstrdup(optarg);
break;
case LONG_OPT_SINGULARITY:
if(!wrapper) wrapper = makeflow_wrapper_create();
container_mode = CONTAINER_MODE_SINGULARITY;
container_image = xxstrdup(optarg);
break;
case LONG_OPT_ALLOCATION_MODE:
if(!strcmp(optarg, "throughput")) {
allocation_mode = CATEGORY_ALLOCATION_MODE_MAX_THROUGHPUT;
} else if(!strcmp(optarg, "waste")) {
allocation_mode = CATEGORY_ALLOCATION_MODE_MIN_WASTE;
} else if(!strcmp(optarg, "fixed")) {
allocation_mode = CATEGORY_ALLOCATION_MODE_FIXED;
} else {
fatal("Allocation mode '%s' is not valid. Use one of: throughput waste fixed");
}
case LONG_OPT_JX:
jx_input = 1;
case LONG_OPT_JSON:
json_input = 1;
break;
case LONG_OPT_JX_CONTEXT:
jx_context = xxstrdup(optarg);
break;
case LONG_OPT_UMBRELLA_BINARY:
if(!umbrella) umbrella = makeflow_wrapper_umbrella_create();
makeflow_wrapper_umbrella_set_binary(umbrella, (const char *)xxstrdup(optarg));
break;
case LONG_OPT_UMBRELLA_LOG_PREFIX:
if(!umbrella) umbrella = makeflow_wrapper_umbrella_create();
makeflow_wrapper_umbrella_set_log_prefix(umbrella, (const char *)xxstrdup(optarg));
break;
case LONG_OPT_UMBRELLA_MODE:
if(!umbrella) umbrella = makeflow_wrapper_umbrella_create();
makeflow_wrapper_umbrella_set_mode(umbrella, (const char *)xxstrdup(optarg));
break;
case LONG_OPT_UMBRELLA_SPEC:
if(!umbrella) umbrella = makeflow_wrapper_umbrella_create();
makeflow_wrapper_umbrella_set_spec(umbrella, (const char *)xxstrdup(optarg));
break;
case LONG_OPT_MESOS_MASTER:
mesos_master = xxstrdup(optarg);
break;
case LONG_OPT_MESOS_PATH:
mesos_path = xxstrdup(optarg);
break;
case LONG_OPT_MESOS_PRELOAD:
mesos_preload = xxstrdup(optarg);
break;
case LONG_OPT_ARCHIVE:
should_read_archive = 1;
should_write_to_archive = 1;
set_archive_directory_string(&archive_directory, optarg);
break;
case LONG_OPT_ARCHIVE_READ_ONLY:
should_read_archive = 1;
set_archive_directory_string(&archive_directory, optarg);
break;
case LONG_OPT_ARCHIVE_WRITE_ONLY:
should_write_to_archive = 1;
set_archive_directory_string(&archive_directory, optarg);
break;
case 'X':
change_dir = optarg;
break;
case LONG_OPT_ENFORCEMENT:
if(!enforcer) enforcer = makeflow_wrapper_create();
break;
case LONG_OPT_PARROT_PATH:
parrot_path = xxstrdup(optarg);
break;
default:
show_help_run(argv[0]);
return 1;
}
}
cctools_version_debug(D_MAKEFLOW_RUN, argv[0]);
if(!did_explicit_auth)
auth_register_all();
if(chirp_tickets) {
auth_ticket_load(chirp_tickets);
free(chirp_tickets);
} else {
auth_ticket_load(NULL);
}
if (enforcer && umbrella) {
fatal("enforcement and Umbrella are mutually exclusive\n");
}
if((argc - optind) != 1) {
int rv = access("./Makeflow", R_OK);
if(rv < 0) {
fprintf(stderr, "makeflow: No makeflow specified and file \"./Makeflow\" could not be found.\n");
fprintf(stderr, "makeflow: Run \"%s -h\" for help with options.\n", argv[0]);
return 1;
}
dagfile = "./Makeflow";
} else {
dagfile = argv[optind];
}
if(batch_queue_type == BATCH_QUEUE_TYPE_WORK_QUEUE) {
if(strcmp(work_queue_master_mode, "catalog") == 0 && project == NULL) {
fprintf(stderr, "makeflow: Makeflow running in catalog mode. Please use '-N' option to specify the name of this project.\n");
fprintf(stderr, "makeflow: Run \"makeflow -h\" for help with options.\n");
return 1;
}
// Use Work Queue default port in standalone mode when port is not
// specified with -p option. In Work Queue catalog mode, Work Queue
// would choose an arbitrary port when port is not explicitly specified.
if(!port_set && strcmp(work_queue_master_mode, "standalone") == 0) {
port_set = 1;
port = WORK_QUEUE_DEFAULT_PORT;
}
if(port_set) {
char *value;
value = string_format("%d", port);
setenv("WORK_QUEUE_PORT", value, 1);
free(value);
}
}
if(!logfilename)
logfilename = string_format("%s.makeflowlog", dagfile);
printf("parsing %s...\n",dagfile);
struct dag *d;
if (json_input) {
struct jx *dag = NULL;
struct jx *ctx = NULL;
dag = jx_parse_file(dagfile);
if (!dag) fatal("failed to parse dagfile");
if (jx_input && jx_context) {
printf("using JX context %s\n", jx_context);
struct jx *t = jx_parse_file(jx_context);
if (!t) fatal("failed to parse context");
ctx = jx_eval(t, NULL);
jx_delete(t);
}
if (jx_input) {
struct jx *t = dag;
dag = jx_eval(t, ctx);
jx_delete(t);
jx_delete(ctx);
}
d = dag_from_jx(dag);
jx_delete(dag);
// JX doesn't really use errno, so give something generic
errno = EINVAL;
} else {
d = dag_from_file(dagfile);
}
if(!d) {
fatal("makeflow: couldn't load %s: %s\n", dagfile, strerror(errno));
}
d->allocation_mode = allocation_mode;
// Makeflows running LOCAL batch type have only one queue that behaves as if remote
// This forces -J vs -j to behave correctly
if(batch_queue_type == BATCH_QUEUE_TYPE_LOCAL) {
explicit_remote_jobs_max = explicit_local_jobs_max;
}
if(explicit_local_jobs_max) {
local_jobs_max = explicit_local_jobs_max;
} else {
local_jobs_max = load_average_get_cpus();
}
if(explicit_remote_jobs_max) {
remote_jobs_max = explicit_remote_jobs_max;
} else {
if(batch_queue_type == BATCH_QUEUE_TYPE_LOCAL) {
remote_jobs_max = load_average_get_cpus();
} else if(batch_queue_type == BATCH_QUEUE_TYPE_WORK_QUEUE) {
remote_jobs_max = 10 * MAX_REMOTE_JOBS_DEFAULT;
} else {
remote_jobs_max = MAX_REMOTE_JOBS_DEFAULT;
}
}
s = getenv("MAKEFLOW_MAX_REMOTE_JOBS");
if(s) {
remote_jobs_max = MIN(remote_jobs_max, atoi(s));
}
s = getenv("MAKEFLOW_MAX_LOCAL_JOBS");
if(s) {
int n = atoi(s);
local_jobs_max = MIN(local_jobs_max, n);
if(batch_queue_type == BATCH_QUEUE_TYPE_LOCAL) {
remote_jobs_max = MIN(local_jobs_max, n);
}
}
remote_queue = batch_queue_create(batch_queue_type);
if(!remote_queue) {
fprintf(stderr, "makeflow: couldn't create batch queue.\n");
if(port != 0)
fprintf(stderr, "makeflow: perhaps port %d is already in use?\n", port);
exit(EXIT_FAILURE);
}
if(!batchlogfilename) {
if(batch_queue_supports_feature(remote_queue, "batch_log_name")){
batchlogfilename = string_format(batch_queue_supports_feature(remote_queue, "batch_log_name"), dagfile);
} else {
batchlogfilename = string_format("%s.batchlog", dagfile);
}
}
if(batch_queue_type == BATCH_QUEUE_TYPE_MESOS) {
batch_queue_set_option(remote_queue, "mesos-path", mesos_path);
batch_queue_set_option(remote_queue, "mesos-master", mesos_master);
batch_queue_set_option(remote_queue, "mesos-preload", mesos_preload);
}
if(batch_queue_type == BATCH_QUEUE_TYPE_DRYRUN) {
FILE *file = fopen(batchlogfilename,"w");
if(!file) fatal("unable to open log file %s: %s\n", batchlogfilename, strerror(errno));
fprintf(file, "#!/bin/sh\n");
fprintf(file, "set -x\n");
fprintf(file, "set -e\n");
fprintf(file, "\n# %s version %s (released %s)\n\n", argv[0], CCTOOLS_VERSION, CCTOOLS_RELEASE_DATE);
fclose(file);
}
batch_queue_set_logfile(remote_queue, batchlogfilename);
batch_queue_set_option(remote_queue, "batch-options", batch_submit_options);
batch_queue_set_option(remote_queue, "skip-afs-check", skip_afs_check ? "yes" : "no");
batch_queue_set_option(remote_queue, "password", work_queue_password);
batch_queue_set_option(remote_queue, "master-mode", work_queue_master_mode);
batch_queue_set_option(remote_queue, "name", project);
batch_queue_set_option(remote_queue, "priority", priority);
batch_queue_set_option(remote_queue, "keepalive-interval", work_queue_keepalive_interval);
batch_queue_set_option(remote_queue, "keepalive-timeout", work_queue_keepalive_timeout);
batch_queue_set_option(remote_queue, "caching", cache_mode ? "yes" : "no");
batch_queue_set_option(remote_queue, "wait-queue-size", wq_wait_queue_size);
batch_queue_set_option(remote_queue, "amazon-credentials", amazon_credentials);
batch_queue_set_option(remote_queue, "amazon-ami", amazon_ami);
batch_queue_set_option(remote_queue, "working-dir", working_dir);
batch_queue_set_option(remote_queue, "master-preferred-connection", work_queue_preferred_connection);
char *fa_multiplier = string_format("%f", wq_option_fast_abort_multiplier);
batch_queue_set_option(remote_queue, "fast-abort", fa_multiplier);
free(fa_multiplier);
/* Do not create a local queue for systems where local and remote are the same. */
if(!batch_queue_supports_feature(remote_queue, "local_job_queue")) {
local_queue = 0;
} else {
local_queue = batch_queue_create(BATCH_QUEUE_TYPE_LOCAL);
if(!local_queue) {
fatal("couldn't create local job queue.");
}
}
/* Remote storage modes do not (yet) support measuring storage for garbage collection. */
if(makeflow_gc_method == MAKEFLOW_GC_SIZE && !batch_queue_supports_feature(remote_queue, "gc_size")) {
makeflow_gc_method = MAKEFLOW_GC_ALL;
}
/* Set dag_node->umbrella_spec */
if(!clean_mode) {
struct dag_node *cur;
cur = d->nodes;
while(cur) {
struct dag_variable_lookup_set s = {d, cur->category, cur, NULL};
char *spec = NULL;
spec = dag_variable_lookup_string("SPEC", &s);
if(spec) {
debug(D_MAKEFLOW_RUN, "setting dag_node->umbrella_spec (rule %d) from the makefile ...\n", cur->nodeid);
dag_node_set_umbrella_spec(cur, xxstrdup(spec));
} else if(umbrella && umbrella->spec) {
debug(D_MAKEFLOW_RUN, "setting dag_node->umbrella_spec (rule %d) from the --umbrella_spec option ...\n", cur->nodeid);
dag_node_set_umbrella_spec(cur, umbrella->spec);
}
free(spec);
cur = cur->next;
}
debug(D_MAKEFLOW_RUN, "makeflow_wrapper_umbrella_preparation...\n");
// When the user specifies umbrella specs in a makefile, but does not use any `--umbrella...` option,
// an umbrella wrapper is created to hold the default values for umbrella-related settings such as
// the log_prefix and the default umbrella execution engine.
if(!umbrella) umbrella = makeflow_wrapper_umbrella_create();
makeflow_wrapper_umbrella_preparation(umbrella, d);
}
if(enforcer) {
makeflow_wrapper_enforcer_init(enforcer, parrot_path);
}
makeflow_parse_input_outputs(d);
makeflow_prepare_nested_jobs(d);
if (change_dir)
chdir(change_dir);
/* Prepare the input files specified in the mountfile. */
if(mountfile && !clean_mode) {
/* check the validity of the mountfile and load the info from the mountfile into the dag */
printf("checking the consistency of the mountfile ...\n");
if(makeflow_mounts_parse_mountfile(mountfile, d)) {
fprintf(stderr, "Failed to parse the mountfile: %s.\n", mountfile);
free(mountfile);
return -1;
}
free(mountfile);
use_mountfile = 1;
}
printf("checking %s for consistency...\n",dagfile);
if(!makeflow_check(d)) {
exit(EXIT_FAILURE);
}
if(!makeflow_check_batch_consistency(d) && clean_mode == MAKEFLOW_CLEAN_NONE) {
exit(EXIT_FAILURE);
}
printf("%s has %d rules.\n",dagfile,d->nodeid_counter);
setlinebuf(stdout);
setlinebuf(stderr);
if(mount_cache) d->cache_dir = mount_cache;
/* If the user uses the --cache option to specify the mount cache dir and the log file also has
 * a cache dir logged, the two dirs must be the same; otherwise exit.
 */
if(makeflow_log_recover(d, logfilename, log_verbose_mode, remote_queue, clean_mode, skip_file_check )) {
dag_mount_clean(d);
exit(EXIT_FAILURE);
}
/* This check must happen after makeflow_log_recover, which may load the cache_dir info into d->cache_dir.
 * It must happen before makeflow_mounts_install to guarantee that the program ends before any mount is copied if any target is invalid.
 */
if(use_mountfile) {
if(makeflow_mount_check_target(d)) {
dag_mount_clean(d);
exit(EXIT_FAILURE);
}
}
if(use_mountfile && !clean_mode) {
if(makeflow_mounts_install(d)) {
fprintf(stderr, "Failed to install the dependencies specified in the mountfile!\n");
dag_mount_clean(d);
exit(EXIT_FAILURE);
}
}
if(monitor) {
if(!log_dir)
fatal("Monitor mode was enabled, but a log output directory was not specified (use --monitor=<dir>)");
if(!log_format)
log_format = xxstrdup(DEFAULT_MONITOR_LOG_FORMAT);
if(monitor->interval < 1)
fatal("Monitoring interval should be positive.");
makeflow_prepare_for_monitoring(d, monitor, remote_queue, log_dir, log_format);
free(log_dir);
free(log_format);
}
struct dag_file *f = dag_file_lookup_or_create(d, batchlogfilename);
makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXPECT);
if(batch_queue_supports_feature(remote_queue, "batch_log_transactions")) {
const char *transactions = batch_queue_get_option(remote_queue, "batch_log_transactions_name");
f = dag_file_lookup_or_create(d, transactions);
makeflow_log_file_state_change(d, f, DAG_FILE_STATE_EXPECT);
}
if(clean_mode != MAKEFLOW_CLEAN_NONE) {
printf("cleaning filesystem...\n");
if(makeflow_clean(d, remote_queue, clean_mode)) {
fprintf(stderr, "Failed to clean up makeflow!\n");
exit(EXIT_FAILURE);
}
if(clean_mode == MAKEFLOW_CLEAN_ALL) {
unlink(logfilename);
}
exit(0);
}
/* this func call guarantees the mount fields set up from the info of the makeflow log file are cleaned up
* even if the user does not use --mounts or -c option.
*/
dag_mount_clean(d);
printf("starting workflow....\n");
port = batch_queue_port(remote_queue);
if(work_queue_port_file)
opts_write_port_file(work_queue_port_file, port);
if(port > 0)
printf("listening for workers on port %d.\n", port);
signal(SIGINT, handle_abort);
signal(SIGQUIT, handle_abort);
signal(SIGTERM, handle_abort);
makeflow_log_started_event(d);
runtime = timestamp_get();
if (container_mode == CONTAINER_MODE_DOCKER) {
makeflow_wrapper_docker_init(wrapper, container_image, container_image_tar);
}else if(container_mode == CONTAINER_MODE_SINGULARITY){
makeflow_wrapper_singularity_init(wrapper, container_image);
}
d->archive_directory = archive_directory;
d->should_read_archive = should_read_archive;
d->should_write_to_archive = should_write_to_archive;
makeflow_run(d);
time_completed = timestamp_get();
runtime = time_completed - runtime;
if(local_queue)
batch_queue_delete(local_queue);
/*
 * Set the abort and failed flags for batch_job_mesos mode.
 * batch_queue_delete(struct batch_queue *q) will call
 * batch_queue_mesos_free(struct batch_queue *q), defined in
 * batch_job/src/batch_job_mesos.c, which checks the abort and
 * failed status of the batch_queue and informs the makeflow
 * mesos scheduler.
 */
if (batch_queue_type == BATCH_QUEUE_TYPE_MESOS) {
batch_queue_set_int_option(remote_queue, "batch-queue-abort-flag", (int)makeflow_abort_flag);
batch_queue_set_int_option(remote_queue, "batch-queue-failed-flag", (int)makeflow_failed_flag);
}
batch_queue_delete(remote_queue);
if(write_summary_to || email_summary_to)
makeflow_summary_create(d, write_summary_to, email_summary_to, runtime, time_completed, argc, argv, dagfile, remote_queue, makeflow_abort_flag, makeflow_failed_flag );
/* XXX better to write created files to log, then delete those listed in log. */
if (container_mode == CONTAINER_MODE_DOCKER) {
unlink(CONTAINER_DOCKER_SH);
}else if(container_mode == CONTAINER_MODE_SINGULARITY){
unlink(CONTAINER_SINGULARITY_SH);
}
if(makeflow_abort_flag) {
makeflow_log_aborted_event(d);
fprintf(stderr, "workflow was aborted.\n");
exit(EXIT_FAILURE);
} else if(makeflow_failed_flag) {
makeflow_log_failed_event(d);
fprintf(stderr, "workflow failed.\n");
exit(EXIT_FAILURE);
} else {
makeflow_log_completed_event(d);
printf("nothing left to do.\n");
exit(EXIT_SUCCESS);
}
free(archive_directory);
return 0;
}
/* vim: set noexpandtab tabstop=4: */
| 1 | 13,932 | If you move the other functions, this should also be moved to `makeflow_gc.h`. | cooperative-computing-lab-cctools | c |
@@ -55,8 +55,12 @@ def _send_listens_to_queue(listen_type, listens):
for listen in listens:
if listen_type == LISTEN_TYPE_PLAYING_NOW:
try:
- expire_time = listen["track_metadata"]["additional_info"].get("duration",
- current_app.config['PLAYING_NOW_MAX_DURATION'])
+ if 'duration' in listen['track_metadata']['additional_info']:
+ expire_time = listen['track_metadata']['additional_info']['duration']
+ elif 'duration_ms' in listen['track_metadata']['additional_info']:
+ expire_time = listen['track_metadata']['additional_info']['duration_ms'] // 1000
+ else:
+ expire_time = current_app.config['PLAYING_NOW_MAX_DURATION']
redis_connection._redis.put_playing_now(listen['user_id'], listen, expire_time)
except Exception as e:
current_app.logger.error("Redis rpush playing_now write error: " + str(e)) | 1 | import listenbrainz.webserver.rabbitmq_connection as rabbitmq_connection
import listenbrainz.webserver.redis_connection as redis_connection
import pika
import pika.exceptions
import sys
import time
import ujson
import uuid
from flask import current_app
from listenbrainz.listen import Listen
from listenbrainz.webserver import API_LISTENED_AT_ALLOWED_SKEW
from listenbrainz.webserver.external import messybrainz
from werkzeug.exceptions import InternalServerError, ServiceUnavailable, BadRequest
#: Maximum overall listen size in bytes, to prevent egregious spamming.
MAX_LISTEN_SIZE = 10240
#: The maximum number of tags per listen.
MAX_TAGS_PER_LISTEN = 50
#: The maximum length of a tag
MAX_TAG_SIZE = 64
#: The maximum number of listens returned in a single GET request.
MAX_ITEMS_PER_GET = 100
#: The default number of listens returned in a single GET request.
DEFAULT_ITEMS_PER_GET = 25
MAX_ITEMS_PER_MESSYBRAINZ_LOOKUP = 10
# Define the values for types of listens
LISTEN_TYPE_SINGLE = 1
LISTEN_TYPE_IMPORT = 2
LISTEN_TYPE_PLAYING_NOW = 3
def insert_payload(payload, user, listen_type=LISTEN_TYPE_IMPORT):
""" Convert the payload into augmented listens then submit them.
Returns: augmented_listens
"""
try:
augmented_listens = _get_augmented_listens(payload, user, listen_type)
_send_listens_to_queue(listen_type, augmented_listens)
except (InternalServerError, ServiceUnavailable) as e:
raise
except Exception as e:
print(e)
return augmented_listens
def _send_listens_to_queue(listen_type, listens):
submit = []
for listen in listens:
if listen_type == LISTEN_TYPE_PLAYING_NOW:
try:
expire_time = listen["track_metadata"]["additional_info"].get("duration",
current_app.config['PLAYING_NOW_MAX_DURATION'])
redis_connection._redis.put_playing_now(listen['user_id'], listen, expire_time)
except Exception as e:
current_app.logger.error("Redis rpush playing_now write error: " + str(e))
raise ServiceUnavailable("Cannot record playing_now at this time.")
else:
submit.append(listen)
if submit:
# check if rabbitmq connection exists or not
# and if not then try to connect
try:
rabbitmq_connection.init_rabbitmq_connection(current_app)
except ConnectionError as e:
current_app.logger.error('Cannot connect to RabbitMQ: %s' % str(e))
raise ServiceUnavailable('Cannot submit listens to queue, please try again later.')
publish_data_to_queue(
data=submit,
exchange=current_app.config['INCOMING_EXCHANGE'],
queue=current_app.config['INCOMING_QUEUE'],
error_msg='Cannot submit listens to queue, please try again later.',
)
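# Editor's sketch (hedged): the patch at the top of this record derives the
# playing-now expiry from 'duration', then 'duration_ms' (milliseconds), and
# finally the PLAYING_NOW_MAX_DURATION config default. One way to keep that
# choice in a single place is a small helper like the one below; the name
# _playing_now_expire_time is hypothetical and not part of this module.
def _playing_now_expire_time(listen):
    additional_info = listen['track_metadata'].get('additional_info', {})
    if 'duration' in additional_info:
        return additional_info['duration']
    if 'duration_ms' in additional_info:
        return additional_info['duration_ms'] // 1000
    return current_app.config['PLAYING_NOW_MAX_DURATION']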
def validate_listen(listen, listen_type):
"""Make sure that required keys are present, filled out and not too large."""
if listen_type in (LISTEN_TYPE_SINGLE, LISTEN_TYPE_IMPORT):
if 'listened_at' not in listen:
log_raise_400("JSON document must contain the key listened_at at the top level.", listen)
try:
listen['listened_at'] = int(listen['listened_at'])
except ValueError:
log_raise_400("JSON document must contain an int value for listened_at.", listen)
if 'listened_at' in listen and 'track_metadata' in listen and len(listen) > 2:
log_raise_400("JSON document may only contain listened_at and "
"track_metadata top level keys", listen)
# if timestamp is too high, raise BadRequest
# in order to make up for possible clock skew, we allow
# timestamps to be one hour ahead of server time
if not is_valid_timestamp(listen['listened_at']):
log_raise_400("Value for key listened_at is too high.", listen)
elif listen_type == LISTEN_TYPE_PLAYING_NOW:
if 'listened_at' in listen:
log_raise_400("JSON document must not contain listened_at while submitting "
"playing_now.", listen)
if 'track_metadata' in listen and len(listen) > 1:
log_raise_400("JSON document may only contain track_metadata as top level "
"key when submitting now_playing.", listen)
# Basic metadata
try:
if not listen['track_metadata']['track_name']:
log_raise_400("JSON document does not contain required "
"track_metadata.track_name.", listen)
if not listen['track_metadata']['artist_name']:
log_raise_400("JSON document does not contain required "
"track_metadata.artist_name.", listen)
if not isinstance(listen['track_metadata']['artist_name'], str):
log_raise_400("artist_name must be a single string.", listen)
except KeyError:
log_raise_400("JSON document does not contain a valid metadata.track_name "
"and/or track_metadata.artist_name.", listen)
if 'additional_info' in listen['track_metadata']:
# Tags
if 'tags' in listen['track_metadata']['additional_info']:
tags = listen['track_metadata']['additional_info']['tags']
if len(tags) > MAX_TAGS_PER_LISTEN:
log_raise_400("JSON document may not contain more than %d items in "
"track_metadata.additional_info.tags." % MAX_TAGS_PER_LISTEN, listen)
for tag in tags:
if len(tag) > MAX_TAG_SIZE:
log_raise_400("JSON document may not contain track_metadata.additional_info.tags "
"longer than %d characters." % MAX_TAG_SIZE, listen)
# MBIDs
single_mbid_keys = ['release_mbid', 'recording_mbid', 'release_group_mbid', 'track_mbid']
for key in single_mbid_keys:
verify_mbid_validity(listen, key, multi = False)
multiple_mbid_keys = ['artist_mbids', 'work_mbids']
for key in multiple_mbid_keys:
verify_mbid_validity(listen, key, multi = True)
# lifted from AcousticBrainz
def is_valid_uuid(u):
try:
u = uuid.UUID(u)
return True
except (AttributeError, ValueError):
return False
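# Editor's usage sketch (hedged): is_valid_uuid accepts any string that
# uuid.UUID() can parse and returns False otherwise, e.g.
#   is_valid_uuid('40aff7c5-bd4f-4f69-85d5-a64c37a29d09')  -> True
#   is_valid_uuid('not-a-uuid')                            -> False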
def _get_augmented_listens(payload, user, listen_type):
""" Converts the payload to augmented list after lookup
in the MessyBrainz database
"""
augmented_listens = []
msb_listens = []
for l in payload:
listen = l.copy() # Create a local object to prevent the mutation of the passed object
listen['user_id'] = user['id']
listen['user_name'] = user['musicbrainz_id']
msb_listens.append(listen)
if len(msb_listens) >= MAX_ITEMS_PER_MESSYBRAINZ_LOOKUP:
augmented_listens.extend(_messybrainz_lookup(msb_listens))
msb_listens = []
if msb_listens:
augmented_listens.extend(_messybrainz_lookup(msb_listens))
return augmented_listens
def _messybrainz_lookup(listens):
msb_listens = []
for listen in listens:
messy_dict = {
'artist': listen['track_metadata']['artist_name'],
'title': listen['track_metadata']['track_name'],
}
if 'release_name' in listen['track_metadata']:
messy_dict['release'] = listen['track_metadata']['release_name']
if 'additional_info' in listen['track_metadata']:
ai = listen['track_metadata']['additional_info']
if 'artist_mbids' in ai and isinstance(ai['artist_mbids'], list):
messy_dict['artist_mbids'] = ai['artist_mbids']
if 'release_mbid' in ai:
messy_dict['release_mbid'] = ai['release_mbid']
if 'recording_mbid' in ai:
messy_dict['recording_mbid'] = ai['recording_mbid']
if 'track_number' in ai:
messy_dict['track_number'] = ai['track_number']
if 'spotify_id' in ai:
messy_dict['spotify_id'] = ai['spotify_id']
msb_listens.append(messy_dict)
try:
msb_responses = messybrainz.submit_listens(msb_listens)
except messybrainz.exceptions.BadDataException as e:
log_raise_400(str(e))
except messybrainz.exceptions.NoDataFoundException:
return []
except messybrainz.exceptions.ErrorAddingException as e:
raise ServiceUnavailable(str(e))
augmented_listens = []
for listen, messybrainz_resp in zip(listens, msb_responses['payload']):
messybrainz_resp = messybrainz_resp['ids']
if 'additional_info' not in listen['track_metadata']:
listen['track_metadata']['additional_info'] = {}
try:
listen['recording_msid'] = messybrainz_resp['recording_msid']
listen['track_metadata']['additional_info']['artist_msid'] = messybrainz_resp['artist_msid']
except KeyError:
current_app.logger.error("MessyBrainz did not return a proper set of ids")
raise InternalServerError
try:
listen['track_metadata']['additional_info']['release_msid'] = messybrainz_resp['release_msid']
except KeyError:
pass
artist_mbids = messybrainz_resp.get('artist_mbids', [])
release_mbid = messybrainz_resp.get('release_mbid', None)
recording_mbid = messybrainz_resp.get('recording_mbid', None)
if 'artist_mbids' not in listen['track_metadata']['additional_info'] and \
'release_mbid' not in listen['track_metadata']['additional_info'] and \
'recording_mbid' not in listen['track_metadata']['additional_info']:
if len(artist_mbids) > 0 and release_mbid and recording_mbid:
listen['track_metadata']['additional_info']['artist_mbids'] = artist_mbids
listen['track_metadata']['additional_info']['release_mbid'] = release_mbid
listen['track_metadata']['additional_info']['recording_mbid'] = recording_mbid
augmented_listens.append(listen)
return augmented_listens
def log_raise_400(msg, data=""):
""" Helper function for logging issues with request data and showing error page.
Logs the message and data, raises BadRequest exception which shows 400 Bad
Request to the user.
"""
if isinstance(data, dict):
data = ujson.dumps(data)
current_app.logger.debug("BadRequest: %s\nJSON: %s" % (msg, data))
raise BadRequest(msg)
def verify_mbid_validity(listen, key, multi):
""" Verify that mbid(s) present in listen with key `key` is valid.
Args:
listen: listen data
key: the key whose mbids is to be validated
multi: boolean value signifying if the key contains multiple mbids
"""
if not multi:
items = listen['track_metadata']['additional_info'].get(key)
items = [items] if items else []
else:
items = listen['track_metadata']['additional_info'].get(key, [])
for item in items:
if not is_valid_uuid(item):
log_raise_400("%s MBID format invalid." % (key, ), listen)
def is_valid_timestamp(ts):
""" Returns True if the timestamp passed is in the API's
allowed range of timestamps, False otherwise
Args:
ts (int): the timestamp to be checked for validity
Returns:
bool: True if timestamp is valid, False otherwise
"""
return ts <= int(time.time()) + API_LISTENED_AT_ALLOWED_SKEW
def publish_data_to_queue(data, exchange, queue, error_msg):
""" Publish specified data to the specified queue.
Args:
data: the data to be published
exchange (str): the name of the exchange
queue (str): the name of the queue
error_msg (str): the error message to be returned in case of an error
"""
try:
with rabbitmq_connection._rabbitmq.get() as connection:
channel = connection.channel
channel.exchange_declare(exchange=exchange, exchange_type='fanout')
channel.queue_declare(queue, durable=True)
channel.basic_publish(
exchange=exchange,
routing_key='',
body=ujson.dumps(data),
properties=pika.BasicProperties(delivery_mode=2, ),
)
except pika.exceptions.ConnectionClosed as e:
current_app.logger.error("Connection to rabbitmq closed while trying to publish: %s" % str(e), exc_info=True)
raise ServiceUnavailable(error_msg)
except Exception as e:
current_app.logger.error("Cannot publish to rabbitmq channel: %s / %s" % (type(e).__name__, str(e)), exc_info=True)
raise ServiceUnavailable(error_msg)
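# Usage sketch (illustrative only): the exchange/queue names below are hypothetical
# placeholders; in practice they would come from the application config.
#   publish_data_to_queue(
#       data=augmented_listens,
#       exchange='incoming',
#       queue='incoming',
#       error_msg='Cannot submit listens to queue, please try again later.',
#   )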
| 1 | 15,150 | This whole block bugs me. Expire_time to me suggest that an absolute time of when something happens and duration is an interval of time. While the code looks correct, it feels awkward to read. | metabrainz-listenbrainz-server | py |
@@ -93,7 +93,9 @@ export function coerceToVNode(possibleVNode) {
// Clone vnode if it has already been used. ceviche/#57
if (possibleVNode._dom!=null) {
- return createVNode(possibleVNode.type, possibleVNode.props, possibleVNode.text, possibleVNode.key, null);
+ let vnode = createVNode(possibleVNode.type, possibleVNode.props, possibleVNode.text, possibleVNode.key, null);
+ vnode._dom = possibleVNode._dom;
+ return vnode;
}
return possibleVNode; | 1 | import options from './options';
/**
 * Create a virtual node (used for JSX)
* @param {import('./internal').VNode["type"]} type The node name or Component
* constructor for this virtual node
* @param {object | null | undefined} [props] The properties of the virtual node
* @param {Array<import('.').ComponentChildren>} [children] The children of the virtual node
* @returns {import('./internal').VNode}
*/
export function createElement(type, props, children) {
if (props==null) props = {};
if (arguments.length>3) {
children = [children];
for (let i=3; i<arguments.length; i++) {
children.push(arguments[i]);
}
}
if (children!=null) {
props.children = children;
}
// "type" may be undefined during development. The check is needed so that
// we can display a nice error message with our debug helpers
if (type!=null && type.defaultProps!=null) {
for (let i in type.defaultProps) {
if (props[i]===undefined) props[i] = type.defaultProps[i];
}
}
let ref = props.ref;
if (ref) delete props.ref;
let key = props.key;
if (key) delete props.key;
return createVNode(type, props, null, key, ref);
}
/**
* Create a VNode (used internally by Preact)
* @param {import('./internal').VNode["type"]} type The node name or Component
* Constructor for this virtual node
 * @param {object | null} props The properties of this virtual node
* @param {string | number} text If this virtual node represents a text node,
* this is the text of the node
 * @param {string | number | null} key The key for this virtual node, used when
* diffing it against its children
* @param {import('./internal').VNode["ref"]} ref The ref property that will
* receive a reference to its created child
* @returns {import('./internal').VNode}
*/
export function createVNode(type, props, text, key, ref) {
// V8 seems to be better at detecting type shapes if the object is allocated from the same call site
// Do not inline into createElement and coerceToVNode!
const vnode = {
type,
props,
text,
key,
ref,
_children: null,
_dom: null,
_lastDomChild: null,
_component: null
};
if (options.vnode) options.vnode(vnode);
return vnode;
}
export function createRef() {
return {};
}
export /* istanbul ignore next */ function Fragment() { }
/**
* Coerce an untrusted value into a VNode
* Specifically, this should be used anywhere a user could provide a boolean, string, or number where
* a VNode or Component is desired instead
* @param {boolean | string | number | import('./internal').VNode} possibleVNode A possible VNode
* @returns {import('./internal').VNode}
*/
export function coerceToVNode(possibleVNode) {
if (possibleVNode == null || typeof possibleVNode === 'boolean') return null;
if (typeof possibleVNode === 'string' || typeof possibleVNode === 'number') {
return createVNode(null, null, possibleVNode, null, null);
}
if (Array.isArray(possibleVNode)) {
return createElement(Fragment, null, possibleVNode);
}
// Clone vnode if it has already been used. ceviche/#57
if (possibleVNode._dom!=null) {
return createVNode(possibleVNode.type, possibleVNode.props, possibleVNode.text, possibleVNode.key, null);
}
return possibleVNode;
}
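// Behavioral sketch of coerceToVNode (informal notes, not part of the source):
//   coerceToVNode(null) / coerceToVNode(true) -> null
//   coerceToVNode('hi') / coerceToVNode(42)   -> text vnode (type === null)
//   coerceToVNode([a, b])                     -> createElement(Fragment, null, [a, b])
//   coerceToVNode(usedVNode)                  -> fresh clone when usedVNode._dom != null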
| 1 | 12,941 | Can we add `_dom` as an argument to createVNode here? I think it might be shorter (could totally be wrong!) | preactjs-preact | js |
@@ -1964,6 +1964,8 @@ mangle_return(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_set_src(popf, 1, memop);
PRE(ilist, instr, popf);
}
+ /* Mangles single step exception after a popf. */
+ mangle_single_step(dcontext, ilist, popf);
#ifdef X64
/* In x64 mode, iret additionally does pop->RSP and pop->ss. */ | 1 | /* ******************************************************************************
* Copyright (c) 2010-2016 Google, Inc. All rights reserved.
* Copyright (c) 2010 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2000-2001 Hewlett-Packard Company */
/* file "mangle.c" */
#include "../globals.h"
#include "../link.h"
#include "../fragment.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "decode_fast.h"
#include "disassemble.h"
#include "../hashtable.h"
#include "../fcache.h" /* for in_fcache */
#ifdef STEAL_REGISTER
#include "steal_reg.h"
#endif
#include "instrument.h" /* for dr_insert_call */
#include "../translate.h"
#ifdef RCT_IND_BRANCH
# include "../rct.h" /* rct_add_rip_rel_addr */
#endif
#ifdef UNIX
#include <sys/syscall.h>
#endif
#include <string.h> /* for memset */
#ifdef ANNOTATIONS
# include "../annotations.h"
#endif
/* Make code more readable by shortening long lines.
* We mark everything we add as non-app instr.
*/
#define POST instrlist_meta_postinsert
#define PRE instrlist_meta_preinsert
/***************************************************************************/
void
mangle_arch_init(void)
{
/* Nothing yet. */
}
/* Convert a short-format CTI into an equivalent one using
* near-rel-format.
* Remember, the target is kept in the 0th src array position,
* and has already been converted from an 8-bit offset to an
* absolute PC, so we can just pretend instructions are longer
* than they really are.
*/
instr_t *
convert_to_near_rel_arch(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr)
{
int opcode = instr_get_opcode(instr);
app_pc target = NULL;
if (opcode == OP_jmp_short) {
instr_set_opcode(instr, OP_jmp);
return instr;
}
if (OP_jo_short <= opcode && opcode <= OP_jnle_short) {
/* WARNING! following is OP_ enum order specific */
instr_set_opcode(instr, opcode - OP_jo_short + OP_jo);
return instr;
}
if (OP_loopne <= opcode && opcode <= OP_jecxz) {
uint mangled_sz;
uint offs;
/*
* from "info as" on GNU/linux system:
Note that the `jcxz', `jecxz', `loop', `loopz', `loope', `loopnz'
and `loopne' instructions only come in byte displacements, so that if
you use these instructions (`gcc' does not use them) you may get an
error message (and incorrect code). The AT&T 80386 assembler tries to
get around this problem by expanding `jcxz foo' to
jcxz cx_zero
jmp cx_nonzero
cx_zero: jmp foo
cx_nonzero:
*
* We use that same expansion, but we want to treat the entire
* three-instruction sequence as a single conditional branch.
* Thus we use a special instruction that stores the entire
* instruction sequence as mangled bytes, yet w/ a valid target operand
* (xref PR 251646).
* patch_branch and instr_invert_cbr
* know how to find the target pc (final 4 of 9 bytes).
* When decoding anything we've written we know the only jcxz or
* loop* instructions are part of these rewritten packages, and
* we use remangle_short_rewrite to read back in the instr.
* (have to do this everywhere call decode() except original
* interp, plus in input_trace())
*
* An alternative is to change 'jcxz foo' to:
<save eflags>
cmpb %cx,$0
je foo_restore
<restore eflags>
...
foo_restore: <restore eflags>
foo:
* However the added complications of restoring the eflags on
* the taken-branch path made me choose the former solution.
*/
/* SUMMARY:
* expand 'shortjump foo' to:
shortjump taken
jmp-short nottaken
taken: jmp foo
nottaken:
*/
if (ilist != NULL) {
/* PR 266292: for meta instrs, insert separate instrs */
/* reverse order */
opnd_t tgt = instr_get_target(instr);
instr_t *nottaken = INSTR_CREATE_label(dcontext);
instr_t *taken = INSTR_CREATE_jmp(dcontext, tgt);
ASSERT(instr_is_meta(instr));
instrlist_meta_postinsert(ilist, instr, nottaken);
instrlist_meta_postinsert(ilist, instr, taken);
instrlist_meta_postinsert(ilist, instr, INSTR_CREATE_jmp_short
(dcontext, opnd_create_instr(nottaken)));
instr_set_target(instr, opnd_create_instr(taken));
return taken;
}
if (opnd_is_near_pc(instr_get_target(instr)))
target = opnd_get_pc(instr_get_target(instr));
else if (opnd_is_near_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
/* XXX: not using get_app_instr_xl8() b/c drdecodelib doesn't link
* mangle_shared.c.
*/
target = instr_get_translation(tgt);
if (target == NULL && instr_raw_bits_valid(tgt))
target = instr_get_raw_bits(tgt);
ASSERT(target != NULL);
} else
ASSERT_NOT_REACHED();
/* PR 251646: cti_short_rewrite: target is in src0, so operands are
* valid, but raw bits must also be valid, since they hide the multiple
* instrs. For x64, it is marked for re-relativization, but it's
* special since the target must be obtained from src0 and not
* from the raw bits (since that might not reach).
*/
/* need 9 bytes + possible addr prefix */
mangled_sz = CTI_SHORT_REWRITE_LENGTH;
if (!reg_is_pointer_sized(opnd_get_reg(instr_get_src(instr, 1))))
mangled_sz++; /* need addr prefix */
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
offs = 0;
if (mangled_sz > CTI_SHORT_REWRITE_LENGTH) {
instr_set_raw_byte(instr, offs, ADDR_PREFIX_OPCODE);
offs++;
}
/* first 2 bytes: jecxz 8-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(opcode));
offs++;
/* remember pc-relative offsets are from start of next instr */
instr_set_raw_byte(instr, offs, (byte)2);
offs++;
/* next 2 bytes: jmp-short 8-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(OP_jmp_short));
offs++;
instr_set_raw_byte(instr, offs, (byte)5);
offs++;
/* next 5 bytes: jmp 32-bit-offset */
instr_set_raw_byte(instr, offs, decode_first_opcode_byte(OP_jmp));
offs++;
/* for x64 we may not reach, but we go ahead and try */
instr_set_raw_word(instr, offs, (int)
(target - (instr->bytes + mangled_sz)));
offs += sizeof(int);
ASSERT(offs == mangled_sz);
LOG(THREAD, LOG_INTERP, 2, "convert_to_near_rel: jecxz/loop* opcode\n");
/* original target operand is still valid */
instr_set_operands_valid(instr, true);
return instr;
}
LOG(THREAD, LOG_INTERP, 1, "convert_to_near_rel: unknown opcode: %d %s\n",
opcode, decode_opcode_name(opcode));
ASSERT_NOT_REACHED(); /* conversion not possible OR not a short-form cti */
return instr;
}
/* For jecxz and loop*, we create 3 instructions in a single
* instr that we treat like a single conditional branch.
* On re-decoding our own output we need to recreate that instr.
* This routine assumes that the instructions encoded at pc
* are indeed a mangled cti short.
* Assumes that the first instr has already been decoded into instr,
* that pc points to the start of that instr.
* Converts instr into a new 3-raw-byte-instr with a private copy of the
* original raw bits.
* Optionally modifies the target to "target" if "target" is non-null.
* Returns the pc of the instruction after the remangled sequence.
*/
byte *
remangle_short_rewrite(dcontext_t *dcontext,
instr_t *instr, byte *pc, app_pc target)
{
uint mangled_sz = CTI_SHORT_REWRITE_LENGTH;
ASSERT(instr_is_cti_short_rewrite(instr, pc));
if (*pc == ADDR_PREFIX_OPCODE)
mangled_sz++;
/* first set the target in the actual operand src0 */
if (target == NULL) {
/* acquire existing absolute target */
int rel_target = *((int *)(pc + mangled_sz - 4));
target = pc + mangled_sz + rel_target;
}
instr_set_target(instr, opnd_create_pc(target));
/* now set up the bundle of raw instructions
* we've already read the first 2-byte instruction, jecxz/loop*
* they all take up mangled_sz bytes
*/
instr_allocate_raw_bits(dcontext, instr, mangled_sz);
instr_set_raw_bytes(instr, pc, mangled_sz);
/* for x64 we may not reach, but we go ahead and try */
instr_set_raw_word(instr, mangled_sz - 4, (int)(target - (pc + mangled_sz)));
/* now make operands valid */
instr_set_operands_valid(instr, true);
return (pc+mangled_sz);
}
/***************************************************************************/
#if !defined(STANDALONE_DECODER)
int
insert_out_of_line_context_switch(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, bool save)
{
if (save) {
/* We adjust the stack so the return address will not be clobbered,
         * so we can have a call/return pair to take advantage of the hardware
* call return stack for better performance.
* xref emit_clean_call_save @ x86/emit_utils.c
*/
PRE(ilist, instr,
INSTR_CREATE_lea
(dcontext,
opnd_create_reg(DR_REG_XSP),
opnd_create_base_disp(DR_REG_XSP, DR_REG_NULL, 0,
-(int)(get_clean_call_switch_stack_size() +
get_clean_call_temp_stack_size()),
OPSZ_lea)));
}
PRE(ilist, instr,
INSTR_CREATE_call
(dcontext, save ?
opnd_create_pc(get_clean_call_save(dcontext _IF_X64(GENCODE_X64))) :
opnd_create_pc(get_clean_call_restore(dcontext _IF_X64(GENCODE_X64)))));
return get_clean_call_switch_stack_size();
}
void
insert_clear_eflags(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr)
{
/* clear eflags for callee's usage */
if (cci == NULL || !cci->skip_clear_flags) {
if (dynamo_options.cleancall_ignore_eflags) {
            /* we still clear DF since some compilers assume
* DF is cleared at each function.
*/
PRE(ilist, instr, INSTR_CREATE_cld(dcontext));
} else {
/* on x64 a push immed is sign-extended to 64-bit */
PRE(ilist, instr,
INSTR_CREATE_push_imm(dcontext, OPND_CREATE_INT32(0)));
PRE(ilist, instr, INSTR_CREATE_popf(dcontext));
}
}
}
/* Pushes not only the GPRs but also xmm/ymm, xip, and xflags, in
* priv_mcontext_t order.
* The current stack pointer alignment should be passed. Use 1 if
* unknown (NOT 0).
* Returns the amount of data pushed. Does NOT fix up the xsp value pushed
* to be the value prior to any pushes for x64 as no caller needs that
* currently (they all build a priv_mcontext_t and have to do further xsp
* fixups anyway).
* Includes xmm0-5 for PR 264138.
*/
uint
insert_push_all_registers(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr,
uint alignment, opnd_t push_pc, reg_id_t scratch/*optional*/)
{
uint dstack_offs = 0;
int offs_beyond_xmm = 0;
if (cci == NULL)
cci = &default_clean_call_info;
if (cci->preserve_mcontext || cci->num_simd_skip != NUM_SIMD_REGS) {
int offs = XMM_SLOTS_SIZE + PRE_XMM_PADDING;
if (cci->preserve_mcontext && cci->skip_save_flags) {
offs_beyond_xmm = 2*XSP_SZ; /* pc and flags */
offs += offs_beyond_xmm;
}
PRE(ilist, instr, INSTR_CREATE_lea
(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0, -offs)));
dstack_offs += offs;
}
if (preserve_xmm_caller_saved()) {
/* PR 264138: we must preserve xmm0-5 if on a 64-bit kernel */
int i;
/* PR 266305: see discussion in emit_fcache_enter_shared on
* which opcode is better. Note that the AMD optimization
* guide says to use movlps+movhps for unaligned stores, but
* for simplicity and smaller code I'm using movups anyway.
*/
/* XXX i#438: once have SandyBridge processor need to measure
* cost of vmovdqu and whether worth arranging 32-byte alignment
* for all callers. B/c we put ymm at end of priv_mcontext_t, we do
* currently have 32-byte alignment for clean calls.
*/
uint opcode = move_mm_reg_opcode(ALIGNED(alignment, 16), ALIGNED(alignment, 32));
ASSERT(proc_has_feature(FEATURE_SSE));
for (i=0; i<NUM_SIMD_SAVED; i++) {
if (!cci->simd_skip[i]) {
PRE(ilist, instr, instr_create_1dst_1src
(dcontext, opcode,
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + i*XMM_SAVED_REG_SIZE +
offs_beyond_xmm,
OPSZ_SAVED_XMM),
opnd_create_reg(REG_SAVED_XMM0 + (reg_id_t)i)));
}
}
ASSERT(i*XMM_SAVED_REG_SIZE == XMM_SAVED_SIZE);
ASSERT(XMM_SAVED_SIZE <= XMM_SLOTS_SIZE);
}
/* pc and aflags */
if (!cci->skip_save_flags) {
ASSERT(offs_beyond_xmm == 0);
if (opnd_is_immed_int(push_pc))
PRE(ilist, instr, INSTR_CREATE_push_imm(dcontext, push_pc));
else
PRE(ilist, instr, INSTR_CREATE_push(dcontext, push_pc));
dstack_offs += XSP_SZ;
PRE(ilist, instr, INSTR_CREATE_pushf(dcontext));
dstack_offs += XSP_SZ;
} else {
ASSERT(offs_beyond_xmm == 2*XSP_SZ || !cci->preserve_mcontext);
/* for cci->preserve_mcontext we added to the lea above so we ignore push_pc */
}
#ifdef X64
/* keep priv_mcontext_t order */
if (!cci->reg_skip[REG_R15 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R15)));
if (!cci->reg_skip[REG_R14 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R14)));
if (!cci->reg_skip[REG_R13 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R13)));
if (!cci->reg_skip[REG_R12 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R12)));
if (!cci->reg_skip[REG_R11 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R11)));
if (!cci->reg_skip[REG_R10 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R10)));
if (!cci->reg_skip[REG_R9 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R9)));
if (!cci->reg_skip[REG_R8 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_R8)));
if (!cci->reg_skip[REG_RAX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RAX)));
if (!cci->reg_skip[REG_RCX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RCX)));
if (!cci->reg_skip[REG_RDX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RDX)));
if (!cci->reg_skip[REG_RBX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RBX)));
/* we do NOT match pusha xsp value */
if (!cci->reg_skip[REG_RSP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RSP)));
if (!cci->reg_skip[REG_RBP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RBP)));
if (!cci->reg_skip[REG_RSI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RSI)));
if (!cci->reg_skip[REG_RDI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_push(dcontext, opnd_create_reg(REG_RDI)));
dstack_offs += (NUM_GP_REGS - cci->num_regs_skip) * XSP_SZ;
#else
PRE(ilist, instr, INSTR_CREATE_pusha(dcontext));
dstack_offs += 8 * XSP_SZ;
#endif
ASSERT(cci->skip_save_flags ||
cci->num_simd_skip != 0 ||
cci->num_regs_skip != 0 ||
dstack_offs == (uint)get_clean_call_switch_stack_size());
return dstack_offs;
}
/* User should pass the alignment from insert_push_all_registers: i.e., the
* alignment at the end of all the popping, not the alignment prior to
* the popping.
*/
void
insert_pop_all_registers(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *instr,
uint alignment)
{
int offs_beyond_xmm = 0;
if (cci == NULL)
cci = &default_clean_call_info;
#ifdef X64
/* in priv_mcontext_t order */
if (!cci->reg_skip[REG_RDI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RDI)));
if (!cci->reg_skip[REG_RSI - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RSI)));
if (!cci->reg_skip[REG_RBP - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBP)));
/* skip xsp by popping into dead rbx */
if (!cci->reg_skip[REG_RSP - REG_XAX]) {
ASSERT(!cci->reg_skip[REG_RBX - REG_XAX]);
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBX)));
}
if (!cci->reg_skip[REG_RBX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RBX)));
if (!cci->reg_skip[REG_RDX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RDX)));
if (!cci->reg_skip[REG_RCX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RCX)));
if (!cci->reg_skip[REG_RAX - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RAX)));
if (!cci->reg_skip[REG_R8 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R8)));
if (!cci->reg_skip[REG_R9 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R9)));
if (!cci->reg_skip[REG_R10 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R10)));
if (!cci->reg_skip[REG_R11 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R11)));
if (!cci->reg_skip[REG_R12 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R12)));
if (!cci->reg_skip[REG_R13 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R13)));
if (!cci->reg_skip[REG_R14 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R14)));
if (!cci->reg_skip[REG_R15 - REG_XAX])
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_R15)));
#else
PRE(ilist, instr, INSTR_CREATE_popa(dcontext));
#endif
if (!cci->skip_save_flags) {
PRE(ilist, instr, INSTR_CREATE_popf(dcontext));
        offs_beyond_xmm = XSP_SZ; /* pc */
} else if (cci->preserve_mcontext) {
offs_beyond_xmm = 2*XSP_SZ; /* aflags + pc */
}
if (preserve_xmm_caller_saved()) {
/* PR 264138: we must preserve xmm0-5 if on a 64-bit kernel */
int i;
/* See discussion in emit_fcache_enter_shared on which opcode
* is better. */
uint opcode = move_mm_reg_opcode(ALIGNED(alignment, 32), ALIGNED(alignment, 16));
ASSERT(proc_has_feature(FEATURE_SSE));
for (i=0; i<NUM_SIMD_SAVED; i++) {
if (!cci->simd_skip[i]) {
PRE(ilist, instr, instr_create_1dst_1src
(dcontext, opcode, opnd_create_reg(REG_SAVED_XMM0 + (reg_id_t)i),
opnd_create_base_disp(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + i*XMM_SAVED_REG_SIZE +
offs_beyond_xmm,
OPSZ_SAVED_XMM)));
}
}
ASSERT(i*XMM_SAVED_REG_SIZE == XMM_SAVED_SIZE);
ASSERT(XMM_SAVED_SIZE <= XMM_SLOTS_SIZE);
}
PRE(ilist, instr, INSTR_CREATE_lea
(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0,
PRE_XMM_PADDING + XMM_SLOTS_SIZE +
offs_beyond_xmm)));
}
reg_id_t
shrink_reg_for_param(reg_id_t regular, opnd_t arg)
{
#ifdef X64
if (opnd_get_size(arg) == OPSZ_4) { /* we ignore var-sized */
/* PR 250976 #2: leave 64-bit only if an immed w/ top bit set (we
* assume user wants sign-extension; that is after all what happens
* on a push of a 32-bit immed) */
if (!opnd_is_immed_int(arg) ||
(opnd_get_immed_int(arg) & 0x80000000) == 0)
return reg_64_to_32(regular);
}
#endif
return regular;
}
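/* Informal example (x64, hypothetical values): for a 4-byte argument, passing RDI
 * here yields EDI so the load zero-extends; a 32-bit immediate with its top bit set
 * (e.g. 0x80000000) keeps RDI so the mov_imm sign-extends, matching what a push of
 * that immediate would do.
 */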
/* Returns the change in the stack pointer.
* N.B.: due to stack alignment and minimum stack reservation, do
* not use parameters involving esp/rsp, as its value can change!
*
* This routine only supports passing arguments that are integers or
* pointers of a size equal or smaller than the register size: i.e., no
* floating-point, multimedia, or aggregate data types.
*
* For 64-bit mode, if a 32-bit immediate integer is specified as an
* argument and it has its top bit set, we assume it is intended to be
* sign-extended to 64-bits; otherwise we zero-extend it.
*
* For 64-bit mode, variable-sized argument operands may not work
* properly.
*
* Arguments that reference REG_XSP will work for clean calls, but are not guaranteed
* to work for non-clean, especially for 64-bit where we align, etc. Arguments that
* reference sub-register portions of REG_XSP are not supported.
*
* XXX PR 307874: w/ a post optimization pass, or perhaps more clever use of
* existing passes, we could do much better on calling convention and xsp conflicting
* args. We should also really consider inlining client callees (PR 218907), since
* clean calls for 64-bit are enormous (71 instrs/264 bytes for 2-arg x64; 26
* instrs/99 bytes for x86) and we could avoid all the xmm saves and replace pushf w/
* lahf.
*/
uint
insert_parameter_preparation(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
bool clean_call, uint num_args, opnd_t *args)
{
uint i;
int r;
uint preparm_padding = 0;
uint param_stack = 0, total_stack = 0;
bool push = true;
bool restore_xax = false;
bool restore_xsp = false;
/* we need two passes for PR 250976 optimization */
/* Push/mov in reverse order. We need a label so we can also add
* instrs prior to the regular param prep. So params are POST-mark, while
* pre-param-prep is POST-prev or PRE-mark.
*/
#ifdef X64
uint arg_pre_push = 0, total_pre_push = 0;
#endif
instr_t *prev = (instr == NULL) ? instrlist_last(ilist) : instr_get_prev(instr);
instr_t *mark = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, mark);
/* For a clean call, xax is dead (clobbered by prepare_for_clean_call()).
* Rather than use as scratch and restore prior to each param that uses it,
* we restore once up front if any use it, and use regparms[0] as scratch,
* which is symmetric with non-clean-calls: regparms[0] is dead since we're
* doing args in reverse order. However, we then can't use regparms[0]
* directly if referenced in earlier params, but similarly for xax, so
* there's no clear better way. (prepare_for_clean_call also clobbers xsp,
* but we just disallow args that use it).
*/
ASSERT(num_args == 0 || args != NULL);
/* We can get away w/ one pass, except for PR 250976 we want calling conv
* regs to be able to refer to priv_mcontext_t as well as potentially being
* pushed: but we need to know the total # pushes ahead of time (since hard
* to mark for post-patching)
*/
for (i = 0; i < num_args; i++) {
IF_X64(bool is_pre_push = false;)
for (r = 0; r < opnd_num_regs_used(args[i]); r++) {
reg_id_t used = opnd_get_reg_used(args[i], r);
IF_X64(int parm;)
LOG(THREAD, LOG_INTERP, 4,
"ipp: considering arg %d reg %d == %s\n", i, r, reg_names[used]);
if (clean_call && !restore_xax && reg_overlap(used, REG_XAX))
restore_xax = true;
if (reg_overlap(used, REG_XSP)) {
IF_X64(CLIENT_ASSERT(clean_call,
"Non-clean-call argument: REG_XSP not supported"));
CLIENT_ASSERT(used == REG_XSP,
"Call argument: sub-reg-xsp not supported");
if (clean_call && /*x64*/parameters_stack_padded() && !restore_xsp)
restore_xsp = true;
}
#ifdef X64
/* PR 250976 #A: count the number of pre-pushes we need */
parm = reg_parameter_num(used);
/* We can read a register used in an earlier arg since we store that
* arg later (we do reverse order), except arg0, which we use as
* scratch (we don't always need it, but not worth another pre-pass
* through all args to find out), and xsp. Otherwise, if a plain reg,
* we point at mcontext (we restore xsp slot in mcontext if nec.).
* If a mem ref, we need to pre-push onto stack.
* N.B.: this conditional is duplicated in 2nd loop.
*/
if (!is_pre_push &&
((parm == 0 && num_args > 1) || parm > (int)i ||
reg_overlap(used, REG_XSP)) &&
(!clean_call || !opnd_is_reg(args[i]))) {
total_pre_push++;
is_pre_push = true; /* ignore further regs in same arg */
}
#endif
}
}
if (parameters_stack_padded()) {
        /* For x64, we are supposed to reserve rsp space in the function prologue; we
         * do the next best thing and reserve it prior to setting up the args.
*/
push = false; /* store args to xsp offsets instead of pushing them */
total_stack = REGPARM_MINSTACK;
if (num_args > NUM_REGPARM)
total_stack += XSP_SZ * (num_args - NUM_REGPARM);
param_stack = total_stack;
IF_X64(total_stack += XSP_SZ * total_pre_push);
/* We assume rsp is currently 16-byte aligned. End of arguments is supposed
* to be 16-byte aligned for x64 SysV (note that retaddr will then make
* rsp 8-byte-aligned, which is ok: callee has to rectify that).
* For clean calls, prepare_for_clean_call leaves rsp aligned for x64.
* XXX PR 218790: we require users of dr_insert_call to ensure
* alignment; should we put in support to dynamically align?
*/
preparm_padding =
ALIGN_FORWARD_UINT(total_stack, REGPARM_END_ALIGN) - total_stack;
total_stack += preparm_padding;
/* we have to wait to insert the xsp adjust */
} else {
ASSERT(NUM_REGPARM == 0);
ASSERT(push);
IF_X64(ASSERT(total_pre_push == 0));
total_stack = XSP_SZ * num_args;
}
LOG(THREAD, LOG_INTERP, 3,
"insert_parameter_preparation: %d args, %d in-reg, %d pre-push, %d/%d stack\n",
num_args, NUM_REGPARM, IF_X64_ELSE(total_pre_push, 0), param_stack, total_stack);
for (i = 0; i < num_args; i++) {
/* FIXME PR 302951: we need to handle state restoration if any
* of these args references app memory. We should pull the state from
* the priv_mcontext_t on the stack if in a clean call. FIXME: what if not?
*/
opnd_t arg = args[i];
CLIENT_ASSERT(opnd_get_size(arg) == OPSZ_PTR || opnd_is_immed_int(arg)
IF_X64(|| opnd_get_size(arg) == OPSZ_4),
"Clean call arg has unsupported size");
#ifdef X64
/* PR 250976 #A: support args that reference param regs */
for (r = 0; r < opnd_num_regs_used(arg); r++) {
reg_id_t used = opnd_get_reg_used(arg, r);
int parm = reg_parameter_num(used);
/* See comments in loop above */
if ((parm == 0 && num_args > 1) || parm > (int)i ||
reg_overlap(used, REG_XSP)) {
int disp = 0;
if (clean_call && opnd_is_reg(arg)) {
/* We can point at the priv_mcontext_t slot.
* priv_mcontext_t is at the base of dstack: compute offset
* from xsp to the field we want and replace arg.
*/
disp += opnd_get_reg_dcontext_offs(opnd_get_reg(arg));
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
/* skip what this routine added */
disp += total_stack;
} else {
/* Push a temp on the stack and point at it. We
* could try to optimize by juggling registers, but
* not worth it.
*/
/* xsp was adjusted up above; we simply store to xsp offsets */
disp = param_stack + XSP_SZ * arg_pre_push;
if (opnd_is_reg(arg) && opnd_get_size(arg) == OPSZ_PTR) {
POST(ilist, prev, INSTR_CREATE_mov_st
(dcontext, OPND_CREATE_MEMPTR(REG_XSP, disp), arg));
} else {
reg_id_t xsp_scratch = regparms[0];
/* don't want to just change size since will read extra bytes.
* can't do mem-to-mem so go through scratch reg */
if (reg_overlap(used, REG_XSP)) {
/* Get original xsp into scratch[0] and replace in arg */
if (opnd_uses_reg(arg, regparms[0])) {
xsp_scratch = REG_XAX;
ASSERT(!opnd_uses_reg(arg, REG_XAX)); /* can't use 3 */
/* FIXME: rather than putting xsp into mcontext
* slot, better to just do local get from dcontext
* like we do for 32-bit below? */
POST(ilist, prev, instr_create_restore_from_tls
(dcontext, REG_XAX, TLS_XAX_SLOT));
}
opnd_replace_reg(&arg, REG_XSP, xsp_scratch);
}
POST(ilist, prev,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, disp),
opnd_create_reg(regparms[0])));
/* If sub-ptr-size, zero-extend is what we want so no movsxd */
POST(ilist, prev, INSTR_CREATE_mov_ld
(dcontext, opnd_create_reg
(shrink_reg_for_param(regparms[0], arg)), arg));
if (reg_overlap(used, REG_XSP)) {
int xsp_disp = opnd_get_reg_dcontext_offs(REG_XSP) +
clean_call_beyond_mcontext() + total_stack;
POST(ilist, prev, INSTR_CREATE_mov_ld
(dcontext, opnd_create_reg(xsp_scratch),
OPND_CREATE_MEMPTR(REG_XSP, xsp_disp)));
if (xsp_scratch == REG_XAX) {
POST(ilist, prev, instr_create_save_to_tls
(dcontext, REG_XAX, TLS_XAX_SLOT));
}
}
if (opnd_uses_reg(arg, regparms[0])) {
/* must restore since earlier arg might have clobbered */
int mc_disp = opnd_get_reg_dcontext_offs(regparms[0]) +
clean_call_beyond_mcontext() + total_stack;
POST(ilist, prev, INSTR_CREATE_mov_ld
(dcontext, opnd_create_reg(regparms[0]),
OPND_CREATE_MEMPTR(REG_XSP, mc_disp)));
}
}
arg_pre_push++; /* running counter */
}
arg = opnd_create_base_disp(REG_XSP, REG_NULL, 0,
disp, opnd_get_size(arg));
            break; /* once we've handled arg ignore further reg refs */
}
}
#endif
if (i < NUM_REGPARM) {
reg_id_t regparm = shrink_reg_for_param(regparms[i], arg);
if (opnd_is_immed_int(arg) || opnd_is_instr(arg)) {
POST(ilist, mark,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(regparm), arg));
} else {
POST(ilist, mark,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(regparm), arg));
}
} else {
if (push) {
IF_X64(ASSERT_NOT_REACHED()); /* no 64-bit push_imm! */
if (opnd_is_immed_int(arg) || opnd_is_instr(arg))
POST(ilist, mark, INSTR_CREATE_push_imm(dcontext, arg));
else {
if (clean_call && opnd_uses_reg(arg, REG_XSP)) {
/* We do a purely local expansion:
* spill eax, mc->eax, esp->eax, arg->eax, push eax, restore eax
*/
reg_id_t scratch = REG_XAX;
if (opnd_uses_reg(arg, scratch)) {
scratch = REG_XCX;
ASSERT(!opnd_uses_reg(arg, scratch)); /* can't use 3 regs */
}
opnd_replace_reg(&arg, REG_XSP, scratch);
POST(ilist, mark, instr_create_restore_from_tls
(dcontext, scratch, TLS_XAX_SLOT));
POST(ilist, mark, INSTR_CREATE_push(dcontext, arg));
POST(ilist, mark, instr_create_restore_from_dc_via_reg
(dcontext, scratch, scratch, XSP_OFFSET));
insert_get_mcontext_base
(dcontext, ilist, instr_get_next(mark), scratch);
POST(ilist, mark, instr_create_save_to_tls
(dcontext, scratch, TLS_XAX_SLOT));
} else
POST(ilist, mark, INSTR_CREATE_push(dcontext, arg));
}
} else {
/* xsp was adjusted up above; we simply store to xsp offsets */
uint offs = REGPARM_MINSTACK + XSP_SZ * (i - NUM_REGPARM);
#ifdef X64
if (opnd_is_immed_int(arg) || opnd_is_instr(arg)) {
/* PR 250976 #3: there is no memory store of 64-bit-immediate,
* so go through scratch reg */
ASSERT(NUM_REGPARM > 0);
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offs),
opnd_create_reg(regparms[0])));
POST(ilist, mark,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(regparms[0]),
arg));
} else {
#endif
if (opnd_is_memory_reference(arg)) {
/* can't do mem-to-mem so go through scratch */
reg_id_t scratch;
if (NUM_REGPARM > 0)
scratch = regparms[0];
else {
/* This happens on Mac.
* FIXME i#1370: not safe if later arg uses xax:
* local spill? Review how regparms[0] is preserved.
*/
scratch = REG_XAX;
}
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offs),
opnd_create_reg(scratch)));
POST(ilist, mark,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg
(shrink_reg_for_param(scratch, arg)),
arg));
} else {
POST(ilist, mark,
INSTR_CREATE_mov_st(dcontext,
OPND_CREATE_MEMPTR(REG_XSP, offs), arg));
}
#ifdef X64
}
#endif
}
}
}
if (!push && total_stack > 0) {
POST(ilist, prev, /* before everything else: pre-push and args */
/* can we use sub? may as well preserve eflags */
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_MEM_lea(REG_XSP, REG_NULL, 0,
-(int)total_stack)));
}
if (restore_xsp) {
/* before restore_xax, since we're going to clobber xax */
int disp = opnd_get_reg_dcontext_offs(REG_XSP);
instr_t *where = instr_get_next(prev);
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
insert_get_mcontext_base(dcontext, ilist, where, REG_XAX);
PRE(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, REG_XAX, REG_XAX, XSP_OFFSET));
PRE(ilist, where,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEMPTR(REG_XSP, disp),
opnd_create_reg(REG_XAX)));
/* now we need restore_xax to be AFTER this */
prev = instr_get_prev(where);
}
if (restore_xax) {
int disp = opnd_get_reg_dcontext_offs(REG_XAX);
/* skip rest of what prepare_for_clean_call adds */
disp += clean_call_beyond_mcontext();
POST(ilist, prev, /* before everything else: pre-push, args, and stack adjust */
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX),
OPND_CREATE_MEMPTR(REG_XSP, disp)));
}
return total_stack;
}
/* If jmp_instr == NULL, uses jmp_tag, otherwise uses jmp_instr
*/
void
insert_clean_call_with_arg_jmp_if_ret_true(dcontext_t *dcontext,
instrlist_t *ilist, instr_t *instr, void *callee, int arg,
app_pc jmp_tag, instr_t *jmp_instr)
{
instr_t *false_popa, *jcc;
prepare_for_clean_call(dcontext, NULL, ilist, instr);
dr_insert_call(dcontext, ilist, instr, callee, 1, OPND_CREATE_INT32(arg));
/* if the return value (xax) is 0, then jmp to internal false path */
PRE(ilist,instr, /* can't cmp w/ 64-bit immed so use test (shorter anyway) */
INSTR_CREATE_test(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_XAX)));
/* fill in jcc target once have false path */
jcc = INSTR_CREATE_jcc(dcontext, OP_jz, opnd_create_pc(NULL));
PRE(ilist, instr, jcc);
/* if it falls through, then it's true, so restore and jmp to true tag
* passed in by caller
*/
cleanup_after_clean_call(dcontext, NULL, ilist, instr);
if (jmp_instr == NULL) {
/* an exit cti, not a meta instr */
instrlist_preinsert
(ilist, instr, INSTR_CREATE_jmp(dcontext, opnd_create_pc(jmp_tag)));
} else {
PRE(ilist, instr,
INSTR_CREATE_jmp(dcontext, opnd_create_instr(jmp_instr)));
}
/* otherwise (if returned false), just do standard popf and continue */
/* get 1st instr of cleanup path */
false_popa = instr_get_prev(instr);
cleanup_after_clean_call(dcontext, NULL, ilist, instr);
false_popa = instr_get_next(false_popa);
instr_set_target(jcc, opnd_create_instr(false_popa));
}
/* If !precise, encode_pc is treated as +- a page (meant for clients
* writing an instrlist to gencode so not sure of exact placement but
* within a page).
* If encode_pc == vmcode_get_start(), checks reachability of whole
* vmcode region (meant for code going somewhere not precisely known
* in the code cache).
* Returns whether ended up using a direct cti. If inlined_tgt_instr != NULL,
* and an inlined target was used, returns a pointer to that instruction
* in *inlined_tgt_instr.
*/
bool
insert_reachable_cti(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
byte *encode_pc, byte *target, bool jmp, bool returns, bool precise,
reg_id_t scratch, instr_t **inlined_tgt_instr)
{
byte *encode_start;
byte *encode_end;
if (precise) {
encode_start = target + JMP_LONG_LENGTH;
encode_end = encode_start;
} else if (encode_pc == vmcode_get_start()) {
/* consider whole vmcode region */
encode_start = encode_pc;
encode_end = vmcode_get_end();
} else {
encode_start = (byte *) PAGE_START(encode_pc - PAGE_SIZE);
encode_end = (byte *) ALIGN_FORWARD(encode_pc + PAGE_SIZE, PAGE_SIZE);
}
if (REL32_REACHABLE(encode_start, target) &&
REL32_REACHABLE(encode_end, target)) {
/* For precise, we could consider a short cti, but so far no
* users are precise so we'll leave that for i#56.
*/
if (jmp)
PRE(ilist, where, INSTR_CREATE_jmp(dcontext, opnd_create_pc(target)));
else
PRE(ilist, where, INSTR_CREATE_call(dcontext, opnd_create_pc(target)));
return true;
} else {
opnd_t ind_tgt;
instr_t *inlined_tgt = NULL;
if (scratch == DR_REG_NULL) {
/* indirect through an inlined target */
inlined_tgt = instr_build_bits(dcontext, OP_UNDECODED, sizeof(target));
/* XXX: could use mov imm->xax and have target skip rex+opcode
* for clean disassembly
*/
instr_set_raw_bytes(inlined_tgt, (byte *) &target, sizeof(target));
/* this will copy the bytes for us, so we don't have to worry about
* the lifetime of the target param
*/
instr_allocate_raw_bits(dcontext, inlined_tgt, sizeof(target));
ind_tgt = opnd_create_mem_instr(inlined_tgt, 0, OPSZ_PTR);
if (inlined_tgt_instr != NULL)
*inlined_tgt_instr = inlined_tgt;
} else {
PRE(ilist, where, INSTR_CREATE_mov_imm
(dcontext, opnd_create_reg(scratch), OPND_CREATE_INTPTR(target)));
ind_tgt = opnd_create_reg(scratch);
if (inlined_tgt_instr != NULL)
*inlined_tgt_instr = NULL;
}
if (jmp)
PRE(ilist, where, INSTR_CREATE_jmp_ind(dcontext, ind_tgt));
else
PRE(ilist, where, INSTR_CREATE_call_ind(dcontext, ind_tgt));
if (inlined_tgt != NULL)
PRE(ilist, where, inlined_tgt);
return false;
}
}
/*###########################################################################
*###########################################################################
*
* M A N G L I N G R O U T I N E S
*/
/* If src_inst != NULL, uses it (and assumes it will be encoded at
* encode_estimate to determine whether > 32 bits or not: so if unsure where
* it will be encoded, pass a high address) as the immediate; else
* uses val.
*/
void
insert_mov_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, opnd_t dst,
instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
instr_t *mov1, *mov2;
if (src_inst != NULL)
val = (ptr_int_t) encode_estimate;
#ifdef X64
if (X64_MODE_DC(dcontext) && !opnd_is_reg(dst)) {
if (val <= INT_MAX && val >= INT_MIN) {
/* mov is sign-extended, so we can use one move if it is all
* 0 or 1 in top 33 bits
*/
mov1 = INSTR_CREATE_mov_imm(dcontext, dst,
(src_inst == NULL) ?
OPND_CREATE_INT32((int)val) :
opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, mov1);
mov2 = NULL;
} else {
/* do mov-64-bit-immed in two pieces. tiny corner-case risk of racy
* access to [dst] if this thread is suspended in between or another
* thread is trying to read [dst], but o/w we have to spill and
* restore a register.
*/
CLIENT_ASSERT(opnd_is_memory_reference(dst), "invalid dst opnd");
/* mov low32 => [mem32] */
opnd_set_size(&dst, OPSZ_4);
mov1 = INSTR_CREATE_mov_st(dcontext, dst,
(src_inst == NULL) ?
OPND_CREATE_INT32((int)val) :
opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, mov1);
/* mov high32 => [mem32+4] */
if (opnd_is_base_disp(dst)) {
int disp = opnd_get_disp(dst);
CLIENT_ASSERT(disp + 4 > disp, "disp overflow");
opnd_set_disp(&dst, disp+4);
} else {
byte *addr = opnd_get_addr(dst);
CLIENT_ASSERT(!POINTER_OVERFLOW_ON_ADD(addr, 4),
"addr overflow");
dst = OPND_CREATE_ABSMEM(addr+4, OPSZ_4);
}
mov2 = INSTR_CREATE_mov_st(dcontext, dst,
(src_inst == NULL) ?
OPND_CREATE_INT32((int)(val >> 32)) :
opnd_create_instr_ex(src_inst, OPSZ_4, 32));
PRE(ilist, instr, mov2);
}
} else {
#endif
mov1 = INSTR_CREATE_mov_imm(dcontext, dst,
(src_inst == NULL) ?
OPND_CREATE_INTPTR(val) :
opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, mov1);
mov2 = NULL;
#ifdef X64
}
#endif
if (first != NULL)
*first = mov1;
if (last != NULL)
*last = mov2;
}
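/* Usage sketch (hypothetical slot/value, for illustration only):
 *   insert_mov_immed_arch(dcontext, NULL, NULL, (ptr_int_t)value,
 *                         OPND_CREATE_ABSMEM(slot, OPSZ_8), ilist, where, NULL, NULL);
 * emits a single store when value fits in a sign-extended 32-bit immediate, else the
 * two 32-bit stores built above.
 */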
/* If src_inst != NULL, uses it (and assumes it will be encoded at
* encode_estimate to determine whether > 32 bits or not: so if unsure where
* it will be encoded, pass a high address) as the immediate; else
* uses val.
*/
void
insert_push_immed_arch(dcontext_t *dcontext, instr_t *src_inst, byte *encode_estimate,
ptr_int_t val, instrlist_t *ilist, instr_t *instr,
OUT instr_t **first, OUT instr_t **last)
{
instr_t *push, *mov;
if (src_inst != NULL)
val = (ptr_int_t) encode_estimate;
#ifdef X64
if (X64_MODE_DC(dcontext)) {
/* do push-64-bit-immed in two pieces. tiny corner-case risk of racy
* access to TOS if this thread is suspended in between or another
* thread is trying to read its stack, but o/w we have to spill and
* restore a register.
*/
push = INSTR_CREATE_push_imm(dcontext,
(src_inst == NULL) ?
OPND_CREATE_INT32((int)val) :
opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, push);
/* push is sign-extended, so we can skip top half if it is all 0 or 1
* in top 33 bits
*/
if (val <= INT_MAX && val >= INT_MIN) {
mov = NULL;
} else {
mov = INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 4),
(src_inst == NULL) ?
OPND_CREATE_INT32((int)(val >> 32)) :
opnd_create_instr_ex(src_inst, OPSZ_4, 32));
PRE(ilist, instr, mov);
}
} else {
#endif
push = INSTR_CREATE_push_imm(dcontext,
(src_inst == NULL) ?
OPND_CREATE_INT32(val) :
opnd_create_instr_ex(src_inst, OPSZ_4, 0));
PRE(ilist, instr, push);
mov = NULL;
#ifdef X64
}
#endif
if (first != NULL)
*first = push;
if (last != NULL)
*last = mov;
}
/* Far calls and rets have double total size */
static opnd_size_t
stack_entry_size(instr_t *instr, opnd_size_t opsize)
{
if (instr_get_opcode(instr) == OP_call_far ||
instr_get_opcode(instr) == OP_call_far_ind ||
instr_get_opcode(instr) == OP_ret_far) {
/* cut OPSZ_8_rex16_short4 in half */
if (opsize == OPSZ_4)
return OPSZ_2;
else if (opsize == OPSZ_8)
return OPSZ_4;
else {
#ifdef X64
ASSERT(opsize == OPSZ_16);
return OPSZ_8;
#else
ASSERT_NOT_REACHED();
#endif
}
} else if (instr_get_opcode(instr) == OP_iret) {
/* convert OPSZ_12_rex40_short6 */
if (opsize == OPSZ_6)
return OPSZ_2;
else if (opsize == OPSZ_12)
return OPSZ_4;
else {
#ifdef X64
ASSERT(opsize == OPSZ_40);
return OPSZ_8;
#else
ASSERT_NOT_REACHED();
#endif
}
}
return opsize;
}
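/* Informal example: a 32-bit far call pushes CS:EIP through an OPSZ_8 stack operand,
 * so the per-slot size handed to insert_push_cs()/insert_push_retaddr() below ends up
 * as OPSZ_4.
 */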
/* Used for fault translation */
bool
instr_check_xsp_mangling(dcontext_t *dcontext, instr_t *inst, int *xsp_adjust)
{
ASSERT(xsp_adjust != NULL);
if (instr_get_opcode(inst) == OP_push ||
instr_get_opcode(inst) == OP_push_imm) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: push or push_imm\n");
*xsp_adjust -= opnd_size_in_bytes(opnd_get_size(instr_get_dst(inst, 1)));
} else if (instr_get_opcode(inst) == OP_pop) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: pop\n");
*xsp_adjust += opnd_size_in_bytes(opnd_get_size(instr_get_src(inst, 1)));
}
/* 1st part of push emulation from insert_push_retaddr */
else if (instr_get_opcode(inst) == OP_lea &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_get_base(instr_get_src(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_src(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: lea xsp adjust\n");
*xsp_adjust += opnd_get_disp(instr_get_src(inst, 0));
}
/* 2nd part of push emulation from insert_push_retaddr */
else if (instr_get_opcode(inst) == OP_mov_st &&
opnd_is_base_disp(instr_get_dst(inst, 0)) &&
opnd_get_base(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_dst(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: store to stack\n");
/* nothing to track: paired lea is what we undo */
}
/* retrieval of target for call* or jmp* */
else if ((instr_get_opcode(inst) == OP_movzx &&
reg_overlap(opnd_get_reg(instr_get_dst(inst, 0)), REG_XCX)) ||
(instr_get_opcode(inst) == OP_mov_ld &&
reg_overlap(opnd_get_reg(instr_get_dst(inst, 0)), REG_XCX))) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: ib tgt to *cx\n");
/* nothing: our xcx spill restore will undo */
}
/* part of pop emulation for iretd/lretd in x64 mode */
else if (instr_get_opcode(inst) == OP_mov_ld &&
opnd_is_base_disp(instr_get_src(inst, 0)) &&
opnd_get_base(instr_get_src(inst, 0)) == REG_XSP &&
opnd_get_index(instr_get_src(inst, 0)) == REG_NULL) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: load from stack\n");
/* nothing to track: paired lea is what we undo */
}
/* part of data16 ret. once we have cs preservation (PR 271317) we'll
* need to not fail when walking over a movzx to a pop cs (right now we
* do not read the stack for the pop cs).
*/
else if (instr_get_opcode(inst) == OP_movzx &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_CX) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: movzx to cx\n");
/* nothing: our xcx spill restore will undo */
}
/* fake pop of cs for iret */
else if (instr_get_opcode(inst) == OP_add &&
opnd_is_reg(instr_get_dst(inst, 0)) &&
opnd_get_reg(instr_get_dst(inst, 0)) == REG_XSP &&
opnd_is_immed_int(instr_get_src(inst, 0))) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: add to xsp\n");
ASSERT(CHECK_TRUNCATE_TYPE_int(opnd_get_immed_int(instr_get_src(inst, 0))));
*xsp_adjust += (int) opnd_get_immed_int(instr_get_src(inst, 0));
}
/* popf for iret */
else if (instr_get_opcode(inst) == OP_popf) {
LOG(THREAD_GET, LOG_INTERP, 4, "\tstate track: popf\n");
*xsp_adjust += opnd_size_in_bytes(opnd_get_size(instr_get_src(inst, 1)));
} else {
return false;
}
return true;
}
/* N.B.: keep in synch with instr_check_xsp_mangling() */
void
insert_push_retaddr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t retaddr, opnd_size_t opsize)
{
if (opsize == OPSZ_2) {
ptr_int_t val = retaddr & (ptr_int_t) 0x0000ffff;
/* can't do a non-default operand size with a push immed so we emulate */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -2,
OPSZ_lea)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM16(REG_XSP, 2),
OPND_CREATE_INT16(val)));
} else if (opsize == OPSZ_PTR
IF_X64(|| (!X64_CACHE_MODE_DC(dcontext) && opsize == OPSZ_4))) {
insert_push_immed_ptrsz(dcontext, retaddr, ilist, instr, NULL, NULL);
} else {
#ifdef X64
ptr_int_t val = retaddr & (ptr_int_t) 0xffffffff;
ASSERT(opsize == OPSZ_4);
/* can't do a non-default operand size with a push immed so we emulate */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, -4,
OPSZ_lea)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 0),
OPND_CREATE_INT32((int)val)));
#else
ASSERT_NOT_REACHED();
#endif
}
}
#ifdef CLIENT_INTERFACE
/* N.B.: keep in synch with instr_check_xsp_mangling() */
static void
insert_mov_ptr_uint_beyond_TOS(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t value, opnd_size_t opsize)
{
/* we insert non-meta b/c we want faults to go to app (should only fault
* if the ret itself faulted, barring races) for simplicity: o/w our
* our-mangling sequence gets broken up and more complex.
*/
if (opsize == OPSZ_2) {
ptr_int_t val = value & (ptr_int_t) 0x0000ffff;
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM16(REG_XSP, -2),
OPND_CREATE_INT16(val)));
} else if (opsize == OPSZ_4) {
ptr_int_t val = value & (ptr_int_t) 0xffffffff;
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -4),
OPND_CREATE_INT32(val)));
} else {
# ifdef X64
ptr_int_t val_low = value & (ptr_int_t) 0xffffffff;
ASSERT(opsize == OPSZ_8);
if (CHECK_TRUNCATE_TYPE_int(value)) {
/* prefer a single write w/ sign-extension */
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM64(REG_XSP, -8),
OPND_CREATE_INT32(val_low)));
} else {
/* we need two 32-bit writes */
ptr_int_t val_high = (value >> 32);
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -8),
OPND_CREATE_INT32(val_low)));
PRE(ilist, instr,
INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, -4),
OPND_CREATE_INT32(val_high)));
}
# else
ASSERT_NOT_REACHED();
# endif
}
}
#endif /* CLIENT_INTERFACE */
static void
insert_push_cs(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
ptr_int_t retaddr, opnd_size_t opsize)
{
#ifdef X64
if (X64_CACHE_MODE_DC(dcontext)) {
/* "push cs" is invalid; for now we push the typical cs values.
* i#823 covers doing this more generally.
*/
insert_push_retaddr(dcontext, ilist, instr,
X64_MODE_DC(dcontext) ? CS64_SELECTOR : CS32_SELECTOR, opsize);
} else {
#endif
opnd_t stackop;
/* we go ahead and push cs, but we won't pop into cs */
instr_t *push = INSTR_CREATE_push(dcontext, opnd_create_reg(SEG_CS));
/* 2nd dest is the stack operand size */
stackop = instr_get_dst(push, 1);
opnd_set_size(&stackop, opsize);
instr_set_dst(push, 1, stackop);
PRE(ilist, instr, push);
#ifdef X64
}
#endif
}
/* We spill to XCX(private dcontext) slot for private fragments,
* and to TLS MANGLE_XCX_SPILL_SLOT for shared fragments.
* (Except for DYNAMO_OPTION(private_ib_in_tls), for which all use tls,
* but that has a performance hit because of the extra data cache line)
* We can get away with the split by having the shared ibl routine copy
* xcx to the private dcontext, and by having the private ibl never
* target shared fragments.
* We also have to modify the xcx spill from tls to private dcontext when
* adding a shared basic block to a trace.
*
* FIXME: if we do make non-trace-head basic blocks valid indirect branch
* targets, we should have the private ibl have special code to test the
* flags and copy xcx to the tls slot if necessary.
*/
#define SAVE_TO_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs) \
((DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, (flags))) ? \
instr_create_save_to_tls(dc, reg, tls_offs) : \
instr_create_save_to_dcontext((dc), (reg), (dc_offs)))
#define SAVE_TO_DC_OR_TLS_OR_REG(dc, flags, reg, tls_offs, dc_offs, dest_reg) \
((X64_CACHE_MODE_DC(dc) && !X64_MODE_DC(dc) \
IF_X64(&& DYNAMO_OPTION(x86_to_x64_ibl_opt))) ? \
INSTR_CREATE_mov_ld(dc, opnd_create_reg(dest_reg), opnd_create_reg(reg)) : \
SAVE_TO_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs))
#define RESTORE_FROM_DC_OR_TLS(dc, flags, reg, tls_offs, dc_offs) \
((DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, (flags))) ? \
instr_create_restore_from_tls(dc, reg, tls_offs) : \
instr_create_restore_from_dcontext((dc), (reg), (dc_offs)))
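/* Informal expansion sketch: for a shared fragment,
 *   SAVE_TO_DC_OR_TLS(dc, FRAG_SHARED, REG_XCX, MANGLE_XCX_SPILL_SLOT, XCX_OFFSET)
 * becomes instr_create_save_to_tls(dc, REG_XCX, MANGLE_XCX_SPILL_SLOT); for a private
 * fragment (with private_ib_in_tls off) it falls back to the dcontext XCX_OFFSET slot.
 */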
static void
mangle_far_direct_helper(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*
* For WOW64, I tried keeping this a direct jmp for nice linking by doing the
* mode change in-fragment and then using a 64-bit stub with a 32-bit fragment,
* but that gets messy b/c a lot of code assumes it can create or calculate the
* size of exit stubs given nothing but the fragment flags. I tried adding
* FRAG_ENDS_IN_FAR_DIRECT but still need to pass another param to all the stub
* macros and routines for mid-trace exits and for prefixes for -disable_traces.
* So, going for treating as indirect and using the far_ibl. It's a trace
* barrier anyway, and rare. We treat it as indirect in all modes (including
* x86 builds) for simplicity (and eventually for full i#823 we'll want
* to issue cs changes there too).
*/
app_pc pc = opnd_get_pc(instr_get_target(instr));
#ifdef X64
if (!X64_MODE_DC(dcontext) &&
opnd_get_segment_selector(instr_get_target(instr)) == CS64_SELECTOR) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX,
MANGLE_FAR_SPILL_SLOT, XBX_OFFSET, REG_R10));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_INT32(CS64_SELECTOR)));
}
#endif
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX,
MANGLE_XCX_SPILL_SLOT, XCX_OFFSET, REG_R9));
ASSERT((ptr_uint_t)pc < UINT_MAX); /* 32-bit code! */
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_INT32((ptr_uint_t)pc)));
}
/***************************************************************************
* DIRECT CALL
* Returns new next_instr
*/
instr_t *
mangle_direct_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
ptr_uint_t retaddr;
app_pc target = NULL;
opnd_t pushop = instr_get_dst(instr, 1);
opnd_size_t pushsz = stack_entry_size(instr, opnd_get_size(pushop));
if (opnd_is_near_pc(instr_get_target(instr)))
target = opnd_get_pc(instr_get_target(instr));
else if (opnd_is_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
/* assumption: target's raw bits are meaningful */
target = instr_get_raw_bits(tgt);
ASSERT(target != 0);
/* FIXME case 6962: for far instr, we ignore the segment and
* assume it matches current cs */
} else if (opnd_is_far_pc(instr_get_target(instr))) {
target = opnd_get_pc(instr_get_target(instr));
/* FIXME case 6962: we ignore the segment and assume it matches current cs */
} else
ASSERT_NOT_REACHED();
if (!mangle_calls) {
/* off-trace call that will be executed natively */
/* relative target must be re-encoded */
instr_set_raw_bits_valid(instr, false);
#ifdef STEAL_REGISTER
/* FIXME: need to push edi prior to call and pop after.
* However, need to push edi prior to any args to this call,
* and it may be hard to find pre-arg-pushing spot...
* edi is supposed to be callee-saved, we're trusting this
* off-trace call to return, we may as well trust it to
* not trash edi -- these no-inline calls are dynamo's
* own routines, after all.
*/
#endif
return next_instr;
}
retaddr = get_call_return_address(dcontext, ilist, instr);
#ifdef CHECK_RETURNS_SSE2
/* ASSUMPTION: a call to the next instr is not going to ever have a
* matching ret! */
if (target == (app_pc)retaddr) {
LOG(THREAD, LOG_INTERP, 3, "found call to next instruction "PFX"\n", target);
} else {
check_return_handle_call(dcontext, ilist, next_instr);
}
/* now do the normal thing for a call */
#endif
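    /* The overall transformation is roughly (a sketch):
     *   call target   =>   [push %cs]     # far calls only
     *                      push $retaddr
     * For near calls the control transfer to the target is handled by the
     * exit cti already in the ilist, so mangle only needs to materialize the
     * return address; far calls additionally go through the far ibl (see
     * mangle_far_direct_helper above).
     */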
if (instr_get_opcode(instr) == OP_call_far) {
/* N.B.: we do not support other than flat 0-based CS, DS, SS, and ES.
* if the app wants to change segments, we won't actually issue
* a segment change, and so will only work properly if the new segment
* is also 0-based. To properly issue new segments, we'd need a special
* ibl that ends in a far cti, and all prior address manipulations would
* need to be relative to the new segment, w/o messing up current segment.
* FIXME: can we do better without too much work?
* XXX: yes, for wow64: i#823: TODO mangle this like a far direct jmp
*/
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far direct call");
STATS_INC(num_far_dir_calls);
mangle_far_direct_helper(dcontext, ilist, instr, next_instr, flags);
insert_push_cs(dcontext, ilist, instr, 0, pushsz);
}
/* convert a direct call to a push of the return address */
insert_push_retaddr(dcontext, ilist, instr, retaddr, pushsz);
/* remove the call */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
}
#ifdef UNIX
/***************************************************************************
 * Mangle a memory reference operand that uses the fs/gs segments:
 * get the segment base of fs/gs into reg, and
 * replace oldop with a newop that uses reg instead of fs/gs.
 * The reg must not be used in oldop, otherwise the reg value
 * would be corrupted.
*/
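/* E.g. (a sketch), with reg == %xdx:
 *   %fs:0x10(%xax,%xbx,2)
 * is rewritten by inserting, before "where":
 *   mov <fs_base_tls_slot> -> %xdx
 *   lea (%xax,%xdx,1) -> %xdx
 * and returning the operand:
 *   0x10(%xdx,%xbx,2)
 */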
opnd_t
mangle_seg_ref_opnd(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *where, opnd_t oldop, reg_id_t reg)
{
opnd_t newop;
reg_id_t seg;
ASSERT(opnd_is_far_base_disp(oldop));
seg = opnd_get_segment(oldop);
/* we only mangle fs/gs */
if (seg != SEG_GS && seg != SEG_FS)
return oldop;
#ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return oldop;
#endif
/* The reg should not be used by the oldop */
ASSERT(!opnd_uses_reg(oldop, reg));
/* XXX: this mangling is pattern-matched in translation's instr_is_seg_ref_load() */
/* get app's segment base into reg. */
PRE(ilist, where,
instr_create_restore_from_tls(dcontext, reg,
os_get_app_tls_base_offset(seg)));
if (opnd_get_index(oldop) != REG_NULL &&
opnd_get_base(oldop) != REG_NULL) {
/* if both base and index are used, use
* lea [base, reg, 1] => reg
* to get the base + seg_base into reg.
*/
PRE(ilist, where,
INSTR_CREATE_lea(dcontext, opnd_create_reg(reg),
opnd_create_base_disp(opnd_get_base(oldop),
reg, 1, 0, OPSZ_lea)));
}
if (opnd_get_index(oldop) != REG_NULL) {
newop = opnd_create_base_disp(reg,
opnd_get_index(oldop),
opnd_get_scale(oldop),
opnd_get_disp(oldop),
opnd_get_size(oldop));
} else {
newop = opnd_create_base_disp(opnd_get_base(oldop),
reg, 1,
opnd_get_disp(oldop),
opnd_get_size(oldop));
}
return newop;
}
#endif /* UNIX */
/***************************************************************************
* INDIRECT CALL
*/
static reg_id_t
mangle_far_indirect_helper(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags, opnd_t *target_out)
{
opnd_t target = *target_out;
opnd_size_t addr_size;
reg_id_t reg_target = REG_NULL;
ASSERT(instr_get_opcode(instr) == OP_jmp_far_ind ||
instr_get_opcode(instr) == OP_call_far_ind);
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*/
    /* The opnd type is i_Ep; it's not a far base disp b/c the segment is at
     * a memory location, not specified as a segment prefix on the instr.
     * We assume register operands are marked as invalid instrs long
     * before this point.
*/
ASSERT(opnd_is_base_disp(target));
/* Segment selector is the final 2 bytes.
* For non-mixed-mode, we ignore it.
* We assume DS base == target cti CS base.
*/
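    /* E.g., for a 32-bit "ljmp *(%eax)" the far pointer at (%eax) is laid out
     * as (a sketch):
     *   [%eax+0..3]  32-bit target offset   -> loaded into %ecx by the caller
     *   [%eax+4..5]  16-bit target selector -> loaded into %ebx below
     *                                          (x64 mixed-mode only)
     */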
/* if data16 then just 2 bytes for address
* if x64 mode and Intel and rex then 8 bytes for address */
ASSERT((X64_MODE_DC(dcontext) && opnd_get_size(target) == OPSZ_10 &&
proc_get_vendor() != VENDOR_AMD) ||
opnd_get_size(target) == OPSZ_6 || opnd_get_size(target) == OPSZ_4);
if (opnd_get_size(target) == OPSZ_10) {
addr_size = OPSZ_8;
reg_target = REG_RCX;
} else if (opnd_get_size(target) == OPSZ_6) {
addr_size = OPSZ_4;
reg_target = REG_ECX;
} else /* target has OPSZ_4 */ {
addr_size = OPSZ_2;
reg_target = REG_XCX; /* caller uses movzx so size doesn't have to match */
}
#ifdef X64
if (mixed_mode_enabled()) {
/* While we don't support arbitrary segments, we do support
* mode changes using standard cs selector values (i#823).
* We save the selector into xbx.
*/
opnd_t sel = target;
opnd_set_disp(&sel, opnd_get_disp(target) + opnd_size_in_bytes(addr_size));
opnd_set_size(&sel, OPSZ_2);
/* all scratch space should be in TLS only */
ASSERT(TEST(FRAG_SHARED, flags) || DYNAMO_OPTION(private_ib_in_tls));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX,
MANGLE_FAR_SPILL_SLOT, XBX_OFFSET, REG_R10));
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX), sel));
if (instr_uses_reg(instr, REG_XBX)) {
/* instr can't be both riprel (uses xax slot for mangling) and use
* a register, so we spill to the riprel (== xax) slot
*/
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XBX, MANGLE_RIPREL_SPILL_SLOT,
XAX_OFFSET));
POST(ilist, instr,
instr_create_restore_from_tls(dcontext, REG_XBX,
MANGLE_RIPREL_SPILL_SLOT));
}
}
#endif
opnd_set_size(target_out, addr_size);
return reg_target;
}
instr_t *
mangle_indirect_call(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, bool mangle_calls, uint flags)
{
opnd_t target;
ptr_uint_t retaddr;
opnd_t pushop = instr_get_dst(instr, 1);
opnd_size_t pushsz = stack_entry_size(instr, opnd_get_size(pushop));
reg_id_t reg_target = REG_XCX;
if (!mangle_calls)
return next_instr;
retaddr = get_call_return_address(dcontext, ilist, instr);
/* Convert near, indirect calls. The jump to the exit_stub that
* jumps to indirect_branch_lookup was already inserted into the
* instr list by interp EXCEPT for the case in which we're converting
* an indirect call to a direct call. In that case, mangle later
* inserts a direct exit stub.
*/
/* If this call is marked for conversion, do minimal processing.
* FIXME Just a note that converted calls are not subjected to any of
* the specialized builds' processing further down.
*/
if (TEST(INSTR_IND_CALL_DIRECT, instr->flags)) {
/* convert the call to a push of the return address */
insert_push_retaddr(dcontext, ilist, instr, retaddr, pushsz);
/* remove the call */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return next_instr;
}
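    /* For the normal (non-converted) path the transformation is roughly
     * (a sketch):
     *   call *Ev   =>   <spill xcx>
     *                   mov Ev -> %xcx   # movzx for 16-bit targets
     *                   [push %cs]       # far calls only
     *                   push $retaddr
     * followed by the already-present exit cti to the indirect-branch lookup.
     */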
/* put the push AFTER the instruction that calculates
* the target, b/c if target depends on xsp we must use
* the value of xsp prior to this call instruction!
* we insert before next_instr to accomplish this.
*/
if (instr_get_opcode(instr) == OP_call_far_ind) {
/* goes right before the push of the ret addr */
insert_push_cs(dcontext, ilist, next_instr, 0, pushsz);
/* see notes below -- we don't really support switching segments,
* though we do go ahead and push cs, we won't pop into cs
*/
}
insert_push_retaddr(dcontext, ilist, next_instr, retaddr, pushsz);
/* save away xcx so that we can use it */
/* (it's restored in x86.s (indirect_branch_lookup) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX,
MANGLE_XCX_SPILL_SLOT, XCX_OFFSET, REG_R9));
#ifdef STEAL_REGISTER
/* Steal edi if call uses it, using original call instruction */
steal_reg(dcontext, instr, ilist);
if (ilist->flags)
restore_state(dcontext, next_instr, ilist);
/* It's impossible for our register stealing to use ecx
* because no call can simultaneously use 3 registers, right?
* Maximum is 2, in something like "call *(edi,ecx,4)"?
* If it is possible, need to make sure stealing's use of ecx
* doesn't conflict w/ our use
*/
#endif
/* change: call /2, Ev -> movl Ev, %xcx */
target = instr_get_src(instr, 0);
if (instr_get_opcode(instr) == OP_call_far_ind) {
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far indirect call");
STATS_INC(num_far_ind_calls);
reg_target = mangle_far_indirect_helper(dcontext, ilist, instr,
next_instr, flags, &target);
}
#ifdef UNIX
/* i#107, mangle the memory reference opnd that uses segment register. */
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(target)) {
/* FIXME: we use REG_XCX to store the segment base, which might be used
* in target and cause assertion failure in mangle_seg_ref_opnd.
*/
ASSERT_BUG_NUM(107, !opnd_uses_reg(target, REG_XCX));
target = mangle_seg_ref_opnd(dcontext, ilist, instr, target, REG_XCX);
}
#endif
/* cannot call instr_reset, it will kill prev & next ptrs */
instr_free(dcontext, instr);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_opcode(instr, opnd_get_size(target) == OPSZ_2 ? OP_movzx : OP_mov_ld);
instr_set_dst(instr, 0, opnd_create_reg(reg_target));
instr_set_src(instr, 0, target); /* src stays the same */
if (instrlist_get_translation_target(ilist) != NULL) {
/* make sure original raw bits are used for translation */
instr_set_translation(instr, instr_get_raw_bits(instr));
}
instr_set_our_mangling(instr, true);
#ifdef CHECK_RETURNS_SSE2
check_return_handle_call(dcontext, ilist, next_instr);
#endif
return next_instr;
}
/***************************************************************************
* RETURN
*/
#ifdef X64
/* Saves the selector from the top of the stack into xbx, after spilling xbx,
* for far_ibl.
*/
static void
mangle_far_return_save_selector(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
uint flags)
{
if (mixed_mode_enabled()) {
/* While we don't support arbitrary segments, we do support
* mode changes using standard cs selector values (i#823).
* We save the selector into xbx.
*/
/* We could do a pop but state xl8 is already set up to restore lea */
/* all scratch space should be in TLS only */
ASSERT(TEST(FRAG_SHARED, flags) || DYNAMO_OPTION(private_ib_in_tls));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XBX,
MANGLE_FAR_SPILL_SLOT, XBX_OFFSET, REG_R10));
PRE(ilist, instr,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_MEM16(REG_XSP, 0)));
}
}
#endif
void
mangle_return(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
instr_t *pop;
opnd_t retaddr;
opnd_size_t retsz;
#ifdef CHECK_RETURNS_SSE2
check_return_handle_return(dcontext, ilist, next_instr);
/* now do the normal ret mangling */
#endif
/* Convert returns. If aggressive we could take advantage of the
* fact that xcx is dead at the return and not bother saving it?
* The jump to the exit_stub that jumps to indirect_branch_lookup
* was already inserted into the instr list by interp. */
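    /* The basic transformation is roughly (a sketch):
     *   ret [$N]   =>   <spill xcx>
     *                   pop -> %xcx           # or, for iretd/lretd in x64,
     *                                         #   a 4-byte load + lea
     *                   [lea N(%xsp) -> %xsp] # for "ret $N"
     * followed by the already-present exit cti to the indirect-branch lookup.
     * Far rets and irets additionally discard (or, for mixed-mode, save) the
     * selector(s), as handled below.
     */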
/* save away xcx so that we can use it */
/* (it's restored in x86.s (indirect_branch_lookup) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX,
MANGLE_XCX_SPILL_SLOT, XCX_OFFSET, REG_R9));
/* see if ret has an immed int operand, assumed to be 1st src */
if (instr_num_srcs(instr) > 0 && opnd_is_immed_int(instr_get_src(instr, 0))) {
/* if has an operand, return removes some stack space,
* AFTER the return address is popped
*/
int val = (int) opnd_get_immed_int(instr_get_src(instr, 0));
IF_X64(ASSERT_TRUNCATE(val, int, opnd_get_immed_int(instr_get_src(instr, 0))));
/* addl sizeof_param_area, %xsp
* except that clobbers the flags, so we use lea */
PRE(ilist, next_instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, val, OPSZ_lea)));
}
/* don't need to steal edi since return cannot use registers */
/* the retaddr operand is always the final source for all OP_ret* instrs */
retaddr = instr_get_src(instr, instr_num_srcs(instr) - 1);
retsz = stack_entry_size(instr, opnd_get_size(retaddr));
if (X64_CACHE_MODE_DC(dcontext) && retsz == OPSZ_4) {
if (instr_get_opcode(instr) == OP_iret || instr_get_opcode(instr) == OP_ret_far) {
/* N.B.: For some unfathomable reason iret and ret_far default to operand
* size 4 in 64-bit mode (making them, along w/ call_far, the only stack
* operation instructions to do so). So if we see an iret or far ret with
* OPSZ_4 in 64-bit mode we need a 4-byte pop, but since we can't actually
* generate a 4-byte pop we have to emulate it here. */
SYSLOG_INTERNAL_WARNING_ONCE("Encountered iretd/lretd in 64-bit mode!");
}
/* Note moving into ecx automatically zero extends which is what we want. */
PRE(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_MEM32(REG_RSP, 0)));
/* iret could use add since going to pop the eflags, but not lret.
* lret could combine w/ segment lea below: but not perf-crit instr, and
* anticipating cs preservation PR 271317 I'm leaving separate. */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 4, OPSZ_lea)));
} else {
/* change RET into a POP, keeping the operand size */
opnd_t memop = retaddr;
pop = INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_XCX));
/* need per-entry size, not total size (double for far ret) */
opnd_set_size(&memop, retsz);
instr_set_src(pop, 1, memop);
if (retsz == OPSZ_2)
instr_set_dst(pop, 0, opnd_create_reg(REG_CX));
/* We can't do a 4-byte pop in 64-bit mode, but excepting iretd and lretd
* handled above we should never see one. */
ASSERT(!X64_MODE_DC(dcontext) || retsz != OPSZ_4);
PRE(ilist, instr, pop);
if (retsz == OPSZ_2) {
/* we need to zero out the top 2 bytes */
PRE(ilist, instr, INSTR_CREATE_movzx
(dcontext,
opnd_create_reg(REG_ECX), opnd_create_reg(REG_CX)));
}
}
#ifdef CLIENT_INTERFACE
if (TEST(INSTR_CLOBBER_RETADDR, instr->flags)) {
/* we put the value in the note field earlier */
ptr_uint_t val = (ptr_uint_t) instr->note;
insert_mov_ptr_uint_beyond_TOS(dcontext, ilist, instr, val, retsz);
}
#endif
if (instr_get_opcode(instr) == OP_ret_far) {
/* FIXME i#823: we do not support other than flat 0-based CS, DS, SS, and ES.
* If the app wants to change segments in a WOW64 process, we will
* do the right thing for standard cs selector values (xref i#49).
* For other cs changes or in other modes, we do go through far_ibl
* today although we do not enact the cs change (nor bother to pass
* the selector in xbx).
*/
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far ret");
STATS_INC(num_far_rets);
#ifdef X64
mangle_far_return_save_selector(dcontext, ilist, instr, flags);
#endif
/* pop selector from stack, but not into cs, just junk it
* (the 16-bit selector is expanded to 32 bits on the push, unless data16)
*/
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp
(REG_XSP, REG_NULL, 0,
opnd_size_in_bytes(retsz), OPSZ_lea)));
}
if (instr_get_opcode(instr) == OP_iret) {
instr_t *popf;
/* Xref PR 215553 and PR 191977 - we actually see this on 64-bit Vista */
LOG(THREAD, LOG_INTERP, 2, "Encountered iret at "PFX" - mangling\n",
instr_get_translation(instr));
STATS_INC(num_irets);
/* In 32-bit mode this is a pop->EIP pop->CS pop->eflags.
* 64-bit mode (with either 32-bit or 64-bit operand size,
* despite the (wrong) Intel manual pseudocode: see i#833 and
* the win32.mixedmode test) extends
* the above and additionally adds pop->RSP pop->ss. N.B.: like OP_far_ret we
* ignore the CS (except mixed-mode WOW64) and SS segment changes
* (see the comments there).
*/
#ifdef X64
mangle_far_return_save_selector(dcontext, ilist, instr, flags);
#endif
/* Return address is already popped, next up is CS segment which we ignore
* (unless in mixed-mode, handled above) so
* adjust stack pointer. Note we can use an add here since the eflags will
* be written below. */
PRE(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT8
(opnd_size_in_bytes(retsz))));
/* Next up is xflags, we use a popf. Popf should be setting the right flags
* (it's difficult to tell because in the docs iret lists the flags it does
* set while popf lists the flags it doesn't set). The docs aren't entirely
* clear, but any flag that we or a user mode program would care about should
* be right. */
popf = INSTR_CREATE_popf(dcontext);
if (X64_CACHE_MODE_DC(dcontext) && retsz == OPSZ_4) {
/* We can't actually create a 32-bit popf and there's no easy way to
* simulate one. For now we'll do a 64-bit popf and fixup the stack offset.
* If AMD/INTEL ever start using the top half of the rflags register then
* we could have problems here. We could also break stack transparency and
* do a mov, push, popf to zero extend the value. */
PRE(ilist, instr, popf);
/* flags are already set, must use lea to fix stack */
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XSP),
opnd_create_base_disp
(REG_XSP, REG_NULL, 0, -4, OPSZ_lea)));
} else {
/* get popf size right the same way we do it for the return address */
opnd_t memop = retaddr;
opnd_set_size(&memop, retsz);
DOCHECK(1, if (retsz == OPSZ_2)
ASSERT_NOT_TESTED(););
instr_set_src(popf, 1, memop);
PRE(ilist, instr, popf);
}
#ifdef X64
/* In x64 mode, iret additionally does pop->RSP and pop->ss. */
if (X64_MODE_DC(dcontext)) {
if (retsz == OPSZ_8)
PRE(ilist, instr, INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_RSP)));
else if (retsz == OPSZ_4) {
PRE(ilist, instr, INSTR_CREATE_mov_ld
(dcontext, opnd_create_reg(REG_ESP), OPND_CREATE_MEM32(REG_RSP, 0)));
} else {
ASSERT_NOT_TESTED();
PRE(ilist, instr, INSTR_CREATE_movzx
(dcontext, opnd_create_reg(REG_ESP), OPND_CREATE_MEM16(REG_RSP, 0)));
}
/* We're ignoring the set of SS and since we just set RSP we don't need
* to do anything to adjust the stack for the pop (since the pop would have
* occurred with the old RSP). */
}
#endif
}
/* remove the ret */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
/***************************************************************************
* INDIRECT JUMP
*/
instr_t *
mangle_indirect_jump(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
opnd_t target;
reg_id_t reg_target = REG_XCX;
/* Convert indirect branches (that are not returns). Again, the
* jump to the exit_stub that jumps to indirect_branch_lookup
* was already inserted into the instr list by interp. */
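    /* The transformation is roughly (a sketch):
     *   jmp *Ev   =>   <spill xcx>
     *                  mov Ev -> %xcx   # movzx for 16-bit targets
     * followed by the already-present exit cti to the indirect-branch lookup.
     */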
/* save away xcx so that we can use it */
/* (it's restored in x86.s (indirect_branch_lookup) */
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS_OR_REG(dcontext, flags, REG_XCX,
MANGLE_XCX_SPILL_SLOT, XCX_OFFSET, REG_R9));
#ifdef STEAL_REGISTER
/* Steal edi if branch uses it, using original instruction */
steal_reg(dcontext, instr, ilist);
if (ilist->flags)
restore_state(dcontext, next_instr, ilist);
#endif
/* change: jmp /4, i_Ev -> movl i_Ev, %xcx */
target = instr_get_target(instr);
if (instr_get_opcode(instr) == OP_jmp_far_ind) {
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far indirect jump");
STATS_INC(num_far_ind_jmps);
reg_target = mangle_far_indirect_helper(dcontext, ilist, instr,
next_instr, flags, &target);
}
#ifdef UNIX
/* i#107, mangle the memory reference opnd that uses segment register. */
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(target)) {
/* FIXME: we use REG_XCX to store segment base, which might be used
* in target and cause assertion failure in mangle_seg_ref_opnd.
*/
ASSERT_BUG_NUM(107, !opnd_uses_reg(target, REG_XCX));
target = mangle_seg_ref_opnd(dcontext, ilist, instr, target, REG_XCX);
}
#endif
/* cannot call instr_reset, it will kill prev & next ptrs */
instr_free(dcontext, instr);
instr_set_num_opnds(dcontext, instr, 1, 1);
instr_set_opcode(instr, opnd_get_size(target) == OPSZ_2 ? OP_movzx : OP_mov_ld);
instr_set_dst(instr, 0, opnd_create_reg(reg_target));
instr_set_src(instr, 0, target); /* src stays the same */
if (instrlist_get_translation_target(ilist) != NULL) {
/* make sure original raw bits are used for translation */
instr_set_translation(instr, instr_get_raw_bits(instr));
}
instr_set_our_mangling(instr, true);
/* It's impossible for our register stealing to use ecx
* because no branch can simultaneously use 3 registers, right?
* Maximum is 2, in something like "jmp *(edi,ebx,4)"?
* If it is possible, need to make sure stealing's use of ecx
* doesn't conflict w/ our use = FIXME
*/
return next_instr;
}
/***************************************************************************
* FAR DIRECT JUMP
*/
void
mangle_far_direct_jump(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr, uint flags)
{
SYSLOG_INTERNAL_WARNING_ONCE("Encountered a far direct jmp");
STATS_INC(num_far_dir_jmps);
mangle_far_direct_helper(dcontext, ilist, instr, next_instr, flags);
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
/***************************************************************************
* SYSCALL
*/
#ifdef UNIX
/* Inserts code to handle clone into ilist.
* instr is the syscall instr itself.
* Assumes that instructions exist beyond instr in ilist.
*
* CAUTION: don't use a lot of stack in the generated code because
 * get_clone_record() assumes that less than a page of stack
 * is used.
*/
void
mangle_insert_clone_code(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr
_IF_X64(gencode_mode_t mode))
{
/* int 0x80
* xchg xax,xcx
* jecxz child
* jmp parent
* child:
* xchg xax,xcx
* # i#149/PR 403015: the child is on the dstack so no need to swap stacks
* jmp new_thread_dynamo_start
* parent:
* xchg xax,xcx
* <post system call, etc.>
*/
instr_t *in = instr_get_next(instr);
instr_t *child = INSTR_CREATE_label(dcontext);
instr_t *parent = INSTR_CREATE_label(dcontext);
ASSERT(in != NULL);
PRE(ilist, in, INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX),
opnd_create_reg(REG_XCX)));
PRE(ilist, in,
INSTR_CREATE_jecxz(dcontext, opnd_create_instr(child)));
PRE(ilist, in,
INSTR_CREATE_jmp(dcontext, opnd_create_instr(parent)));
PRE(ilist, in, child);
PRE(ilist, in, INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX),
opnd_create_reg(REG_XCX)));
/* We used to insert this directly into fragments for inlined system
* calls, but not once we eliminated clean calls out of the DR cache
* for security purposes. Thus it can be a meta jmp, or an indirect jmp.
*/
insert_reachable_cti(dcontext, ilist, in, vmcode_get_start(),
(byte *) get_new_thread_start(dcontext _IF_X64(mode)),
true/*jmp*/, false/*!returns*/, false/*!precise*/,
DR_REG_NULL/*no scratch*/, NULL);
instr_set_meta(instr_get_prev(in));
PRE(ilist, in, parent);
PRE(ilist, in, INSTR_CREATE_xchg(dcontext, opnd_create_reg(REG_XAX),
opnd_create_reg(REG_XCX)));
}
#endif /* UNIX */
#ifdef WINDOWS
/* Note that ignore syscalls processing for XP and 2003 is a two-phase operation.
* For this reason, mangle_syscall() might be called with a 'next_instr' that's
* not an original app instruction but one inserted by the earlier mangling phase.
*/
#endif
/* XXX: any extra code here can interfere with mangle_syscall_code()
* and interrupted_inlined_syscall() which have assumptions about the
* exact code around inlined system calls.
*/
void
mangle_syscall_arch(dcontext_t *dcontext, instrlist_t *ilist, uint flags,
instr_t *instr, instr_t *next_instr)
{
#ifdef UNIX
/* Shared routine already checked method, handled INSTR_NI_SYSCALL*,
* and inserted the signal barrier and non-auto-restart nop.
* If we get here, we're dealing with an ignorable syscall.
*/
# ifdef MACOS
if (instr_get_opcode(instr) == OP_sysenter) {
/* The kernel returns control to whatever user-mode places in edx.
* We get control back here and then go to the ret ibl (since normally
* there's a call to a shared routine that does "pop edx").
*/
instr_t *post_sysenter = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
instrlist_insert_mov_instr_addr(dcontext, post_sysenter, NULL/*in cache*/,
opnd_create_reg(REG_XDX),
ilist, instr, NULL, NULL);
/* sysenter goes here */
PRE(ilist, next_instr, post_sysenter);
PRE(ilist, next_instr,
RESTORE_FROM_DC_OR_TLS(dcontext, flags, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
PRE(ilist, next_instr,
SAVE_TO_DC_OR_TLS(dcontext, flags, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, opnd_create_reg(REG_XCX),
opnd_create_reg(REG_XDX)));
} else if (TEST(INSTR_BRANCH_SPECIAL_EXIT, instr->flags)) {
int num = instr_get_interrupt_number(instr);
ASSERT(instr_get_opcode(instr) == OP_int);
if (num == 0x81 || num == 0x82) {
int reason = (num == 0x81) ? EXIT_REASON_NI_SYSCALL_INT_0x81 :
EXIT_REASON_NI_SYSCALL_INT_0x82;
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true/*save_xdi*/);
PRE(ilist, instr, INSTR_CREATE_mov_st
(dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL/*default*/,
EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(reason)));
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, reason,
EXIT_REASON_OFFSET));
}
}
}
# endif
# ifdef STEAL_REGISTER
/* in linux, system calls get their parameters via registers.
* edi is the last one used, but there are system calls that
* use it, so we put the real value into edi. plus things
* like fork() should get the real register values.
* it's also a good idea to put the real edi into %edi for
* debugger interrupts (int3).
*/
/* the only way we can save and then restore our dc
* ptr is to use the stack!
* this should be fine, all interrupt instructions push
* both eflags and return address on stack, so esp must
* be valid at this point. there could be an application
* assuming only 2 slots on stack will be used, we use a 3rd
* slot, could mess up that app...but what can we do?
* also, if kernel examines user stack, we could have problems.
* push edi # push dcontext ptr
* restore edi # restore app edi
* <syscall>
* push ebx
* mov edi, ebx
* mov 4(esp), edi # get dcontext ptr
* save ebx to edi slot
* pop ebx
* add 4,esp # clean up push of dcontext ptr
*/
IF_X64(ASSERT_NOT_IMPLEMENTED(false));
PRE(ilist, instr,
INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EDI)));
PRE(ilist, instr,
instr_create_restore_from_dcontext(dcontext, REG_EDI, XDI_OFFSET));
/* insert after in reverse order: */
POST(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_ESP),
OPND_CREATE_INT8(4)));
POST(ilist, instr,
INSTR_CREATE_pop(dcontext, opnd_create_reg(REG_EBX)));
POST(ilist, instr,
instr_create_save_to_dcontext(dcontext, REG_EBX, XDI_OFFSET));
POST(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_EDI),
OPND_CREATE_MEM32(REG_ESP, 4)));
POST(ilist, instr,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_EBX),
opnd_create_reg(REG_EDI)));
POST(ilist, instr,
INSTR_CREATE_push(dcontext, opnd_create_reg(REG_EBX)));
# endif /* STEAL_REGISTER */
#else /* WINDOWS */
/* special handling of system calls is performed in shared_syscall or
* in do_syscall
*/
/* FIXME: for ignorable syscalls,
* do we need support for exiting mid-fragment prior to a syscall
* like we do on Linux, to bound time in cache?
*/
if (does_syscall_ret_to_callsite()) {
uint len = instr_length(dcontext, instr);
if (TEST(INSTR_SHARED_SYSCALL, instr->flags)) {
ASSERT(DYNAMO_OPTION(shared_syscalls));
/* this syscall will be performed by the shared_syscall code
* we just need to place a return address into the dcontext
* xsi slot or the mangle-next-tag tls slot
*/
if (DYNAMO_OPTION(shared_fragment_shared_syscalls)) {
# ifdef X64
ASSERT(instr_raw_bits_valid(instr));
/* PR 244741: no 64-bit store-immed-to-mem
* FIXME: would be nice to move this to the stub and
* use the dead rbx register!
*/
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XCX, MANGLE_NEXT_TAG_SLOT));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR((instr->bytes + len))));
PRE(ilist, instr, INSTR_CREATE_xchg
(dcontext, opnd_create_tls_slot(os_tls_offset(MANGLE_NEXT_TAG_SLOT)),
opnd_create_reg(REG_XCX)));
# else
PRE(ilist, instr, INSTR_CREATE_mov_st
(dcontext, opnd_create_tls_slot(os_tls_offset(MANGLE_NEXT_TAG_SLOT)),
OPND_CREATE_INTPTR((instr->bytes + len))));
# endif
}
else {
PRE(ilist, instr, instr_create_save_immed32_to_dcontext
(dcontext, (uint)(ptr_uint_t)(instr->bytes + len), XSI_OFFSET));
}
}
/* Handle ignorable syscall. non-ignorable system calls are
* destroyed and removed from the list at the end of this func.
*/
else if (!TEST(INSTR_NI_SYSCALL, instr->flags)) {
if (get_syscall_method() == SYSCALL_METHOD_INT &&
DYNAMO_OPTION(sygate_int)) {
/* for Sygate need to mangle into a call to int_syscall_addr
* is anyone going to get screwed up by this change
* (say flags change?) [-ignore_syscalls only]*/
ASSERT_NOT_TESTED();
instrlist_replace(ilist, instr, create_syscall_instr(dcontext));
instr_destroy(dcontext, instr);
} else if (get_syscall_method() == SYSCALL_METHOD_SYSCALL)
ASSERT_NOT_TESTED();
else if (get_syscall_method() == SYSCALL_METHOD_WOW64)
ASSERT_NOT_TESTED();
return;
}
} else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) {
/* on XP/2003 we have a choice between inserting a trampoline at the
* return pt of the sysenter, which is 0x7ffe0304 (except for
* SP2-patched XP), which is bad since it would clobber whatever's after
* the ret there (unless we used a 0xcc, like Visual Studio 2005 debugger
* does), or replacing the ret addr on the stack -- we choose the
* latter as the lesser of two transparency evils. Note that the
* page at 0x7ffe0000 can't be made writable anyway, so hooking
* isn't possible.
*/
if (TEST(INSTR_SHARED_SYSCALL, instr->flags)) {
ASSERT(DYNAMO_OPTION(shared_syscalls));
}
/* Handle ignorable syscall. non-ignorable system calls are
* destroyed and removed from the list at the end of this func.
*/
else if (!TEST(INSTR_NI_SYSCALL, instr->flags)) {
instr_t *mov_imm;
/* even w/ ignorable syscall, need to make sure regain control */
ASSERT(next_instr != NULL);
ASSERT(DYNAMO_OPTION(indcall2direct));
/* for sygate hack need to basically duplicate what is done in
* shared_syscall, but here we could be shared so would need to
* grab dcontext first etc. */
ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(sygate_sysenter));
/* PR 253943: we don't support sysenter in x64 */
IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* can't have 8-byte imm-to-mem */
/* FIXME PR 303413: we won't properly translate a fault in our
* app stack reference here. It's marked as our own mangling
* so we'll at least return failure from our translate routine.
*/
mov_imm = INSTR_CREATE_mov_st(dcontext, OPND_CREATE_MEM32(REG_XSP, 0),
opnd_create_instr(next_instr));
ASSERT(instr_is_mov_imm_to_tos(mov_imm));
PRE(ilist, instr, mov_imm);
/* do not let any encoding for length be cached!
* o/w will lose pc-relative opnd
*/
/* 'next_instr' is executed after the after-syscall vsyscall
* 'ret', which is executed natively. */
instr_set_meta(instr_get_prev(instr));
return; /* leave syscall instr alone */
}
} else {
SYSLOG_INTERNAL_ERROR("unsupported system call method");
LOG(THREAD, LOG_INTERP, 1, "don't know convention for this syscall method\n");
if (!TEST(INSTR_NI_SYSCALL, instr->flags))
return;
ASSERT_NOT_IMPLEMENTED(false);
}
/* destroy the syscall instruction */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
#endif /* WINDOWS */
}
/***************************************************************************
* NON-SYSCALL INTERRUPT
*/
void
mangle_interrupt(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
#ifdef WINDOWS
int num;
if (instr_get_opcode(instr) != OP_int)
return;
num = instr_get_interrupt_number(instr);
if (num == 0x2b) {
/* A callback finishes and returns to the interruption
* point of the thread with the instruction "int 2b".
* The interrupt ends the block; remove the instruction
* since we'll come back to dynamo to perform the
* interrupt.
*/
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
}
#endif /* WINDOWS */
}
/***************************************************************************
* FLOATING POINT PC
*/
/* The offset of the last floating point PC in the saved state */
#define FNSAVE_PC_OFFS 12
#define FXSAVE_PC_OFFS 8
#define FXSAVE_SIZE 512
void
float_pc_update(dcontext_t *dcontext)
{
byte *state = *(byte **)(((byte *)dcontext->local_state) + FLOAT_PC_STATE_SLOT);
app_pc orig_pc, xl8_pc;
uint offs = 0;
LOG(THREAD, LOG_INTERP, 2, "%s: fp state "PFX"\n", __FUNCTION__, state);
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64) {
/* Check whether the FPU state was saved */
uint64 header_bv = *(uint64 *)(state + FXSAVE_SIZE);
        if (!TEST(XCR0_FP, header_bv)) {
            LOG(THREAD, LOG_INTERP, 2, "%s: xsave did not save FP state => nop\n",
                __FUNCTION__);
            return;
        }
        /* Else fall through: the xsave legacy area stores the FPU pc at the
         * same offset as fxsave.
         */
}
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FNSAVE) {
offs = FNSAVE_PC_OFFS;
} else {
offs = FXSAVE_PC_OFFS;
}
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FXSAVE64 ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64)
orig_pc = *(app_pc *)(state + offs);
else /* just bottom 32 bits of pc */
orig_pc = (app_pc)(ptr_uint_t) *(uint *)(state + offs);
if (orig_pc == NULL) {
/* no fp instr yet */
LOG(THREAD, LOG_INTERP, 2, "%s: pc is NULL\n", __FUNCTION__);
return;
}
/* i#1211-c#1: the orig_pc might be an app pc restored from fldenv */
if (!in_fcache(orig_pc) &&
/* XXX: i#698: there might be fp instr neither in fcache nor in app */
!(in_generated_routine(dcontext, orig_pc) ||
is_dynamo_address(orig_pc) ||
is_in_dynamo_dll(orig_pc)
IF_CLIENT_INTERFACE(|| is_in_client_lib(orig_pc)))) {
bool no_xl8 = true;
#ifdef X64
if (dcontext->upcontext.upcontext.exit_reason != EXIT_REASON_FLOAT_PC_FXSAVE64 &&
dcontext->upcontext.upcontext.exit_reason != EXIT_REASON_FLOAT_PC_XSAVE64) {
/* i#1427: try to fill in the top 32 bits */
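            /* E.g. (a sketch): if vmcode starts at 0x00007f3a40000000 and the
             * saved 32-bit pc is 0x4012f500, we try 0x00007f3a4012f500 and
             * accept it only if it lies within the fcache.
             */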
ptr_uint_t vmcode = (ptr_uint_t) vmcode_get_start();
if ((vmcode & 0xffffffff00000000) > 0) {
byte *orig_try = (byte *)
((vmcode & 0xffffffff00000000) | (ptr_uint_t)orig_pc);
if (in_fcache(orig_try)) {
LOG(THREAD, LOG_INTERP, 2,
"%s: speculating: pc "PFX" + top half of vmcode = "PFX"\n",
__FUNCTION__, orig_pc, orig_try);
orig_pc = orig_try;
no_xl8 = false;
}
}
}
#endif
if (no_xl8) {
LOG(THREAD, LOG_INTERP, 2, "%s: pc "PFX" is translated already\n",
__FUNCTION__, orig_pc);
return;
}
}
/* We must either grab thread_initexit_lock or be couldbelinking to translate */
mutex_lock(&thread_initexit_lock);
xl8_pc = recreate_app_pc(dcontext, orig_pc, NULL);
mutex_unlock(&thread_initexit_lock);
LOG(THREAD, LOG_INTERP, 2, "%s: translated "PFX" to "PFX"\n", __FUNCTION__,
orig_pc, xl8_pc);
if (dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_FXSAVE64 ||
dcontext->upcontext.upcontext.exit_reason == EXIT_REASON_FLOAT_PC_XSAVE64)
*(app_pc *)(state + offs) = xl8_pc;
else /* just bottom 32 bits of pc */
*(uint *)(state + offs) = (uint)(ptr_uint_t) xl8_pc;
}
void
mangle_float_pc(dcontext_t *dcontext, instrlist_t *ilist,
instr_t *instr, instr_t *next_instr, uint *flags INOUT)
{
/* If there is a prior non-control float instr, we can inline the pc update.
* Otherwise, we go back to dispatch. In the latter case we do not support
* building traces across the float pc save: we assume it's rare.
*/
app_pc prior_float = NULL;
bool exit_is_normal = false;
int op = instr_get_opcode(instr);
opnd_t memop = instr_get_dst(instr, 0);
ASSERT(opnd_is_memory_reference(memop));
/* To simplify the code here we don't support rip-rel for local handling.
* We also don't support xsave, as it optionally writes the fpstate.
*/
if (opnd_is_base_disp(memop) && op != OP_xsave32 && op != OP_xsaveopt32 &&
op != OP_xsave64 && op != OP_xsaveopt64) {
instr_t *prev;
for (prev = instr_get_prev_expanded(dcontext, ilist, instr);
prev != NULL;
prev = instr_get_prev_expanded(dcontext, ilist, prev)) {
dr_fp_type_t type;
if (instr_is_app(prev) &&
instr_is_floating_ex(prev, &type)) {
bool control_instr = false;
if (type == DR_FP_STATE /* quick check */ &&
/* Check the list from Intel Vol 1 8.1.8 */
(op == OP_fnclex || op == OP_fldcw || op == OP_fnstcw ||
op == OP_fnstsw || op == OP_fnstenv || op == OP_fldenv ||
op == OP_fwait))
control_instr = true;
if (!control_instr) {
prior_float = get_app_instr_xl8(prev);
break;
}
}
}
}
if (prior_float != NULL) {
/* We can link this */
exit_is_normal = true;
STATS_INC(float_pc_from_cache);
/* Replace the stored code cache pc with the original app pc.
* If the app memory is unwritable, instr would have already crashed.
*/
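        /* E.g. (a sketch), for "fnstenv (%xax)" with the prior float instr at
         * app pc P, we emit after the instr:
         *   mov $P -> 12(%xax)   # overwrite the saved FPU pc field
         * (offset 8, and an 8-byte immediate sequence for fxsave64, for the
         * fxsave variants).
         */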
if (op == OP_fnsave || op == OP_fnstenv) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FNSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_4);
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, memop,
OPND_CREATE_INT32((int)(ptr_int_t)prior_float)));
} else if (op == OP_fxsave32) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FXSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_4);
PRE(ilist, next_instr,
INSTR_CREATE_mov_st(dcontext, memop,
OPND_CREATE_INT32((int)(ptr_int_t)prior_float)));
} else if (op == OP_fxsave64) {
opnd_set_disp(&memop, opnd_get_disp(memop) + FXSAVE_PC_OFFS);
opnd_set_size(&memop, OPSZ_8);
insert_mov_immed_ptrsz(dcontext, (ptr_int_t)prior_float, memop,
ilist, next_instr, NULL, NULL);
} else
ASSERT_NOT_REACHED();
} else if (!DYNAMO_OPTION(translate_fpu_pc)) {
/* We only support translating when inlined.
* XXX: we can't recover the loss of coarse-grained: we live with that.
*/
exit_is_normal = true;
ASSERT_CURIOSITY(!TEST(FRAG_CANNOT_BE_TRACE, *flags) ||
/* i#1562: it could be marked as no-trace for other reasons */
TEST(FRAG_SELFMOD_SANDBOXED, *flags));
} else {
int reason = 0;
CLIENT_ASSERT(!TEST(FRAG_IS_TRACE, *flags),
"removing an FPU instr in a trace with an FPU state save "
"is not supported");
switch (op) {
case OP_fnsave:
case OP_fnstenv: reason = EXIT_REASON_FLOAT_PC_FNSAVE; break;
case OP_fxsave32: reason = EXIT_REASON_FLOAT_PC_FXSAVE; break;
case OP_fxsave64: reason = EXIT_REASON_FLOAT_PC_FXSAVE64;break;
case OP_xsave32:
case OP_xsaveopt32: reason = EXIT_REASON_FLOAT_PC_XSAVE; break;
case OP_xsave64:
case OP_xsaveopt64: reason = EXIT_REASON_FLOAT_PC_XSAVE64; break;
default: ASSERT_NOT_REACHED();
}
if (DYNAMO_OPTION(private_ib_in_tls) || TEST(FRAG_SHARED, *flags)) {
insert_shared_get_dcontext(dcontext, ilist, instr, true/*save_xdi*/);
PRE(ilist, instr, INSTR_CREATE_mov_st
(dcontext,
opnd_create_dcontext_field_via_reg_sz(dcontext, REG_NULL/*default*/,
EXIT_REASON_OFFSET, OPSZ_2),
OPND_CREATE_INT16(reason)));
} else {
PRE(ilist, instr,
instr_create_save_immed16_to_dcontext(dcontext, reason,
EXIT_REASON_OFFSET));
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XDI, DCONTEXT_BASE_SPILL_SLOT));
}
/* At this point, xdi is spilled into DCONTEXT_BASE_SPILL_SLOT */
/* We pass the address in the xbx tls slot, which is untouched by fcache_return.
*
* XXX: handle far refs! Xref drutil_insert_get_mem_addr(), and sandbox_write()
* hitting this same issue.
*/
ASSERT_CURIOSITY(!opnd_is_far_memory_reference(memop));
if (opnd_is_base_disp(memop)) {
opnd_set_size(&memop, OPSZ_lea);
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XDI), memop));
} else {
ASSERT(opnd_is_abs_addr(memop) IF_X64( || opnd_is_rel_addr(memop)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
OPND_CREATE_INTPTR(opnd_get_addr(memop))));
}
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, REG_XDI, FLOAT_PC_STATE_SLOT));
/* Restore app %xdi */
if (TEST(FRAG_SHARED, *flags))
insert_shared_restore_dcontext_reg(dcontext, ilist, instr);
else {
PRE(ilist, instr,
instr_create_restore_from_tls(dcontext, REG_XDI,
DCONTEXT_BASE_SPILL_SLOT));
}
}
if (exit_is_normal && DYNAMO_OPTION(translate_fpu_pc)) {
instr_t *exit_jmp = next_instr;
while (exit_jmp != NULL && !instr_is_exit_cti(exit_jmp))
            exit_jmp = instr_get_next(exit_jmp);
ASSERT(exit_jmp != NULL);
ASSERT(instr_branch_special_exit(exit_jmp));
instr_branch_set_special_exit(exit_jmp, false);
/* XXX: there could be some other reason this was marked
* cannot-be-trace that we're undoing here...
*/
if (TEST(FRAG_CANNOT_BE_TRACE, *flags))
*flags &= ~FRAG_CANNOT_BE_TRACE;
}
}
/***************************************************************************
* CPUID FOOLING
*/
#ifdef FOOL_CPUID
/* values returned by cpuid for Mobile Pentium MMX processor (family 5, model 8)
* minus mmx (==0x00800000 in CPUID_1_EDX)
* FIXME: change model number to a Pentium w/o MMX!
*/
#define CPUID_0_EAX 0x00000001
#define CPUID_0_EBX 0x756e6547
#define CPUID_0_ECX 0x6c65746e
#define CPUID_0_EDX 0x49656e69
/* extended family, extended model, type, family, model, stepping id: */
/* 20:27, 16:19, 12:13, 8:11, 4:7, 0:3 */
#define CPUID_1_EAX 0x00000581
#define CPUID_1_EBX 0x00000000
#define CPUID_1_ECX 0x00000000
#define CPUID_1_EDX 0x000001bf
static void
mangle_cpuid(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
/* assumption: input value is put in eax on prev instr, or
* on instr prior to that and prev is an inc instr.
* alternative is to insert conditional branch...and save eflags, etc.
*/
instr_t *prev = instr_get_prev(instr);
opnd_t op;
int input, out_eax, out_ebx, out_ecx, out_edx;
LOG(THREAD, LOG_INTERP, 1, "fooling cpuid instruction!\n");
ASSERT(prev != NULL);
prev = instr_get_prev_expanded(dcontext, ilist, instr);
instr_decode(dcontext, instr);
if (!instr_valid(instr))
goto cpuid_give_up;
loginst(dcontext, 2, prev, "prior to cpuid");
/* FIXME: maybe should insert code to dispatch on eax, rather than
* this hack, which is based on photoshop, which either does
* "xor eax,eax" or "xor eax,eax; inc eax"
*/
if (!instr_is_mov_constant(prev, &input)) {
/* we only allow inc here */
if (instr_get_opcode(prev) != OP_inc)
goto cpuid_give_up;
op = instr_get_dst(prev, 0);
if (!opnd_is_reg(op) || opnd_get_reg(op) != REG_EAX)
goto cpuid_give_up;
/* now check instr before inc */
prev = instr_get_prev(prev);
if (!instr_is_mov_constant(prev, &input) || input != 0)
goto cpuid_give_up;
input = 1;
/* now check that mov 0 is into eax */
}
if (instr_num_dsts(prev) == 0)
goto cpuid_give_up;
op = instr_get_dst(prev, 0);
if (!opnd_is_reg(op) || opnd_get_reg(op) != REG_EAX)
goto cpuid_give_up;
if (input == 0) {
out_eax = CPUID_0_EAX;
out_ebx = CPUID_0_EBX;
out_ecx = CPUID_0_ECX;
out_edx = CPUID_0_EDX;
} else {
/* 1 or anything higher all return same info */
out_eax = CPUID_1_EAX;
out_ebx = CPUID_1_EBX;
out_ecx = CPUID_1_ECX;
out_edx = CPUID_1_EDX;
}
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EAX),
OPND_CREATE_INT32(out_eax)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EBX),
OPND_CREATE_INT32(out_ebx)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_ECX),
OPND_CREATE_INT32(out_ecx)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_EDX),
OPND_CREATE_INT32(out_edx)));
/* destroy the cpuid instruction */
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
return;
cpuid_give_up:
LOG(THREAD, LOG_INTERP, 1, "\tcpuid fool: giving up\n");
return;
}
#endif /* FOOL_CPUID */
void
mangle_exit_cti_prefixes(dcontext_t *dcontext, instr_t *instr)
{
uint prefixes = instr_get_prefixes(instr);
if (prefixes != 0) {
bool remove = false;
/* Case 8738: while for transparency it would be best to maintain all
* prefixes, our patching and other routines make assumptions about
* the length of exit ctis. Plus our elision removes the whole
* instr in any case.
*/
if (instr_is_cbr(instr)) {
if (TESTANY(~(PREFIX_JCC_TAKEN|PREFIX_JCC_NOT_TAKEN), prefixes)) {
remove = true;
prefixes &= (PREFIX_JCC_TAKEN|PREFIX_JCC_NOT_TAKEN);
}
} else {
/* prefixes on ubr or mbr should be nops and for ubr will mess up
* our size assumptions so drop them (i#435)
*/
remove = true;
prefixes = 0;
}
if (remove) {
LOG(THREAD, LOG_INTERP, 4,
"\tremoving unknown prefixes "PFX" from "PFX"\n",
prefixes, instr_get_raw_bits(instr));
ASSERT(instr_operands_valid(instr)); /* ensure will encode w/o raw bits */
instr_set_prefixes(instr, prefixes);
}
} else if (instr_get_opcode(instr) == OP_jmp &&
instr_length(dcontext, instr) != JMP_LONG_LENGTH) {
/* i#1988: remove MPX prefixes as they mess up our nop padding.
* i#1312 covers marking as actual prefixes, and we should keep them.
*/
LOG(THREAD, LOG_INTERP, 4,
"\tremoving unknown jmp prefixes from "PFX"\n",
instr_get_raw_bits(instr));
instr_set_raw_bits_valid(instr, false);
}
}
#ifdef X64
/* PR 215397: re-relativize rip-relative data addresses */
/* Should return NULL if it destroys "instr". We don't support both destroying
* (done only for x86: i#393) and changing next_instr (done only for ARM).
*/
instr_t *
mangle_rel_addr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
uint opc = instr_get_opcode(instr);
app_pc tgt;
opnd_t dst, src;
ASSERT(instr_has_rel_addr_reference(instr));
instr_get_rel_addr_target(instr, &tgt);
STATS_INC(rip_rel_instrs);
# ifdef RCT_IND_BRANCH
if (TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_call)) ||
TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_jump))) {
/* PR 215408: record addresses taken via rip-relative instrs */
rct_add_rip_rel_addr(dcontext, tgt _IF_DEBUG(instr_get_translation(instr)));
}
# endif
if (opc == OP_lea) {
/* segment overrides are ignored on lea */
opnd_t immed;
dst = instr_get_dst(instr, 0);
src = instr_get_src(instr, 0);
ASSERT(opnd_is_reg(dst));
ASSERT(opnd_is_rel_addr(src));
ASSERT(opnd_get_addr(src) == tgt);
/* Replace w/ an absolute immed of the target app address, following Intel
* Table 3-59 "64-bit Mode LEA Operation with Address and Operand Size
* Attributes" */
/* FIXME PR 253446: optimization: we could leave this as rip-rel if it
* still reaches from the code cache. */
if (reg_get_size(opnd_get_reg(dst)) == OPSZ_8) {
/* PR 253327: there is no explicit addr32 marker; we assume
* that decode or the user already zeroed out the top bits
* if there was an addr32 prefix byte or the user wants
* that effect */
immed = OPND_CREATE_INTPTR((ptr_int_t)tgt);
} else if (reg_get_size(opnd_get_reg(dst)) == OPSZ_4)
immed = OPND_CREATE_INT32((int)(ptr_int_t)tgt);
else {
ASSERT(reg_get_size(opnd_get_reg(dst)) == OPSZ_2);
immed = OPND_CREATE_INT16((short)(ptr_int_t)tgt);
}
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, dst, immed));
instrlist_remove(ilist, instr);
instr_destroy(dcontext, instr);
STATS_INC(rip_rel_lea);
return NULL; /* == destroyed instr */
} else {
/* PR 251479 will automatically re-relativize if it reaches,
* but if it doesn't we need to handle that here (since that
* involves an encoding length change, which complicates many
* use cases if done at instr encode time).
* We don't yet know exactly where we're going to encode this bb,
* so we're conservative and check for all reachability from our
* heap (assumed to be a single heap: xref PR 215395, and xref
     * potential secondary code caches PR 253446).
*/
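        /* When the target is unreachable the rewrite is roughly (a sketch),
         * e.g. for "mov 0x1000(%rip) -> %rbx":
         *   <spill scratch (xax by default) to a TLS slot>
         *   mov $app_target_addr -> scratch
         *   mov (scratch) -> %rbx
         *   <restore scratch>
         * with the spill/restore elided when the destination register itself
         * can serve as the scratch.
         */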
if (!rel32_reachable_from_vmcode(tgt)) {
int si = -1, di = -1;
opnd_t relop, newop;
bool spill = true;
/* FIXME PR 253446: for mbr, should share the xcx spill */
reg_id_t scratch_reg = REG_XAX;
si = instr_get_rel_addr_src_idx(instr);
di = instr_get_rel_addr_dst_idx(instr);
if (si >= 0) {
relop = instr_get_src(instr, si);
ASSERT(di < 0 || opnd_same(relop, instr_get_dst(instr, di)));
/* If it's a load (OP_mov_ld, or OP_movzx, etc.), use dead reg */
if (instr_num_srcs(instr) == 1 && /* src is the rip-rel opnd */
instr_num_dsts(instr) == 1 && /* only one dest: a register */
opnd_is_reg(instr_get_dst(instr, 0)) &&
!instr_is_predicated(instr)) {
opnd_size_t sz = opnd_get_size(instr_get_dst(instr, 0));
reg_id_t reg = opnd_get_reg(instr_get_dst(instr, 0));
/* if target is 16 or 8 bit sub-register the whole reg is not dead
* (for 32-bit, top 32 bits are cleared) */
if (reg_is_gpr(reg) && (reg_is_32bit(reg) || reg_is_64bit(reg))) {
spill = false;
scratch_reg = opnd_get_reg(instr_get_dst(instr, 0));
if (sz == OPSZ_4)
scratch_reg = reg_32_to_64(scratch_reg);
/* we checked all opnds: should not read reg */
ASSERT(!instr_reads_from_reg(instr, scratch_reg,
DR_QUERY_DEFAULT));
STATS_INC(rip_rel_unreachable_nospill);
}
}
} else {
relop = instr_get_dst(instr, di);
}
/* PR 263369: we can't just look for instr_reads_from_reg here since
* our no-spill optimization above may miss some writes.
*/
if (spill && instr_uses_reg(instr, scratch_reg)) {
/* mbr (for which we'll use xcx once we optimize) should not
* get here: can't use registers (except xsp) */
ASSERT(scratch_reg == REG_XAX);
do {
scratch_reg++;
ASSERT(scratch_reg <= REG_STOP_64);
} while (instr_uses_reg(instr, scratch_reg));
}
ASSERT(!instr_reads_from_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
ASSERT(!spill || !instr_writes_to_reg(instr, scratch_reg, DR_QUERY_DEFAULT));
/* XXX PR 253446: Optimize by looking ahead for dead registers, and
* sharing single spill across whole bb, or possibly building local code
* cache to avoid unreachability: all depending on how many rip-rel
* instrs we see. We'll watch the stats.
*/
if (spill) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, 0, scratch_reg, MANGLE_RIPREL_SPILL_SLOT,
XAX_OFFSET));
}
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(scratch_reg),
OPND_CREATE_INTPTR((ptr_int_t)tgt)));
newop = opnd_create_far_base_disp(opnd_get_segment(relop), scratch_reg,
REG_NULL, 0, 0, opnd_get_size(relop));
if (si >= 0)
instr_set_src(instr, si, newop);
if (di >= 0)
instr_set_dst(instr, di, newop);
/* we need the whole spill...restore region to all be marked mangle */
instr_set_our_mangling(instr, true);
if (spill) {
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, scratch_reg,
MANGLE_RIPREL_SPILL_SLOT));
}
STATS_INC(rip_rel_unreachable);
}
}
return next_instr;
}
#endif
/***************************************************************************
* Reference with segment register (fs/gs)
*/
#ifdef UNIX
static int
instr_get_seg_ref_dst_idx(instr_t *instr)
{
int i;
opnd_t opnd;
if (!instr_valid(instr))
return -1;
/* must go to level 3 operands */
for (i=0; i<instr_num_dsts(instr); i++) {
opnd = instr_get_dst(instr, i);
if (opnd_is_far_base_disp(opnd) &&
(opnd_get_segment(opnd) == SEG_GS ||
opnd_get_segment(opnd) == SEG_FS))
return i;
}
return -1;
}
static int
instr_get_seg_ref_src_idx(instr_t *instr)
{
int i;
opnd_t opnd;
if (!instr_valid(instr))
return -1;
/* must go to level 3 operands */
for (i=0; i<instr_num_srcs(instr); i++) {
opnd = instr_get_src(instr, i);
if (opnd_is_far_base_disp(opnd) &&
(opnd_get_segment(opnd) == SEG_GS ||
opnd_get_segment(opnd) == SEG_FS))
return i;
}
return -1;
}
static ushort tls_slots[4] =
{TLS_XAX_SLOT, TLS_XCX_SLOT, TLS_XDX_SLOT, TLS_XBX_SLOT};
/* mangle the instruction OP_mov_seg, i.e. the instruction that
* read/update the segment register.
*/
void
mangle_mov_seg(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
reg_id_t seg;
opnd_t opnd, dst;
opnd_size_t dst_sz;
ASSERT(instr_get_opcode(instr) == OP_mov_seg);
ASSERT(instr_num_srcs(instr) == 1);
ASSERT(instr_num_dsts(instr) == 1);
STATS_INC(app_mov_seg_mangled);
    /* For a segment update, we simply change the instr to a nop because we
     * will update the segment when dynamorio enters the code cache to execute
     * this basic block.
*/
dst = instr_get_dst(instr, 0);
if (opnd_is_reg(dst) && reg_is_segment(opnd_get_reg(dst))) {
app_pc xl8;
seg = opnd_get_reg(dst);
#ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
#endif
/* must use the original instr, which might be used by caller */
xl8 = get_app_instr_xl8(instr);
instr_reuse(dcontext, instr);
instr_set_opcode(instr, OP_nop);
instr_set_num_opnds(dcontext, instr, 0, 0);
instr_set_translation(instr, xl8);
return;
}
/* for read seg, we mangle it */
opnd = instr_get_src(instr, 0);
ASSERT(opnd_is_reg(opnd));
seg = opnd_get_reg(opnd);
ASSERT(reg_is_segment(seg));
if (seg != SEG_FS && seg != SEG_GS)
return;
#ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
#endif
/* There are two possible mov_seg instructions:
* 8C/r MOV r/m16,Sreg Move segment register to r/m16
* REX.W + 8C/r MOV r/m64,Sreg Move zero extended 16-bit segment
* register to r/m64
     * Note: in 32-bit mode, the assembler may insert the 16-bit operand-size
* prefix with this instruction.
*/
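    /* E.g. (a sketch):
     *   mov %fs -> %eax     =>  movzx <fs_selector_tls_slot> -> %eax
     *   mov %fs -> (%xcx)   =>  <spill scratch reg to its TLS slot>
     *                           mov <fs_selector_tls_slot> -> scratch
     *                           mov scratch -> (%xcx)
     *                           <restore scratch>
     * where the scratch register is the first of xax/xcx/xdx/xbx not used by
     * the instr.
     */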
/* we cannot replace the instruction but only change it. */
dst = instr_get_dst(instr, 0);
dst_sz = opnd_get_size(dst);
opnd = opnd_create_sized_tls_slot
(os_tls_offset(os_get_app_tls_reg_offset(seg)), OPSZ_2);
if (opnd_is_reg(dst)) { /* dst is a register */
/* mov %gs:off => reg */
instr_set_src(instr, 0, opnd);
instr_set_opcode(instr, OP_mov_ld);
if (dst_sz != OPSZ_2)
instr_set_opcode(instr, OP_movzx);
    } else { /* dst is memory, need to steal a register. */
reg_id_t reg;
instr_t *ti;
for (reg = REG_XAX; reg < REG_XBX; reg++) {
if (!instr_uses_reg(instr, reg))
break;
}
        /* We need to save the register to the corresponding slot for a correct
         * restore, so we only use the first four registers.
*/
ASSERT(reg <= REG_XBX);
/* save reg */
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, reg,
tls_slots[reg - REG_XAX]));
/* restore reg */
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, reg,
tls_slots[reg - REG_XAX]));
switch (dst_sz) {
case OPSZ_8:
IF_NOT_X64(ASSERT(false);)
break;
case OPSZ_4:
IF_X64(reg = reg_64_to_32(reg);)
break;
case OPSZ_2:
IF_X64(reg = reg_64_to_32(reg);)
reg = reg_32_to_16(reg);
break;
default:
ASSERT(false);
}
/* mov %gs:off => reg */
ti = INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(reg), opnd);
if (dst_sz != OPSZ_2)
instr_set_opcode(ti, OP_movzx);
PRE(ilist, instr, ti);
/* change mov_seg to mov_st: mov reg => [mem] */
instr_set_src(instr, 0, opnd_create_reg(reg));
instr_set_opcode(instr, OP_mov_st);
}
}
/* mangle an instruction that references memory via a segment register */
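/* E.g. (a sketch):
 *   add %fs:0x10(%xax) -> %xbx
 * becomes:
 *   <spill xcx to its TLS slot>
 *   mov <fs_base_tls_slot> -> %xcx
 *   add 0x10(%xax,%xcx,1) -> %xbx
 *   <restore xcx>
 * (no spill is needed when the load's destination register is dead and can
 * serve as the scratch).
 */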
void
mangle_seg_ref(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr,
instr_t *next_instr)
{
int si = -1, di = -1;
opnd_t segop, newop;
bool spill = true;
reg_id_t scratch_reg = REG_XAX, seg = REG_NULL;
/* exit cti won't be seg ref */
if (instr_is_exit_cti(instr))
return;
    /* mbr will be handled separately */
if (instr_is_mbr(instr))
return;
if (instr_get_opcode(instr) == OP_lea)
return;
/* XXX: maybe using decode_cti and then a check on prefix could be
     * more efficient, as it only examines a few bytes and avoids fully decoding
     * the instruction. For simplicity, we examine every operand instead.
*/
/* 1. get ref opnd */
si = instr_get_seg_ref_src_idx(instr);
di = instr_get_seg_ref_dst_idx(instr);
if (si < 0 && di < 0)
return;
if (si >= 0) {
segop = instr_get_src(instr, si);
ASSERT(di < 0 || opnd_same(segop, instr_get_dst(instr, di)));
} else {
segop = instr_get_dst(instr, di);
}
seg = opnd_get_segment(segop);
if (seg != SEG_GS && seg != SEG_FS)
return;
#ifdef CLIENT_INTERFACE
if (seg == LIB_SEG_TLS && !INTERNAL_OPTION(private_loader))
return;
#endif
STATS_INC(app_seg_refs_mangled);
DOLOG(3, LOG_INTERP, {
loginst(dcontext, 3, instr, "reference with fs/gs segment");
});
/* 2. decide the scratch reg */
/* Opt: if it's a load (OP_mov_ld, or OP_movzx, etc.), use dead reg */
if (si >= 0 &&
instr_num_srcs(instr) == 1 && /* src is the seg ref opnd */
instr_num_dsts(instr) == 1 && /* only one dest: a register */
opnd_is_reg(instr_get_dst(instr, 0)) &&
!instr_is_predicated(instr)) {
reg_id_t reg = opnd_get_reg(instr_get_dst(instr, 0));
/* if target is 16 or 8 bit sub-register the whole reg is not dead
* (for 32-bit, top 32 bits are cleared) */
if (reg_is_gpr(reg) && (reg_is_32bit(reg) || reg_is_64bit(reg)) &&
/* mov [%fs:%xax] => %xax */
!instr_reads_from_reg(instr, reg, DR_QUERY_DEFAULT)) {
spill = false;
scratch_reg = reg;
# ifdef X64
if (opnd_get_size(instr_get_dst(instr, 0)) == OPSZ_4)
scratch_reg = reg_32_to_64(reg);
# endif
}
}
if (spill) {
/* we pick a scratch register from XAX, XBX, XCX, or XDX
* that has direct TLS slots.
*/
for (scratch_reg = REG_XAX; scratch_reg <= REG_XBX; scratch_reg++) {
/* the register must not be used by the instr, either read or write,
         * because we overwrite it before the instr executes (so the instr must
         * not read it) and restore it afterward (so any write by the instr
         * would be lost).
*/
if (!instr_uses_reg(instr, scratch_reg))
break;
}
ASSERT(scratch_reg <= REG_XBX);
PRE(ilist, instr,
instr_create_save_to_tls(dcontext, scratch_reg,
tls_slots[scratch_reg - REG_XAX]));
}
newop = mangle_seg_ref_opnd(dcontext, ilist, instr, segop, scratch_reg);
if (si >= 0)
instr_set_src(instr, si, newop);
if (di >= 0)
instr_set_dst(instr, di, newop);
/* we need the whole spill...restore region to all be marked mangle */
instr_set_our_mangling(instr, true);
/* FIXME: i#107 we should check the bound and raise signal if out of bound. */
DOLOG(3, LOG_INTERP, {
loginst(dcontext, 3, instr, "re-wrote app tls reference");
});
if (spill) {
PRE(ilist, next_instr,
instr_create_restore_from_tls(dcontext, scratch_reg,
tls_slots[scratch_reg - REG_XAX]));
}
}
#endif /* UNIX */
#ifdef ANNOTATIONS
/***************************************************************************
* DR and Valgrind annotations
*/
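/* mangle_annotation_helper expands an annotation label into one clean call per
 * registered receiver. The handler's argument array is copied anew for each
 * receiver since the clean-call insertion below may consume or rewrite the
 * operands (a conservative assumption; see dr_insert_clean_call_ex_varg).
 */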
void
mangle_annotation_helper(dcontext_t *dcontext, instr_t *label, instrlist_t *ilist)
{
dr_instr_label_data_t *label_data = instr_get_label_data_area(label);
dr_annotation_handler_t *handler = GET_ANNOTATION_HANDLER(label_data);
dr_annotation_receiver_t *receiver = handler->receiver_list;
opnd_t *args = NULL;
ASSERT(handler->type == DR_ANNOTATION_HANDLER_CALL);
while (receiver != NULL) {
if (handler->num_args != 0) {
args = HEAP_ARRAY_ALLOC(dcontext, opnd_t, handler->num_args,
ACCT_CLEANCALL, UNPROTECTED);
memcpy(args, handler->args, sizeof(opnd_t) * handler->num_args);
}
dr_insert_clean_call_ex_varg(dcontext, ilist, label,
receiver->instrumentation.callback,
receiver->save_fpstate ? DR_CLEANCALL_SAVE_FLOAT : 0,
handler->num_args, args);
if (handler->num_args != 0) {
HEAP_ARRAY_FREE(dcontext, args, opnd_t, handler->num_args,
ACCT_CLEANCALL, UNPROTECTED);
}
receiver = receiver->next;
}
}
#endif
/* END OF CONTROL-FLOW MANGLING ROUTINES
*###########################################################################
*###########################################################################
*/
/* SELF-MODIFYING-CODE SANDBOXING
*
* When we detect it, we take an exit that targets our own routine
* fragment_self_write. Dispatch checks for that target and if it finds it,
* it calls that routine, so don't worry about building a bb for it.
* Returns false if the bb has invalid instrs in the middle and it should
* be rebuilt from scratch.
*/
#undef SAVE_TO_DC_OR_TLS
#undef RESTORE_FROM_DC_OR_TLS
/* PR 244737: x64 uses tls to avoid reachability issues w/ absolute addresses */
#ifdef X64
# define SAVE_TO_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_save_to_tls((dc), (reg), (tls_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_restore_from_tls((dc), (reg), (tls_offs))
#else
# define SAVE_TO_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_save_to_dcontext((dc), (reg), (dc_offs))
# define RESTORE_FROM_DC_OR_TLS(dc, reg, tls_offs, dc_offs) \
instr_create_restore_from_dcontext((dc), (reg), (dc_offs))
#endif
static void
sandbox_rep_instr(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr, instr_t *next,
app_pc start_pc, app_pc end_pc /* end is open */)
{
/* put checks before instr, set some reg as a flag, act on it
* after instr (even if overwrite self will execute rep to completion)
* want to read DF to find direction (0=inc xsi/xdi, 1=dec),
* but only way to read is to do a pushf!
* Solution: if cld or std right before rep instr, use that info,
* otherwise check for BOTH directions!
* xcx is a pre-check, xsi/xdi are inc/dec after memory op, so
* xdi+xcx*opndsize == instr of NEXT write, so open-ended there:
* if DF==0:
* if (xdi < end_pc && xdi+xcx*opndsize > start_pc) => self-write
* if DF==1:
* if (xdi > start_pc && xdi-xcx*opndsize > end_pc) => self-write
* both:
* if (xdi-xcx*opndsize < end_pc && xdi+xcx*opndsize > start_pc) => self-write
* opndsize is 1,2, or 4 => use lea for mul
* lea (xdi,xcx,opndsize),xcx
*
* save flags and xax
* save xbx
* lea (xdi,xcx,opndsize),xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): save xdx
* if x64 && start_pc > 4GB: mov start_pc, xdx
* cmp xbx, IF_X64_>4GB_ELSE(xdx, start_pc)
* mov $0,xbx # for if ok
* jle ok # open b/c address of next rep write
* lea (,xcx,opndsize),xbx
* neg xbx # sub does dst - src
* add xdi,xbx
* if x64 && end_pc > 4GB: mov end_pc, xdx
* cmp xbx, IF_X64_>4GB_ELSE(xdx, end_pc)
* mov $0,xbx # for if ok
* jge ok # end is open
* mov $1,xbx
* ok:
* restore flags and xax (xax used by stos)
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xdx
* <rep instr> # doesn't use xbx
* (PR 267764/i#398: we special-case restore xbx on cxt xl8 if this instr faults)
* mov xbx,xcx # we can use xcx, it's dead since 0 after rep
* restore xbx
* jecxz ok2 # if xbx was 1 we'll fall through and exit
* mov $0,xcx
* jmp <instr after write, flag as INSTR_BRANCH_SPECIAL_EXIT>
* ok2:
* <label> # ok2 can't == next, b/c next may be ind br -> mangled w/ instrs
* # inserted before it, so jecxz would target too far
*/
instr_t *ok = INSTR_CREATE_label(dcontext);
instr_t *ok2 = INSTR_CREATE_label(dcontext);
instr_t *jmp;
app_pc after_write;
uint opndsize = opnd_size_in_bytes(opnd_get_size(instr_get_dst(instr,0)));
uint flags =
instr_eflags_to_fragment_eflags(forward_eflags_analysis(dcontext, ilist, next));
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
instr_t *next_app = next;
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "writes memory"); });
    ASSERT(!instr_is_call_indirect(instr)); /* FIXME: can you have REP on CALL's? */
/* skip meta instrs to find next app instr (xref PR 472190) */
while (next_app != NULL && instr_is_meta(next_app))
next_app = instr_get_next(next_app);
if (next_app != NULL) {
/* client may have inserted non-meta instrs, so use translation first
* (xref PR 472190)
*/
if (instr_get_app_pc(next_app) != NULL)
after_write = instr_get_app_pc(next_app);
else if (!instr_raw_bits_valid(next_app)) {
/* next must be the final jmp! */
ASSERT(instr_is_ubr(next_app) && instr_get_next(next_app) == NULL);
after_write = opnd_get_pc(instr_get_target(next_app));
} else
after_write = instr_get_raw_bits(next_app);
} else {
after_write = end_pc;
}
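    /* Emit the pre-check outlined above: with eflags and xbx saved, compute
     * xdi +/- xcx*opndsize and compare against [start_pc, end_pc) for both
     * DF directions, leaving a 0/1 verdict in xbx for the post-rep check.
     */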
insert_save_eflags(dcontext, ilist, instr, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_XDI, REG_XCX, opndsize, 0, OPSZ_lea)));
#ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
}
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XDX)));
} else {
#endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
#ifdef X64
}
#endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(0)));
PRE(ilist, instr,
INSTR_CREATE_jcc(dcontext, OP_jle, opnd_create_instr(ok)));
PRE(ilist, instr,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_NULL, REG_XCX, opndsize, 0, OPSZ_lea)));
PRE(ilist, instr,
INSTR_CREATE_neg(dcontext, opnd_create_reg(REG_XBX)));
PRE(ilist, instr,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XDI)));
#ifdef X64
if ((ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDX),
OPND_CREATE_INTPTR(end_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XDX)));
} else {
#endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)end_pc)));
#ifdef X64
}
#endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32(0)));
PRE(ilist, instr,
INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(ok)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX), OPND_CREATE_INT32(1)));
PRE(ilist, instr, ok);
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
#ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XDX, TLS_XDX_SLOT, XDX_OFFSET));
}
#endif
/* instr goes here */
PRE(ilist, next,
INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_XBX)));
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
PRE(ilist, next,
INSTR_CREATE_jecxz(dcontext, opnd_create_instr(ok2)));
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INT32(0))); /* on x64 top 32 bits zeroed */
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(after_write));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, next, jmp);
PRE(ilist, next, ok2);
}
static void
sandbox_write(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr, instr_t *next,
opnd_t op, app_pc start_pc, app_pc end_pc /* end is open */)
{
/* can only test for equality w/o modifying flags, so save them
* if (addr < end_pc && addr+opndsize > start_pc) => self-write
* <write memory>
* save xbx
* lea memory,xbx
* save flags and xax # after lea of memory in case memory includes xax
* if x64 && (start_pc > 4GB || end_pc > 4GB): save xcx
* if x64 && end_pc > 4GB: mov end_pc, xcx
* cmp xbx, IF_X64_>4GB_ELSE(xcx, end_pc)
* jge ok # end is open
* lea opndsize(xbx),xbx
* if x64 && start_pc > 4GB: mov start_pc, xcx
* cmp xbx, IF_X64_>4GB_ELSE(xcx, start_pc)
* jle ok # open since added size
* restore flags (using xbx) and xax
* restore xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xcx
* jmp <instr after write, flag as INSTR_BRANCH_SPECIAL_EXIT>
* ok:
* restore flags and xax
* restore xbx
* if x64 && (start_pc > 4GB || end_pc > 4GB): restore xcx
*/
instr_t *ok = INSTR_CREATE_label(dcontext), *jmp;
app_pc after_write = NULL;
uint opndsize = opnd_size_in_bytes(opnd_get_size(op));
uint flags =
instr_eflags_to_fragment_eflags(forward_eflags_analysis(dcontext, ilist, next));
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
instr_t *next_app = next;
instr_t *get_addr_at = next;
int opcode = instr_get_opcode(instr);
DOLOG(3, LOG_INTERP, { loginst(dcontext, 3, instr, "writes memory"); });
/* skip meta instrs to find next app instr (xref PR 472190) */
while (next_app != NULL && instr_is_meta(next_app))
next_app = instr_get_next(next_app);
if (next_app != NULL) {
/* client may have inserted non-meta instrs, so use translation first
* (xref PR 472190)
*/
if (instr_get_app_pc(next_app) != NULL)
after_write = instr_get_app_pc(next_app);
else if (!instr_raw_bits_valid(next_app)) {
/* next must be the final artificially added jmp! */
ASSERT(instr_is_ubr(next_app) && instr_get_next(next_app) == NULL);
/* for sure this is the last jmp out, but it
* doesn't have to be a direct jmp but instead
             * it could be the exit branch we add
* for an indirect call - which is the only ind branch
* that writes to memory
* CALL* already means that we're leaving the block and it cannot be a selfmod
* instruction even though it writes to memory
*/
DOLOG(4, LOG_INTERP, {
loginst(dcontext, 4, next_app, "next app instr");
});
after_write = opnd_get_pc(instr_get_target(next_app));
LOG(THREAD, LOG_INTERP, 4, "after_write = "PFX" next should be final jmp\n",
after_write);
} else
after_write = instr_get_raw_bits(next_app);
} else {
ASSERT_NOT_TESTED();
after_write = end_pc;
}
if (opcode == OP_ins || opcode == OP_movs || opcode == OP_stos) {
/* These instrs modify their own addressing register so we must
* get the address pre-write. None of them touch xbx.
*/
get_addr_at = instr;
ASSERT(!instr_writes_to_reg(instr, REG_XBX, DR_QUERY_DEFAULT) &&
!instr_reads_from_reg(instr, REG_XBX, DR_QUERY_DEFAULT));
}
PRE(ilist, get_addr_at,
SAVE_TO_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
/* XXX: Basically reimplementing drutil_insert_get_mem_addr(). */
/* FIXME i#986: Sandbox far writes. Not a hypothetical problem! NaCl uses
* segments for its x86 sandbox, although they are 0 based with a limit.
* qq.exe has them in sandboxed code.
*/
ASSERT_CURIOSITY(!opnd_is_far_memory_reference(op) ||
/* Standard far refs */
opcode == OP_ins || opcode == OP_movs || opcode == OP_stos);
if (opnd_is_base_disp(op)) {
/* change to OPSZ_lea for lea */
opnd_set_size(&op, OPSZ_lea);
PRE(ilist, get_addr_at,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX), op));
if ((opcode == OP_push && opnd_is_base_disp(op) &&
opnd_get_index(op) == DR_REG_NULL &&
reg_to_pointer_sized(opnd_get_base(op)) == DR_REG_XSP) ||
opcode == OP_push_imm || opcode == OP_pushf || opcode == OP_pusha ||
opcode == OP_pop /* pop into stack slot */ ||
opcode == OP_call || opcode == OP_call_ind || opcode == OP_call_far ||
opcode == OP_call_far_ind) {
/* Undo xsp adjustment made by the instruction itself.
* We could use get_addr_at to acquire the address pre-instruction
* for some of these, but some can read or write ebx.
*/
PRE(ilist, next,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_NULL, REG_XBX,
1, -opnd_get_disp(op), OPSZ_lea)));
}
} else {
/* handle abs addr pointing within fragment */
/* XXX: Can optimize this by doing address comparison at translation
* time. Might happen frequently if a JIT stores data on the same page
* as its code. For now we hook into existing sandboxing code.
*/
app_pc abs_addr;
ASSERT(opnd_is_abs_addr(op) IF_X64( || opnd_is_rel_addr(op)));
abs_addr = opnd_get_addr(op);
PRE(ilist, get_addr_at,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INTPTR(abs_addr)));
}
insert_save_eflags(dcontext, ilist, next, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
#ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
if ((ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(end_pc)));
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XCX)));
} else {
#endif
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)end_pc)));
#ifdef X64
}
#endif
PRE(ilist, next,
INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(ok)));
PRE(ilist, next,
INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_XBX),
opnd_create_base_disp(REG_XBX, REG_NULL, 0,
opndsize, OPSZ_lea)));
#ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, next,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
opnd_create_reg(REG_XCX)));
} else {
#endif
PRE(ilist, next,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XBX),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
#ifdef X64
}
#endif
PRE(ilist, next,
INSTR_CREATE_jcc(dcontext, OP_jle, opnd_create_instr(ok)));
insert_restore_eflags(dcontext, ilist, next, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
#ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
#endif
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(after_write));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, next, jmp);
PRE(ilist, next, ok);
insert_restore_eflags(dcontext, ilist, next, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XBX, TLS_XBX_SLOT, XBX_OFFSET));
#ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX || (ptr_uint_t)end_pc > UINT_MAX) {
PRE(ilist, next,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
#endif
}
static bool
sandbox_top_of_bb_check_s2ro(dcontext_t *dcontext, app_pc start_pc)
{
return (DYNAMO_OPTION(sandbox2ro_threshold) > 0 &&
/* we can't make stack regions ro so don't put in the instrumentation */
!is_address_on_stack(dcontext, start_pc) &&
/* case 9098 we don't want to ever make RO untrackable driver areas */
!is_driver_address(start_pc));
}
static void
sandbox_top_of_bb(dcontext_t *dcontext, instrlist_t *ilist,
bool s2ro, uint flags,
app_pc start_pc, app_pc end_pc, /* end is open */
bool for_cache,
/* for obtaining the two patch locations: */
patch_list_t *patchlist,
cache_pc *copy_start_loc, cache_pc *copy_end_loc)
{
/* add check at top of ilist that compares actual app instructions versus
* copy we saved, stored in cache right after fragment itself. leave its
* start address blank here, will be touched up after emitting this ilist.
*
* FIXME case 8165/PR 212600: optimize this: move reg restores to
* custom fcache_return, use cmpsd instead of cmpsb, etc.
*
* if eflags live entering this bb:
* save xax
* lahf
* seto %al
* endif
* if (-sandbox2ro_threshold > 0)
* if x64: save xcx
* incl &vm_area_t->exec_count (for x64, via xcx)
* cmp sandbox2ro_threshold, vm_area_t->exec_count (for x64, via xcx)
* if eflags live entering this bb, or x64:
* jl past_threshold
* if x64: restore xcx
* if eflags live entering this bb:
* jmp restore_eflags_and_exit
* else
* jmp start_pc marked as selfmod exit
* endif
* past_threshold:
* else
* jge start_pc marked as selfmod exit
* endif
* endif
* if (-sandbox2ro_threshold == 0) && !x64)
* save xcx
* endif
* save xsi
* save xdi
* if stats:
* inc num_sandbox_execs stat (for x64, via xsi)
* endif
* mov start_pc,xsi
* mov copy_start_pc,xdi # 1 opcode byte, then offset
* # => patch point 1
* cmpsb
* if copy_size > 1 # not an opt: for correctness: if "repe cmpsb" has xcx==0, it
* # doesn't touch eflags and we treat cmp results as cmpsb results
* jne check_results
* if x64 && start_pc > 4GB
* mov start_pc, xcx
* cmp xsi, xcx
* else
* cmp xsi, start_pc
* endif
* mov copy_size-1, xcx # -1 b/c we already checked 1st byte
* jge forward
* mov copy_end_pc - 1, xdi # -1 b/c it is the end of this basic block
* # => patch point 2
* mov end_pc - 1, xsi
* forward:
* repe cmpsb
* endif # copy_size > 1
* check_results:
* restore xcx
* restore xsi
* restore xdi
* if eflags live:
* je start_bb
* restore_eflags_and_exit:
* add $0x7f,%al
* sahf
* restore xax
* jmp start_pc marked as selfmod exit
* else
* jne start_pc marked as selfmod exit
* endif
* start_bb:
* if eflags live:
* add $0x7f,%al
* sahf
* restore xax
* endif
*/
instr_t *instr, *jmp;
instr_t *restore_eflags_and_exit = NULL;
bool use_tls = IF_X64_ELSE(true, false);
IF_X64(bool x86_to_x64_ibl_opt = DYNAMO_OPTION(x86_to_x64_ibl_opt);)
bool saved_xcx = false;
instr_t *check_results = INSTR_CREATE_label(dcontext);
instr = instrlist_first_expanded(dcontext, ilist);
insert_save_eflags(dcontext, ilist, instr, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
if (s2ro) {
/* It's difficult to use lea/jecxz here as we want to use a shared
* counter but no lock, and thus need a relative comparison, while
* lea/jecxz can only do an exact comparison. We could be exact by
* having a separate counter per (private) fragment but by spilling
* eflags we can inc memory, making the scheme here not inefficient.
*/
uint thresh = DYNAMO_OPTION(sandbox2ro_threshold);
uint *counter;
if (for_cache)
counter = get_selfmod_exec_counter(start_pc);
else {
/* Won't find exec area since not a real fragment (probably
* a recreation post-flush). Won't execute, so NULL is fine.
*/
counter = NULL;
}
#ifdef X64
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
saved_xcx = true;
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(counter)));
PRE(ilist, instr,
INSTR_CREATE_inc(dcontext, OPND_CREATE_MEM32(REG_XCX, 0)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, OPND_CREATE_MEM32(REG_XCX, 0),
OPND_CREATE_INT_32OR8((int)thresh)));
#else
PRE(ilist, instr,
INSTR_CREATE_inc(dcontext, OPND_CREATE_ABSMEM(counter, OPSZ_4)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext,
OPND_CREATE_ABSMEM(counter, OPSZ_4),
OPND_CREATE_INT_32OR8(thresh)));
#endif
if (TEST(FRAG_WRITES_EFLAGS_6, flags) IF_X64(&& false)) {
jmp = INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
} else {
instr_t *past_threshold = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jcc_short(dcontext, OP_jl_short,
opnd_create_instr(past_threshold)));
#ifdef X64
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
#endif
if (!TEST(FRAG_WRITES_EFLAGS_6, flags)) {
ASSERT(restore_eflags_and_exit == NULL);
restore_eflags_and_exit = INSTR_CREATE_label(dcontext);
PRE(ilist, instr, INSTR_CREATE_jmp
(dcontext, opnd_create_instr(restore_eflags_and_exit)));
}
#ifdef X64
else {
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
}
#endif
PRE(ilist, instr, past_threshold);
}
}
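    /* xcx is clobbered below (it holds the repe cmps count and, on x64, large
     * immediates), so spill it here unless the s2ro path above already did.
     */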
if (!saved_xcx) {
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
}
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, REG_XSI, TLS_XBX_SLOT, XSI_OFFSET));
PRE(ilist, instr,
SAVE_TO_DC_OR_TLS(dcontext, REG_XDI, TLS_XDX_SLOT, XDI_OFFSET));
DOSTATS({
if (GLOBAL_STATS_ON()) {
/* We only do global inc, not bothering w/ thread-private stats.
* We don't care about races: ballpark figure is good enough.
* We could do a direct inc of memory for 32-bit.
*/
PRE(ilist, instr, INSTR_CREATE_mov_imm
(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(GLOBAL_STAT_ADDR(num_sandbox_execs))));
PRE(ilist, instr, INSTR_CREATE_inc
(dcontext, opnd_create_base_disp(REG_XSI, REG_NULL, 0, 0, OPSZ_STATS)));
}
});
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
/* will become copy start */
OPND_CREATE_INTPTR(start_pc)));
if (patchlist != NULL) {
ASSERT(copy_start_loc != NULL);
add_patch_marker(patchlist, instr_get_prev(instr), PATCH_ASSEMBLE_ABSOLUTE,
-(short)sizeof(cache_pc), (ptr_uint_t*)copy_start_loc);
}
PRE(ilist, instr, INSTR_CREATE_cmps_1(dcontext));
/* For a 1-byte copy size we cannot use "repe cmpsb" as it won't
* touch eflags and we'll treat the cmp results as cmpsb results, which
* doesn't work (cmp will never be equal)
*/
if (end_pc - start_pc > 1) {
instr_t *forward = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jcc(dcontext, OP_jne, opnd_create_instr(check_results)));
#ifdef X64
if ((ptr_uint_t)start_pc > UINT_MAX) {
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(start_pc)));
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSI),
opnd_create_reg(REG_XCX)));
} else {
#endif
PRE(ilist, instr,
INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INT32((int)(ptr_int_t)start_pc)));
#ifdef X64
}
#endif
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XCX),
OPND_CREATE_INTPTR(end_pc - (start_pc + 1))));
        /* i#2155: In the case where the direction flag is set, xsi will be less
* than start_pc after cmps, and the jump branch will not be taken.
*/
PRE(ilist, instr,
INSTR_CREATE_jcc(dcontext, OP_jge, opnd_create_instr(forward)));
        /* i#2155: The immediate value is only a placeholder
* since it will be modified in finalize_selfmod_sandbox.
*/
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XDI),
/* will become copy end */
OPND_CREATE_INTPTR(end_pc - 1)));
if (patchlist != NULL) {
ASSERT(copy_end_loc != NULL);
add_patch_marker(patchlist, instr_get_prev(instr), PATCH_ASSEMBLE_ABSOLUTE,
-(short)sizeof(cache_pc), (ptr_uint_t*)copy_end_loc);
}
/* i#2155: The next rep cmps comparison will be done backward,
* and thus it should be started at end_pc - 1
* because current basic block is [start_pc:end_pc-1].
*/
PRE(ilist, instr,
INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XSI),
OPND_CREATE_INTPTR(end_pc - 1)));
PRE(ilist, instr, forward);
PRE(ilist, instr, INSTR_CREATE_rep_cmps_1(dcontext));
}
PRE(ilist, instr, check_results);
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XCX, TLS_XCX_SLOT, XCX_OFFSET));
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XSI, TLS_XBX_SLOT, XSI_OFFSET));
PRE(ilist, instr,
RESTORE_FROM_DC_OR_TLS(dcontext, REG_XDI, TLS_XDX_SLOT, XDI_OFFSET));
if (!TEST(FRAG_WRITES_EFLAGS_6, flags)) {
instr_t *start_bb = INSTR_CREATE_label(dcontext);
PRE(ilist, instr,
INSTR_CREATE_jcc(dcontext, OP_je, opnd_create_instr(start_bb)));
if (restore_eflags_and_exit != NULL) /* somebody needs this label */
PRE(ilist, instr, restore_eflags_and_exit);
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
jmp = INSTR_CREATE_jmp(dcontext, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
PRE(ilist, instr, start_bb);
} else {
jmp = INSTR_CREATE_jcc(dcontext, OP_jne, opnd_create_pc(start_pc));
instr_branch_set_special_exit(jmp, true);
/* an exit cti, not a meta instr */
instrlist_preinsert(ilist, instr, jmp);
}
insert_restore_eflags(dcontext, ilist, instr, flags, use_tls, !use_tls
_IF_X64(X64_CACHE_MODE_DC(dcontext) &&
!X64_MODE_DC(dcontext) && x86_to_x64_ibl_opt));
/* fall-through to bb start */
}
/* returns false if failed to add sandboxing b/c of a problematic ilist --
* invalid instrs, elided ctis, etc.
*/
bool
insert_selfmod_sandbox(dcontext_t *dcontext, instrlist_t *ilist, uint flags,
app_pc start_pc, app_pc end_pc, /* end is open */
bool record_translation, bool for_cache)
{
instr_t *instr, *next;
if (!INTERNAL_OPTION(hw_cache_consistency))
return true; /* nothing to do */
/* this code assumes bb covers single, contiguous region */
ASSERT((flags & FRAG_HAS_DIRECT_CTI) == 0);
/* store first instr so loop below will skip top check */
instr = instrlist_first_expanded(dcontext, ilist);
instrlist_set_our_mangling(ilist, true); /* PR 267260 */
if (record_translation) {
/* skip client instrumentation, if any, as is done below */
while (instr != NULL && instr_is_meta(instr))
instr = instr_get_next_expanded(dcontext, ilist, instr);
/* make sure inserted instrs translate to the proper original instr */
ASSERT(instr != NULL && instr_get_translation(instr) != NULL);
instrlist_set_translation_target(ilist, instr_get_translation(instr));
}
sandbox_top_of_bb(dcontext, ilist,
sandbox_top_of_bb_check_s2ro(dcontext, start_pc),
flags, start_pc, end_pc, for_cache,
NULL, NULL, NULL);
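    /* Walk the block and instrument every app instruction that writes memory,
     * so a store landing in [start_pc, end_pc) exits via the selfmod path.
     */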
if (INTERNAL_OPTION(sandbox_writes)) {
for (; instr != NULL; instr = next) {
int i, opcode;
opnd_t op;
opcode = instr_get_opcode(instr);
if (!instr_valid(instr)) {
/* invalid instr -- best to truncate block here, easiest way
* to do that and get all flags right is to re-build it,
* but this time we'll use full decode so we'll avoid the discrepancy
* between fast and full decode on invalid instr detection.
*/
if (record_translation)
instrlist_set_translation_target(ilist, NULL);
instrlist_set_our_mangling(ilist, false); /* PR 267260 */
return false;
}
/* don't mangle anything that mangle inserts! */
next = instr_get_next_expanded(dcontext, ilist, instr);
if (instr_is_meta(instr))
continue;
if (record_translation) {
/* make sure inserted instrs translate to the proper original instr */
ASSERT(instr_get_translation(instr) != NULL);
instrlist_set_translation_target(ilist, instr_get_translation(instr));
}
if (opcode == OP_rep_ins || opcode == OP_rep_movs || opcode == OP_rep_stos) {
sandbox_rep_instr(dcontext, ilist, instr, next, start_pc, end_pc);
continue;
}
/* FIXME case 8165: optimize for multiple push/pop */
for (i=0; i<instr_num_dsts(instr); i++) {
op = instr_get_dst(instr, i);
if (opnd_is_memory_reference(op)) {
/* ignore CALL* since last anyways */
if (instr_is_call_indirect(instr)) {
ASSERT(next != NULL && !instr_raw_bits_valid(next));
/* FIXME case 8165: why do we ever care about the last
* instruction modifying anything?
*/
/* conversion of IAT calls (but not elision)
* transforms this into a direct CALL,
* in that case 'next' is a direct jmp
* fall through, so has no exit flags
*/
ASSERT(EXIT_IS_CALL(instr_exit_branch_type(next)) ||
(DYNAMO_OPTION(IAT_convert) &&
TEST(INSTR_IND_CALL_DIRECT, instr->flags)));
LOG(THREAD, LOG_INTERP, 3, " ignoring CALL* at end of fragment\n");
/* This test could be done outside of this loop on
* destinations, but since it is rare it is faster
* to do it here. Using continue instead of break in case
* it gets moved out.
*/
continue;
}
if (opnd_is_abs_addr(op) IF_X64(|| opnd_is_rel_addr(op))) {
app_pc abs_addr = opnd_get_addr(op);
uint size = opnd_size_in_bytes(opnd_get_size(op));
if (!POINTER_OVERFLOW_ON_ADD(abs_addr, size) &&
(abs_addr + size < start_pc || abs_addr >= end_pc)) {
/* This is an absolute memory reference that points
* outside the current basic block and doesn't need
* sandboxing.
*/
continue;
}
}
sandbox_write(dcontext, ilist, instr, next, op, start_pc, end_pc);
}
}
}
}
if (record_translation)
instrlist_set_translation_target(ilist, NULL);
instrlist_set_our_mangling(ilist, false); /* PR 267260 */
return true;
}
/* Offsets within selfmod sandbox top-of-bb code that we patch once
* the code is emitted, as the values depend on the emitted address.
 * These vary by whether sandbox_top_of_bb_check_s2ro() is enabled and whether
* eflags are not written, all written, or just OF is written.
* For the copy_size == 1 variation, we simply ignore the 2nd patch point.
*/
static bool selfmod_s2ro[] = { false, true };
static uint selfmod_eflags[] = { FRAG_WRITES_EFLAGS_6, FRAG_WRITES_EFLAGS_OF, 0 };
#define SELFMOD_NUM_S2RO (sizeof(selfmod_s2ro)/sizeof(selfmod_s2ro[0]))
#define SELFMOD_NUM_EFLAGS (sizeof(selfmod_eflags)/sizeof(selfmod_eflags[0]))
#ifdef X64 /* additional complexity: start_pc > 4GB? */
static app_pc selfmod_gt4G[] = { NULL, (app_pc)(POINTER_MAX-2)/*so end can be +2*/ };
# define SELFMOD_NUM_GT4G (sizeof(selfmod_gt4G)/sizeof(selfmod_gt4G[0]))
#endif
uint selfmod_copy_start_offs[SELFMOD_NUM_S2RO][SELFMOD_NUM_EFLAGS]
IF_X64([SELFMOD_NUM_GT4G]);
uint selfmod_copy_end_offs[SELFMOD_NUM_S2RO][SELFMOD_NUM_EFLAGS]
IF_X64([SELFMOD_NUM_GT4G]);
void
set_selfmod_sandbox_offsets(dcontext_t *dcontext)
{
int i, j;
#ifdef X64
int k;
#endif
instrlist_t ilist;
patch_list_t patch;
static byte buf[256];
uint len;
/* We assume this is called at init, when .data is +w and we need no
* synch on accessing buf */
ASSERT(!dynamo_initialized);
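    /* Encode each variant of the top-of-bb check (s2ro on/off x eflags
     * handling x IF_X64 >4GB start_pc) into a scratch buffer and record where
     * the two patchable pointers land, so finalize_selfmod_sandbox can patch
     * emitted fragments without re-encoding an ilist.
     */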
for (i = 0; i < SELFMOD_NUM_S2RO; i++) {
for (j = 0; j < SELFMOD_NUM_EFLAGS; j++) {
#ifdef X64
for (k = 0; k < SELFMOD_NUM_GT4G; k++) {
#endif
cache_pc start_pc, end_pc;
app_pc app_start;
instr_t *inst;
instrlist_init(&ilist);
/* sandbox_top_of_bb assumes there's an instr there */
instrlist_append(&ilist, INSTR_CREATE_label(dcontext));
init_patch_list(&patch, PATCH_TYPE_ABSOLUTE);
app_start = IF_X64_ELSE(selfmod_gt4G[k], NULL);
sandbox_top_of_bb(dcontext, &ilist,
selfmod_s2ro[i], selfmod_eflags[j],
/* we must have a >1-byte region to get
* both patch points */
app_start, app_start + 2, false,
&patch, &start_pc, &end_pc);
/* The exit cti's may not reachably encode (normally
* they'd be mangled away) so we munge them first
*/
for (inst = instrlist_first(&ilist); inst != NULL;
inst = instr_get_next(inst)) {
if (instr_is_exit_cti(inst)) {
instr_set_target(inst, opnd_create_pc(buf));
}
}
len = encode_with_patch_list(dcontext, &patch, &ilist, buf);
ASSERT(len < BUFFER_SIZE_BYTES(buf));
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(start_pc - buf)));
selfmod_copy_start_offs[i][j]IF_X64([k]) = (uint) (start_pc - buf);
IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(end_pc - buf)));
selfmod_copy_end_offs[i][j]IF_X64([k]) = (uint) (end_pc - buf);
LOG(THREAD, LOG_EMIT, 3, "selfmod offs %d %d"IF_X64(" %d")": %u %u\n",
i, j, IF_X64_(k)
selfmod_copy_start_offs[i][j]IF_X64([k]),
selfmod_copy_end_offs[i][j]IF_X64([k]));
/* free the instrlist_t elements */
instrlist_clear(dcontext, &ilist);
#ifdef X64
}
#endif
}
}
}
void
finalize_selfmod_sandbox(dcontext_t *dcontext, fragment_t *f)
{
cache_pc copy_pc = FRAGMENT_SELFMOD_COPY_PC(f);
byte *pc;
int i, j;
#ifdef X64
int k = ((ptr_uint_t)f->tag) > UINT_MAX ? 1 : 0;
#endif
i = (sandbox_top_of_bb_check_s2ro(dcontext, f->tag)) ? 1 : 0;
j = (TEST(FRAG_WRITES_EFLAGS_6, f->flags) ? 0 :
(TEST(FRAG_WRITES_EFLAGS_OF, f->flags) ? 1 : 2));
pc = FCACHE_ENTRY_PC(f) + selfmod_copy_start_offs[i][j]IF_X64([k]);
/* The copy start gets updated after sandbox_top_of_bb. */
*((cache_pc*)pc) = copy_pc;
if (FRAGMENT_SELFMOD_COPY_CODE_SIZE(f) > 1) {
pc = FCACHE_ENTRY_PC(f) + selfmod_copy_end_offs[i][j]IF_X64([k]);
/* i#2155: The copy end gets updated.
* This value will be used in the case where the direction flag is set.
* It will then be the starting point for the backward repe cmps.
*/
*((cache_pc*)pc) = (copy_pc + FRAGMENT_SELFMOD_COPY_CODE_SIZE(f) - 1);
} /* else, no 2nd patch point */
}
#endif /* !STANDALONE_DECODER */
/***************************************************************************/
| 1 | 10,996 | I think that the iret handling is not yet good. | DynamoRIO-dynamorio | c |
@@ -78,7 +78,7 @@ const DeviceSizeTabBar = ( {
{ icon }
</Tab>
);
- }
+ },
) }
</TabBar>
); | 1 | /**
* DeviceSizeTabBar component.
*
* Site Kit by Google, Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import Tab from '@material/react-tab';
import TabBar from '@material/react-tab-bar';
import PropTypes from 'prop-types';
/**
* WordPress dependencies
*/
import { useCallback } from '@wordpress/element';
import { __ } from '@wordpress/i18n';
/**
* Internal dependencies
*/
import DeviceSizeMobileIcon from '../../svg/device-size-mobile-icon.svg';
import DeviceSizeDesktopIcon from '../../svg/device-size-desktop-icon.svg';
const DeviceSizeTabBar = ( {
activeTab,
handleDeviceSizeUpdate,
deviceSizes = [
{
slug: 'mobile',
label: __( 'Mobile', 'google-site-kit' ),
icon: <DeviceSizeMobileIcon width="15" height="22" />,
},
{
slug: 'desktop',
label: __( 'Desktop', 'google-site-kit' ),
icon: <DeviceSizeDesktopIcon width="23" height="17" />,
},
],
} ) => {
const onUpdate = useCallback( ( index ) => {
const device = deviceSizes[ index ];
handleDeviceSizeUpdate( device, index );
}, [ deviceSizes, handleDeviceSizeUpdate ] );
if ( ! deviceSizes?.length ) {
return null;
}
const activeIndex = deviceSizes.findIndex( ( { slug } ) => slug === activeTab );
return (
<TabBar
className="googlesitekit-device-size-tab-bar"
activeIndex={ activeIndex }
handleActiveIndexUpdate={ onUpdate }
>
{ deviceSizes.map( ( { icon, label }, i ) => {
return (
<Tab
key={ `google-sitekit-device-size-tab-key-${ i }` }
aria-label={ label }
focusOnActivate={ false }
>
{ icon }
</Tab>
);
}
) }
</TabBar>
);
};
DeviceSizeTabBar.propTypes = {
activeTab: PropTypes.string,
deviceSizes: PropTypes.arrayOf(
PropTypes.shape( {
label: PropTypes.string,
slug: PropTypes.string,
icon: PropTypes.node,
} ),
),
handleDeviceSizeUpdate: PropTypes.func,
};
DeviceSizeTabBar.defaultProps = {
handleDeviceSizeUpdate: () => {},
};
export default DeviceSizeTabBar;
| 1 | 40,145 | Huh, that's kinda weird. I get it, but it's unexpected to me... | google-site-kit-wp | js |
@@ -172,15 +172,6 @@ func (t *Tag) Done(s State) bool {
return err == nil && n == total
}
-// DoneSplit sets total count to SPLIT count and sets the associated swarm hash for this tag
-// is meant to be called when splitter finishes for input streams of unknown size
-func (t *Tag) DoneSplit(address swarm.Address) int64 {
- total := atomic.LoadInt64(&t.Split)
- atomic.StoreInt64(&t.Total, total)
- t.Address = address
- return total
-}
-
// Status returns the value of state and the total count
func (t *Tag) Status(state State) (int64, int64, error) {
count, seen, total := t.Get(state), atomic.LoadInt64(&t.Seen), atomic.LoadInt64(&t.Total) | 1 | // Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package tags
import (
"context"
"encoding/binary"
"errors"
"sync"
"sync/atomic"
"time"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/opentracing/opentracing-go"
)
var (
errExists = errors.New("already exists")
errNA = errors.New("not available yet")
errNoETA = errors.New("unable to calculate ETA")
)
// State is the enum type for chunk states
type State = uint32
const (
TotalChunks State = iota // The total no of chunks for the tag
StateSplit // chunk has been processed by filehasher/swarm safe call
StateStored // chunk stored locally
StateSeen // chunk previously seen
StateSent // chunk sent to neighbourhood
StateSynced // proof is received; chunk removed from sync db; chunk is available everywhere
)
// Tag represents info on the status of new chunks
type Tag struct {
Total int64 // total chunks belonging to a tag
Split int64 // number of chunks already processed by splitter for hashing
Seen int64 // number of chunks already seen
Stored int64 // number of chunks already stored locally
Sent int64 // number of chunks sent for push syncing
Synced int64 // number of chunks synced with proof
Uid uint32 // a unique identifier for this tag
Anonymous bool // indicates if the tag is anonymous (i.e. if only pull sync should be used)
Name string // a name tag for this tag
Address swarm.Address // the associated swarm hash for this tag
StartedAt time.Time // tag started to calculate ETA
// end-to-end tag tracing
ctx context.Context // tracing context
span opentracing.Span // tracing root span
spanOnce sync.Once // make sure we close root span only once
}
// NewTag creates a new tag, and returns it
func NewTag(ctx context.Context, uid uint32, s string, total int64, anon bool, tracer *tracing.Tracer) *Tag {
t := &Tag{
Uid: uid,
Anonymous: anon,
Name: s,
StartedAt: time.Now(),
Total: total,
}
// context here is used only to store the root span `new.upload.tag` within Tag,
// we don't need any type of ctx Deadline or cancellation for this particular ctx
t.span, _, t.ctx = tracer.StartSpanFromContext(ctx, "new.upload.tag", nil)
return t
}
// Context accessor
func (t *Tag) Context() context.Context {
return t.ctx
}
// FinishRootSpan closes the pushsync span of the tags
func (t *Tag) FinishRootSpan() {
t.spanOnce.Do(func() {
t.span.Finish()
})
}
// IncN increments the count for a state
func (t *Tag) IncN(state State, n int) {
var v *int64
switch state {
case TotalChunks:
v = &t.Total
case StateSplit:
v = &t.Split
case StateStored:
v = &t.Stored
case StateSeen:
v = &t.Seen
case StateSent:
v = &t.Sent
case StateSynced:
v = &t.Synced
}
atomic.AddInt64(v, int64(n))
}
// Inc increments the count for a state
func (t *Tag) Inc(state State) {
t.IncN(state, 1)
}
// Get returns the count for a state on a tag
func (t *Tag) Get(state State) int64 {
var v *int64
switch state {
case TotalChunks:
v = &t.Total
case StateSplit:
v = &t.Split
case StateStored:
v = &t.Stored
case StateSeen:
v = &t.Seen
case StateSent:
v = &t.Sent
case StateSynced:
v = &t.Synced
}
return atomic.LoadInt64(v)
}
// TotalCounter returns the total count
func (t *Tag) TotalCounter() int64 {
return atomic.LoadInt64(&t.Total)
}
// WaitTillDone returns without error once the tag is complete
// wrt the state given as argument
// it returns an error if the context is done
func (t *Tag) WaitTillDone(ctx context.Context, s State) error {
if t.Done(s) {
return nil
}
ticker := time.NewTicker(100 * time.Millisecond)
for {
select {
case <-ticker.C:
if t.Done(s) {
return nil
}
case <-ctx.Done():
return ctx.Err()
}
}
}
// Done returns true if tag is complete wrt the state given as argument
func (t *Tag) Done(s State) bool {
n, total, err := t.Status(s)
return err == nil && n == total
}
// DoneSplit sets total count to SPLIT count and sets the associated swarm hash for this tag
// is meant to be called when splitter finishes for input streams of unknown size
func (t *Tag) DoneSplit(address swarm.Address) int64 {
total := atomic.LoadInt64(&t.Split)
atomic.StoreInt64(&t.Total, total)
t.Address = address
return total
}
// Status returns the value of state and the total count
func (t *Tag) Status(state State) (int64, int64, error) {
count, seen, total := t.Get(state), atomic.LoadInt64(&t.Seen), atomic.LoadInt64(&t.Total)
if total == 0 {
return count, total, errNA
}
switch state {
case StateSplit, StateStored, StateSeen:
return count, total, nil
case StateSent, StateSynced:
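		// For push progress the effective total excludes chunks already seen
		// (duplicates are not pushed), and the counts are only considered
		// available once every chunk has been stored locally.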
stored := atomic.LoadInt64(&t.Stored)
if stored < total {
return count, total - seen, errNA
}
return count, total - seen, nil
}
return count, total, errNA
}
// ETA returns the time of completion estimated based on time passed and rate of completion
func (t *Tag) ETA(state State) (time.Time, error) {
cnt, total, err := t.Status(state)
if err != nil {
return time.Time{}, err
}
if cnt == 0 || total == 0 {
return time.Time{}, errNoETA
}
diff := time.Since(t.StartedAt)
dur := time.Duration(total) * diff / time.Duration(cnt)
return t.StartedAt.Add(dur), nil
}
// MarshalBinary marshals the tag into a byte slice
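// Layout: 4-byte big-endian Uid, varints for Total, Split, Seen, Stored, Sent
// and Synced, a varint StartedAt (unix seconds), a varint address length, the
// address bytes, and finally the name.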
func (tag *Tag) MarshalBinary() (data []byte, err error) {
buffer := make([]byte, 4)
binary.BigEndian.PutUint32(buffer, tag.Uid)
encodeInt64Append(&buffer, tag.Total)
encodeInt64Append(&buffer, tag.Split)
encodeInt64Append(&buffer, tag.Seen)
encodeInt64Append(&buffer, tag.Stored)
encodeInt64Append(&buffer, tag.Sent)
encodeInt64Append(&buffer, tag.Synced)
intBuffer := make([]byte, 8)
n := binary.PutVarint(intBuffer, tag.StartedAt.Unix())
buffer = append(buffer, intBuffer[:n]...)
n = binary.PutVarint(intBuffer, int64(len(tag.Address.Bytes())))
buffer = append(buffer, intBuffer[:n]...)
buffer = append(buffer, tag.Address.Bytes()...)
buffer = append(buffer, []byte(tag.Name)...)
return buffer, nil
}
// UnmarshalBinary unmarshals a byte slice into a tag
func (tag *Tag) UnmarshalBinary(buffer []byte) error {
if len(buffer) < 13 {
return errors.New("buffer too short")
}
tag.Uid = binary.BigEndian.Uint32(buffer)
buffer = buffer[4:]
tag.Total = decodeInt64Splice(&buffer)
tag.Split = decodeInt64Splice(&buffer)
tag.Seen = decodeInt64Splice(&buffer)
tag.Stored = decodeInt64Splice(&buffer)
tag.Sent = decodeInt64Splice(&buffer)
tag.Synced = decodeInt64Splice(&buffer)
t, n := binary.Varint(buffer)
tag.StartedAt = time.Unix(t, 0)
buffer = buffer[n:]
t, n = binary.Varint(buffer)
buffer = buffer[n:]
if t > 0 {
tag.Address = swarm.NewAddress(buffer[:t])
}
tag.Name = string(buffer[t:])
return nil
}
func encodeInt64Append(buffer *[]byte, val int64) {
intBuffer := make([]byte, 8)
n := binary.PutVarint(intBuffer, val)
*buffer = append(*buffer, intBuffer[:n]...)
}
func decodeInt64Splice(buffer *[]byte) int64 {
val, n := binary.Varint((*buffer))
*buffer = (*buffer)[n:]
return val
}
| 1 | 10,770 | why is this removed? it is important when you upload from stream of unknown size | ethersphere-bee | go |
@@ -3,7 +3,7 @@ class ApiToken < ActiveRecord::Base
before_create :generate_token
- belongs_to :approval
+ belongs_to :approval, class_name: 'Approvals::Individual'
has_one :proposal, through: :approval
has_one :user, through: :approval
| 1 | class ApiToken < ActiveRecord::Base
has_paper_trail
before_create :generate_token
belongs_to :approval
has_one :proposal, through: :approval
has_one :user, through: :approval
# TODO validates :access_token, presence: true
validates :approval_id, presence: true
scope :unexpired, -> { where('expires_at >= ?', Time.now) }
scope :expired, -> { where('expires_at < ?', Time.now) }
scope :unused, -> { where(used_at: nil) }
scope :fresh, -> { unused.unexpired }
def used?
!!self.used_at
end
# @todo: validate presence of expires_at
def expired?
self.expires_at && self.expires_at < Time.now
end
def use!
self.update_attributes!(used_at: Time.now)
end
private
def generate_token
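    # Keep regenerating until the token is unique across all ApiTokens.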
begin
self.access_token = SecureRandom.hex
end while self.class.exists?(access_token: access_token)
self.expires_at ||= Time.now + 7.days
end
end
| 1 | 13,628 | Out of curiosity, why is this needed? Does it enforce what class can be assigned? | 18F-C2 | rb |
@@ -174,6 +174,11 @@ func (a *Agent) initEndpoints() error {
return fmt.Errorf("Error creating GRPC listener: %s", err)
}
+ if addr.Network() == "unix" {
+ // Any process should be able to use this unix socket
+ os.Chmod(addr.String(), os.ModePerm)
+ }
+
go func() {
a.config.ErrorCh <- a.grpcServer.Serve(listener)
}() | 1 | package agent
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"time"
"github.com/sirupsen/logrus"
"github.com/spiffe/go-spiffe/uri"
"github.com/spiffe/spire/pkg/agent/auth"
"github.com/spiffe/spire/pkg/agent/cache"
"github.com/spiffe/spire/pkg/agent/catalog"
"github.com/spiffe/spire/proto/agent/keymanager"
"github.com/spiffe/spire/proto/agent/nodeattestor"
"github.com/spiffe/spire/proto/api/node"
"github.com/spiffe/spire/proto/api/workload"
"github.com/spiffe/spire/proto/common"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
spiffe_tls "github.com/spiffe/go-spiffe/tls"
)
type Config struct {
// Address to bind the workload api to
BindAddress *net.UnixAddr
// Distinguished Name to use for all CSRs
CertDN *pkix.Name
// Directory to store runtime data
DataDir string
// Directory for plugin configs
PluginDir string
Log logrus.FieldLogger
// Address of SPIRE server
ServerAddress *net.TCPAddr
// A channel for receiving errors from agent goroutines
ErrorCh chan error
// A channel to trigger agent shutdown
ShutdownCh chan struct{}
// Trust domain and associated CA bundle
TrustDomain url.URL
TrustBundle *x509.CertPool
}
type Agent struct {
BaseSVID []byte
baseSVIDKey *ecdsa.PrivateKey
BaseSVIDTTL int32
config *Config
grpcServer *grpc.Server
Cache cache.Cache
Catalog catalog.Catalog
serverCerts []*x509.Certificate
}
func New(c *Config) *Agent {
config := &catalog.Config{
ConfigDir: c.PluginDir,
Log: c.Log.WithField("subsystem_name", "catalog"),
}
return &Agent{config: c, Catalog: catalog.New(config)}
}
// Run the agent
// This method initializes the agent, including its plugins,
// and then blocks on the main event loop.
func (a *Agent) Run() error {
a.Cache = cache.NewCache()
err := a.initPlugins()
if err != nil {
return err
}
err = a.bootstrap()
if err != nil {
return err
}
err = a.initEndpoints()
if err != nil {
return err
}
// Main event loop
a.config.Log.Info("SPIRE Agent is now running")
for {
select {
case err = <-a.config.ErrorCh:
return err
case <-a.config.ShutdownCh:
return a.Shutdown()
}
}
}
func (a *Agent) Shutdown() error {
if a.Catalog != nil {
a.Catalog.Stop()
}
a.grpcServer.GracefulStop()
// Drain error channel, last one wins
var err error
Drain:
for {
select {
case e := <-a.config.ErrorCh:
err = e
default:
break Drain
}
}
return err
}
func (a *Agent) initPlugins() error {
err := a.Catalog.Run()
if err != nil {
return err
}
return nil
}
func (a *Agent) initEndpoints() error {
a.config.Log.Info("Starting the workload API")
maxWorkloadTTL := time.Duration(a.BaseSVIDTTL/2) * time.Second
log := a.config.Log.WithField("subsystem_name", "workload")
ws := &workloadServer{
bundle: a.serverCerts[1].Raw, // TODO: Fix handling of serverCerts
cache: a.Cache,
catalog: a.Catalog,
l: log,
maxTTL: maxWorkloadTTL,
}
// Create a gRPC server with our custom "credential" resolver
a.grpcServer = grpc.NewServer(grpc.Creds(auth.NewCredentials()))
workload.RegisterWorkloadServer(a.grpcServer, ws)
addr := a.config.BindAddress
if addr.Network() == "unix" {
_ = os.Remove(addr.String())
}
listener, err := net.Listen(addr.Network(), addr.String())
if err != nil {
return fmt.Errorf("Error creating GRPC listener: %s", err)
}
go func() {
a.config.ErrorCh <- a.grpcServer.Serve(listener)
}()
return nil
}
func (a *Agent) bootstrap() error {
a.config.Log.Info("Bootstrapping SPIRE agent")
plugins := a.Catalog.KeyManagers()
if len(plugins) != 1 {
return fmt.Errorf("Expected only one key manager plugin, found %i", len(plugins))
}
keyManager := plugins[0]
// Fetch or generate private key
res, err := keyManager.FetchPrivateKey(&keymanager.FetchPrivateKeyRequest{})
if err != nil {
return err
}
if len(res.PrivateKey) > 0 {
key, err := x509.ParseECPrivateKey(res.PrivateKey)
if err != nil {
return err
}
err = a.loadBaseSVID()
if err != nil {
return err
}
a.baseSVIDKey = key
} else {
if a.BaseSVID != nil {
a.config.Log.Info("Certificate configured but no private key found!")
}
a.config.Log.Info("Generating private key for new base SVID")
res, err := keyManager.GenerateKeyPair(&keymanager.GenerateKeyPairRequest{})
if err != nil {
return fmt.Errorf("Failed to generate private key: %s", err)
}
key, err := x509.ParseECPrivateKey(res.PrivateKey)
if err != nil {
return err
}
a.baseSVIDKey = key
// If we're here, we need to attest/Re-attest
regEntryMap, err := a.attest()
if err != nil {
return err
}
err = a.FetchSVID(regEntryMap, a.BaseSVID, a.baseSVIDKey)
if err != nil {
return err
}
}
a.config.Log.Info("Bootstrapping done")
return nil
}
/* Attest the agent and obtain a new Base SVID.
Returns a spiffeid->registration entries map.
This map is used to generate CSRs for non-base SVIDs and to update the agent cache entries.
*/
func (a *Agent) attest() (map[string]*common.RegistrationEntry, error) {
a.config.Log.Info("Preparing to attest against %s", a.config.ServerAddress.String())
plugins := a.Catalog.NodeAttestors()
if len(plugins) != 1 {
return nil, fmt.Errorf("Expected only one node attestor plugin, found %i", len(plugins))
}
attestor := plugins[0]
pluginResponse, err := attestor.FetchAttestationData(&nodeattestor.FetchAttestationDataRequest{})
if err != nil {
return nil, fmt.Errorf("Failed to get attestation data from plugin: %s", err)
}
// Parse the SPIFFE ID, form a CSR with it
id, err := url.Parse(pluginResponse.SpiffeId)
if err != nil {
return nil, fmt.Errorf("Failed to form SPIFFE ID: %s", err)
}
csr, err := a.generateCSR(id, a.baseSVIDKey)
if err != nil {
return nil, fmt.Errorf("Failed to generate CSR for attestation: %s", err)
}
// Since we are bootstrapping, this is explicitly _not_ mTLS
conn := a.getNodeAPIClientConn(false, a.BaseSVID, a.baseSVIDKey)
defer conn.Close()
nodeClient := node.NewNodeClient(conn)
// Perform attestation
req := &node.FetchBaseSVIDRequest{
AttestedData: pluginResponse.AttestedData,
Csr: csr,
}
calloptPeer := new(peer.Peer)
serverResponse, err := nodeClient.FetchBaseSVID(context.Background(), req, grpc.Peer(calloptPeer))
if err != nil {
return nil, fmt.Errorf("Failed attestation against spire server: %s", err)
}
if tlsInfo, ok := calloptPeer.AuthInfo.(credentials.TLSInfo); ok {
a.serverCerts = tlsInfo.State.PeerCertificates
}
// Pull base SVID out of the response
svids := serverResponse.SvidUpdate.Svids
if len(svids) > 1 {
a.config.Log.Info("More than one SVID received during attestation!")
}
svid, ok := svids[id.String()]
if !ok {
return nil, fmt.Errorf("Base SVID not found in attestation response")
}
var registrationEntryMap = make(map[string]*common.RegistrationEntry)
for _, entry := range serverResponse.SvidUpdate.RegistrationEntries {
registrationEntryMap[entry.SpiffeId] = entry
}
a.BaseSVID = svid.SvidCert
a.BaseSVIDTTL = svid.Ttl
a.storeBaseSVID()
a.config.Log.Info("Attestation complete")
return registrationEntryMap, nil
}
// Generate a CSR for the given SPIFFE ID
func (a *Agent) generateCSR(spiffeID *url.URL, key *ecdsa.PrivateKey) ([]byte, error) {
a.config.Log.Info("Generating a CSR for %s", spiffeID.String())
uriSANs, err := uri.MarshalUriSANs([]string{spiffeID.String()})
if err != nil {
return []byte{}, err
}
uriSANExtension := []pkix.Extension{{
Id: uri.OidExtensionSubjectAltName,
Value: uriSANs,
Critical: true,
}}
csrData := &x509.CertificateRequest{
Subject: *a.config.CertDN,
SignatureAlgorithm: x509.ECDSAWithSHA256,
ExtraExtensions: uriSANExtension,
}
csr, err := x509.CreateCertificateRequest(rand.Reader, csrData, key)
if err != nil {
return nil, err
}
return csr, nil
}
// Read base SVID from data dir and load it
func (a *Agent) loadBaseSVID() error {
a.config.Log.Info("Loading base SVID from disk")
certPath := path.Join(a.config.DataDir, "base_svid.crt")
if _, err := os.Stat(certPath); os.IsNotExist(err) {
a.config.Log.Info("A base SVID could not be found. A new one will be generated")
return nil
}
data, err := ioutil.ReadFile(certPath)
if err != nil {
return fmt.Errorf("Could not read Base SVID at path %s: %s", certPath, err)
}
// Sanity check
_, err = x509.ParseCertificate(data)
if err != nil {
return fmt.Errorf("Certificate at %s could not be understood: %s", certPath, err)
}
a.BaseSVID = data
return nil
}
// Write base SVID to storage dir
func (a *Agent) storeBaseSVID() {
certPath := path.Join(a.config.DataDir, "base_svid.crt")
f, err := os.Create(certPath)
defer f.Close()
if err != nil {
a.config.Log.Info("Unable to store Base SVID at path %s!", certPath)
return
}
f.Write(a.BaseSVID)
f.Sync()
return
}
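// FetchSVID requests SVIDs for the given registration entries over mTLS,
// caches each returned SVID together with its private key and expiry, and
// recurses for any registration entries newly reported by the server.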
func (a *Agent) FetchSVID(registrationEntryMap map[string]*common.RegistrationEntry, svidCert []byte,
key *ecdsa.PrivateKey) (err error) {
if len(registrationEntryMap) != 0 {
Csrs, pkeyMap, err := a.generateCSRForRegistrationEntries(registrationEntryMap)
if err != nil {
return err
}
conn := a.getNodeAPIClientConn(true, svidCert, key)
defer conn.Close()
nodeClient := node.NewNodeClient(conn)
req := &node.FetchSVIDRequest{Csrs: Csrs}
callOptPeer := new(peer.Peer)
resp, err := nodeClient.FetchSVID(context.Background(), req, grpc.Peer(callOptPeer))
if err != nil {
return err
}
if tlsInfo, ok := callOptPeer.AuthInfo.(credentials.TLSInfo); ok {
a.serverCerts = tlsInfo.State.PeerCertificates
}
svidMap := resp.GetSvidUpdate().GetSvids()
// TODO: Fetch the referenced federated bundles and
// set them here
bundles := make(map[string][]byte)
for spiffeID, entry := range registrationEntryMap {
svid, svidInMap := svidMap[spiffeID]
pkey, pkeyInMap := pkeyMap[spiffeID]
if svidInMap && pkeyInMap {
svidCert, err := x509.ParseCertificate(svid.SvidCert)
if err != nil {
return fmt.Errorf("SVID for ID %s could not be parsed: %s", spiffeID, err)
}
entry := cache.CacheEntry{
RegistrationEntry: entry,
SVID: svid,
PrivateKey: pkey,
Bundles: bundles,
Expiry: svidCert.NotAfter,
}
a.Cache.SetEntry(entry)
}
}
newRegistrationMap := make(map[string]*common.RegistrationEntry)
if len(resp.SvidUpdate.RegistrationEntries) != 0 {
for _, entry := range resp.SvidUpdate.RegistrationEntries {
				if _, ok := registrationEntryMap[entry.SpiffeId]; !ok {
newRegistrationMap[entry.SpiffeId] = entry
}
a.FetchSVID(newRegistrationMap, svidMap[entry.SpiffeId].SvidCert, pkeyMap[entry.SpiffeId])
}
}
}
return
}
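// getNodeAPIClientConn dials the SPIRE server's node API. During bootstrap
// (mtls == false) it only validates the server against the configured trust
// bundle; afterwards it presents the supplied SVID and trusts the certificates
// captured from the previous server response.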
func (a *Agent) getNodeAPIClientConn(mtls bool, svid []byte, key *ecdsa.PrivateKey) (conn *grpc.ClientConn) {
serverID := a.config.TrustDomain
serverID.Path = "spiffe/cp"
var spiffePeer *spiffe_tls.TLSPeer
var tlsCert []tls.Certificate
var tlsConfig *tls.Config
if !mtls {
spiffePeer = &spiffe_tls.TLSPeer{
SpiffeIDs: []string{serverID.String()},
TrustRoots: a.config.TrustBundle,
}
tlsConfig = spiffePeer.NewTLSConfig(tlsCert)
} else {
certPool := x509.NewCertPool()
for _, cert := range a.serverCerts {
certPool.AddCert(cert)
}
spiffePeer = &spiffe_tls.TLSPeer{
SpiffeIDs: []string{serverID.String()},
TrustRoots: certPool,
}
tlsCert = append(tlsCert, tls.Certificate{Certificate: [][]byte{svid}, PrivateKey: key})
tlsConfig = spiffePeer.NewTLSConfig(tlsCert)
}
dialCreds := grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))
conn, err := grpc.Dial(a.config.ServerAddress.String(), dialCreds)
if err != nil {
return
}
return
}
func (a *Agent) generateCSRForRegistrationEntries(
regEntryMap map[string]*common.RegistrationEntry) (CSRs [][]byte, pkeyMap map[string]*ecdsa.PrivateKey, err error) {
pkeyMap = make(map[string]*ecdsa.PrivateKey)
	for id := range regEntryMap {
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
if err != nil {
return nil, nil, err
}
spiffeid, err := url.Parse(id)
if err != nil {
return nil, nil, err
}
csr, err := a.generateCSR(spiffeid, key)
if err != nil {
return nil, nil, err
}
CSRs = append(CSRs, csr)
pkeyMap[id] = key
}
return
}
| 1 | 8,671 | non-blocking: Now that we have a handful of statements which deal with creating a listener, it may make sense to introduce a `createListener` method or something similar | spiffe-spire | go |
@@ -34,6 +34,8 @@ namespace Nethermind.Mev
}
public IBlockProcessor.IBlockTransactionsExecutor Create(ReadOnlyTxProcessingEnv readOnlyTxProcessingEnv) =>
- new MevBlockProductionTransactionsExecutor(readOnlyTxProcessingEnv, _specProvider, _logManager);
+ LastExecutor = new MevBlockProductionTransactionsExecutor(readOnlyTxProcessingEnv, _specProvider, _logManager);
+
+ public MevBlockProductionTransactionsExecutor LastExecutor { get; private set; } = null!;
}
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
using Nethermind.Blockchain.Processing;
using Nethermind.Blockchain.Producers;
using Nethermind.Core.Specs;
using Nethermind.Logging;
namespace Nethermind.Mev
{
public class MevBlockProducerTransactionsExecutorFactory : IBlockTransactionsExecutorFactory
{
private readonly ISpecProvider _specProvider;
private readonly ILogManager _logManager;
public MevBlockProducerTransactionsExecutorFactory(ISpecProvider specProvider, ILogManager logManager)
{
_specProvider = specProvider;
_logManager = logManager;
}
public IBlockProcessor.IBlockTransactionsExecutor Create(ReadOnlyTxProcessingEnv readOnlyTxProcessingEnv) =>
new MevBlockProductionTransactionsExecutor(readOnlyTxProcessingEnv, _specProvider, _logManager);
}
}
| 1 | 26,102 | factory should be stateless if possible, looks like much complexity added | NethermindEth-nethermind | .cs |
@@ -527,6 +527,12 @@ class BigQueryLoadTask(MixinBigQueryBulkComplete, luigi.Task):
""" Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."""
return False
+ @property
+ def project_id(self):
+ """Project ID on which to run the BigQuery Job
+ """
+ return self.output().table.project_id
+
def run(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output) | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Twitter Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections
import logging
import luigi.target
import time
from luigi.contrib import gcp
logger = logging.getLogger('luigi-interface')
try:
from googleapiclient import discovery
from googleapiclient import http
except ImportError:
logger.warning('BigQuery module imported, but google-api-python-client is '
'not installed. Any BigQuery task will fail')
class CreateDisposition:
CREATE_IF_NEEDED = 'CREATE_IF_NEEDED'
CREATE_NEVER = 'CREATE_NEVER'
class WriteDisposition:
WRITE_TRUNCATE = 'WRITE_TRUNCATE'
WRITE_APPEND = 'WRITE_APPEND'
WRITE_EMPTY = 'WRITE_EMPTY'
class QueryMode:
INTERACTIVE = 'INTERACTIVE'
BATCH = 'BATCH'
class SourceFormat:
AVRO = 'AVRO'
CSV = 'CSV'
DATASTORE_BACKUP = 'DATASTORE_BACKUP'
NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
class FieldDelimiter:
"""
The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character.
To use a character in the range 128-255, you must encode the character as UTF8.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary state.
BigQuery also supports the escape sequence "\t" to specify a tab separator.
The default value is a comma (',').
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
"""
COMMA = ',' # Default
TAB = "\t"
PIPE = "|"
class PrintHeader:
TRUE = True
FALSE = False
class DestinationFormat:
AVRO = 'AVRO'
CSV = 'CSV'
NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON'
class Compression:
GZIP = 'GZIP'
NONE = 'NONE'
class Encoding:
"""
[Optional] The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. The default value is UTF-8.
BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties.
"""
UTF_8 = 'UTF-8'
ISO_8859_1 = 'ISO-8859-1'
BQDataset = collections.namedtuple('BQDataset', 'project_id dataset_id location')
class BQTable(collections.namedtuple('BQTable', 'project_id dataset_id table_id location')):
@property
def dataset(self):
return BQDataset(project_id=self.project_id, dataset_id=self.dataset_id, location=self.location)
@property
def uri(self):
return "bq://" + self.project_id + "/" + \
self.dataset.dataset_id + "/" + self.table_id
class BigQueryClient:
"""A client for Google BigQuery.
For details of how authentication and the descriptor work, see the
documentation for the GCS client. The descriptor URL for BigQuery is
https://www.googleapis.com/discovery/v1/apis/bigquery/v2/rest
"""
def __init__(self, oauth_credentials=None, descriptor='', http_=None):
authenticate_kwargs = gcp.get_authenticate_kwargs(oauth_credentials, http_)
if descriptor:
self.client = discovery.build_from_document(descriptor, **authenticate_kwargs)
else:
self.client = discovery.build('bigquery', 'v2', cache_discovery=False, **authenticate_kwargs)
def dataset_exists(self, dataset):
"""Returns whether the given dataset exists.
If regional location is specified for the dataset, that is also checked
to be compatible with the remote dataset, otherwise an exception is thrown.
:param dataset:
:type dataset: BQDataset
"""
try:
response = self.client.datasets().get(projectId=dataset.project_id,
datasetId=dataset.dataset_id).execute()
if dataset.location is not None:
fetched_location = response.get('location')
if dataset.location != fetched_location:
raise Exception('''Dataset already exists with regional location {}. Can't use {}.'''.format(
fetched_location if fetched_location is not None else 'unspecified',
dataset.location))
except http.HttpError as ex:
if ex.resp.status == 404:
return False
raise
return True
def table_exists(self, table):
"""Returns whether the given table exists.
:param table:
:type table: BQTable
"""
if not self.dataset_exists(table.dataset):
return False
try:
self.client.tables().get(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id).execute()
except http.HttpError as ex:
if ex.resp.status == 404:
return False
raise
return True
def make_dataset(self, dataset, raise_if_exists=False, body=None):
"""Creates a new dataset with the default permissions.
:param dataset:
:type dataset: BQDataset
:param raise_if_exists: whether to raise an exception if the dataset already exists.
:raises luigi.target.FileAlreadyExists: if raise_if_exists=True and the dataset exists
"""
if body is None:
body = {}
try:
# Construct a message body in the format required by
# https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.datasets.html#insert
body['datasetReference'] = {
'projectId': dataset.project_id,
'datasetId': dataset.dataset_id
}
if dataset.location is not None:
body['location'] = dataset.location
self.client.datasets().insert(projectId=dataset.project_id, body=body).execute()
except http.HttpError as ex:
if ex.resp.status == 409:
if raise_if_exists:
raise luigi.target.FileAlreadyExists()
else:
raise
def delete_dataset(self, dataset, delete_nonempty=True):
"""Deletes a dataset (and optionally any tables in it), if it exists.
:param dataset:
:type dataset: BQDataset
:param delete_nonempty: if true, will delete any tables before deleting the dataset
"""
if not self.dataset_exists(dataset):
return
self.client.datasets().delete(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
deleteContents=delete_nonempty).execute()
def delete_table(self, table):
"""Deletes a table, if it exists.
:param table:
:type table: BQTable
"""
if not self.table_exists(table):
return
self.client.tables().delete(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id).execute()
def list_datasets(self, project_id):
"""Returns the list of datasets in a given project.
:param project_id:
:type project_id: str
"""
request = self.client.datasets().list(projectId=project_id,
maxResults=1000)
response = request.execute()
while response is not None:
for ds in response.get('datasets', []):
yield ds['datasetReference']['datasetId']
request = self.client.datasets().list_next(request, response)
if request is None:
break
response = request.execute()
def list_tables(self, dataset):
"""Returns the list of tables in a given dataset.
:param dataset:
:type dataset: BQDataset
"""
request = self.client.tables().list(projectId=dataset.project_id,
datasetId=dataset.dataset_id,
maxResults=1000)
response = request.execute()
while response is not None:
for t in response.get('tables', []):
yield t['tableReference']['tableId']
request = self.client.tables().list_next(request, response)
if request is None:
break
response = request.execute()
def get_view(self, table):
"""Returns the SQL query for a view, or None if it doesn't exist or is not a view.
:param table: The table containing the view.
:type table: BQTable
"""
request = self.client.tables().get(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id)
try:
response = request.execute()
except http.HttpError as ex:
if ex.resp.status == 404:
return None
raise
return response['view']['query'] if 'view' in response else None
def update_view(self, table, view):
"""Updates the SQL query for a view.
If the output table exists, it is replaced with the supplied view query. Otherwise a new
table is created with this view.
:param table: The table to contain the view.
:type table: BQTable
:param view: The SQL query for the view.
:type view: str
"""
body = {
'tableReference': {
'projectId': table.project_id,
'datasetId': table.dataset_id,
'tableId': table.table_id
},
'view': {
'query': view
}
}
if self.table_exists(table):
self.client.tables().update(projectId=table.project_id,
datasetId=table.dataset_id,
tableId=table.table_id,
body=body).execute()
else:
self.client.tables().insert(projectId=table.project_id,
datasetId=table.dataset_id,
body=body).execute()
def run_job(self, project_id, body, dataset=None):
"""Runs a BigQuery "job". See the documentation for the format of body.
.. note::
You probably don't need to use this directly. Use the tasks defined below.
:param dataset:
:type dataset: BQDataset
:return: the job id of the job.
:rtype: str
:raises luigi.contrib.BigQueryExecutionError: if the job fails.
"""
if dataset and not self.dataset_exists(dataset):
self.make_dataset(dataset)
new_job = self.client.jobs().insert(projectId=project_id, body=body).execute()
job_id = new_job['jobReference']['jobId']
logger.info('Started import job %s:%s', project_id, job_id)
while True:
status = self.client.jobs().get(projectId=project_id, jobId=job_id).execute(num_retries=10)
if status['status']['state'] == 'DONE':
if status['status'].get('errorResult'):
raise BigQueryExecutionError(job_id, status['status']['errorResult'])
return job_id
logger.info('Waiting for job %s:%s to complete...', project_id, job_id)
time.sleep(5)
def copy(self,
source_table,
dest_table,
create_disposition=CreateDisposition.CREATE_IF_NEEDED,
write_disposition=WriteDisposition.WRITE_TRUNCATE):
"""Copies (or appends) a table to another table.
:param source_table:
:type source_table: BQTable
:param dest_table:
:type dest_table: BQTable
:param create_disposition: whether to create the table if needed
:type create_disposition: CreateDisposition
:param write_disposition: whether to append/truncate/fail if the table exists
:type write_disposition: WriteDisposition
"""
job = {
"configuration": {
"copy": {
"sourceTable": {
"projectId": source_table.project_id,
"datasetId": source_table.dataset_id,
"tableId": source_table.table_id,
},
"destinationTable": {
"projectId": dest_table.project_id,
"datasetId": dest_table.dataset_id,
"tableId": dest_table.table_id,
},
"createDisposition": create_disposition,
"writeDisposition": write_disposition,
}
}
}
self.run_job(dest_table.project_id, job, dataset=dest_table.dataset)
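# A minimal usage sketch of the client above; the project, dataset and table names
# here are hypothetical placeholders, not part of luigi.
def _example_copy_table():
    client = BigQueryClient()
    source = BQTable(project_id='my-project', dataset_id='staging',
                     table_id='events', location=None)
    dest = BQTable(project_id='my-project', dataset_id='warehouse',
                   table_id='events', location=None)
    # Copies the staging table into the warehouse dataset, replacing any existing data.
    client.copy(source, dest, write_disposition=WriteDisposition.WRITE_TRUNCATE)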
class BigQueryTarget(luigi.target.Target):
def __init__(self, project_id, dataset_id, table_id, client=None, location=None):
self.table = BQTable(project_id=project_id, dataset_id=dataset_id, table_id=table_id, location=location)
self.client = client or BigQueryClient()
@classmethod
def from_bqtable(cls, table, client=None):
"""A constructor that takes a :py:class:`BQTable`.
:param table:
:type table: BQTable
"""
return cls(table.project_id, table.dataset_id, table.table_id, client=client)
def exists(self):
return self.client.table_exists(self.table)
def __str__(self):
return str(self.table)
class MixinBigQueryBulkComplete:
"""
    Allows efficiently checking whether a range of BigQueryTargets is complete.
This enables scheduling tasks with luigi range tools.
If you implement a custom Luigi task with a BigQueryTarget output, make sure to also inherit
from this mixin to enable range support.
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
# Instantiate the tasks to inspect them
tasks_with_params = [(cls(p), p) for p in parameter_tuples]
if not tasks_with_params:
return
# Grab the set of BigQuery datasets we are interested in
datasets = {t.output().table.dataset for t, p in tasks_with_params}
logger.info('Checking datasets %s for available tables', datasets)
# Query the available tables for all datasets
client = tasks_with_params[0][0].output().client
available_datasets = filter(client.dataset_exists, datasets)
available_tables = {d: set(client.list_tables(d)) for d in available_datasets}
# Return parameter_tuples belonging to available tables
for t, p in tasks_with_params:
table = t.output().table
if table.table_id in available_tables.get(table.dataset, []):
yield p
class BigQueryLoadTask(MixinBigQueryBulkComplete, luigi.Task):
"""Load data into BigQuery from GCS."""
@property
def source_format(self):
"""The source format to use (see :py:class:`SourceFormat`)."""
return SourceFormat.NEWLINE_DELIMITED_JSON
@property
def encoding(self):
"""The encoding of the data that is going to be loaded (see :py:class:`Encoding`)."""
return Encoding.UTF_8
@property
def write_disposition(self):
"""What to do if the table already exists. By default this will fail the job.
See :py:class:`WriteDisposition`"""
return WriteDisposition.WRITE_EMPTY
@property
def schema(self):
"""Schema in the format defined at https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.schema.
If the value is falsy, it is omitted and inferred by BigQuery."""
return []
@property
def max_bad_records(self):
""" The maximum number of bad records that BigQuery can ignore when reading data.
If the number of bad records exceeds this value, an invalid error is returned in the job result."""
return 0
@property
def field_delimiter(self):
"""The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character."""
return FieldDelimiter.COMMA
def source_uris(self):
"""The fully-qualified URIs that point to your data in Google Cloud Storage.
Each URI can contain one '*' wildcard character and it must come after the 'bucket' name."""
return [x.path for x in luigi.task.flatten(self.input())]
@property
def skip_leading_rows(self):
"""The number of rows at the top of a CSV file that BigQuery will skip when loading the data.
The default value is 0. This property is useful if you have header rows in the file that should be skipped."""
return 0
@property
def allow_jagged_rows(self):
"""Accept rows that are missing trailing optional columns. The missing values are treated as nulls.
If false, records with missing trailing columns are treated as bad records, and if there are too many bad records,
an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats."""
return False
@property
def ignore_unknown_values(self):
"""Indicates if BigQuery should allow extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns are treated as bad records,
and if there are too many bad records, an invalid error is returned in the job result. The default value is false.
The sourceFormat property determines what BigQuery treats as an extra value:
CSV: Trailing columns JSON: Named values that don't match any column names"""
return False
@property
def allow_quoted_new_lines(self):
""" Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false."""
return False
def run(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
bq_client = output.client
source_uris = self.source_uris()
assert all(x.startswith('gs://') for x in source_uris)
job = {
'configuration': {
'load': {
'destinationTable': {
'projectId': output.table.project_id,
'datasetId': output.table.dataset_id,
'tableId': output.table.table_id,
},
'encoding': self.encoding,
'sourceFormat': self.source_format,
'writeDisposition': self.write_disposition,
'sourceUris': source_uris,
'maxBadRecords': self.max_bad_records,
'ignoreUnknownValues': self.ignore_unknown_values
}
}
}
if self.source_format == SourceFormat.CSV:
job['configuration']['load']['fieldDelimiter'] = self.field_delimiter
job['configuration']['load']['skipLeadingRows'] = self.skip_leading_rows
job['configuration']['load']['allowJaggedRows'] = self.allow_jagged_rows
job['configuration']['load']['allowQuotedNewlines'] = self.allow_quoted_new_lines
if self.schema:
job['configuration']['load']['schema'] = {'fields': self.schema}
else:
job['configuration']['load']['autodetect'] = True
bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset)
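# A minimal, illustrative subclass of BigQueryLoadTask; the upstream GCS data and the
# project/dataset/table names are hypothetical. It only shows which members are
# typically overridden: the source format, an explicit schema and the target table.
class _ExampleCsvLoad(BigQueryLoadTask):
    @property
    def source_format(self):
        return SourceFormat.CSV
    @property
    def skip_leading_rows(self):
        return 1  # skip the CSV header row
    @property
    def schema(self):
        return [
            {'name': 'user_id', 'type': 'STRING', 'mode': 'REQUIRED'},
            {'name': 'event_time', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'},
        ]
    def output(self):
        return BigQueryTarget('my-project', 'my_dataset', 'events')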
class BigQueryRunQueryTask(MixinBigQueryBulkComplete, luigi.Task):
@property
def write_disposition(self):
"""What to do if the table already exists. By default this will fail the job.
See :py:class:`WriteDisposition`"""
return WriteDisposition.WRITE_TRUNCATE
@property
def create_disposition(self):
"""Whether to create the table or not. See :py:class:`CreateDisposition`"""
return CreateDisposition.CREATE_IF_NEEDED
@property
def flatten_results(self):
"""Flattens all nested and repeated fields in the query results.
allowLargeResults must be true if this is set to False."""
return True
@property
def query(self):
"""The query, in text form."""
raise NotImplementedError()
@property
def query_mode(self):
"""The query mode. See :py:class:`QueryMode`."""
return QueryMode.INTERACTIVE
@property
def udf_resource_uris(self):
"""Iterator of code resource to load from a Google Cloud Storage URI (gs://bucket/path).
"""
return []
@property
def use_legacy_sql(self):
"""Whether to use legacy SQL
"""
return True
def run(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
query = self.query
assert query, 'No query was provided'
bq_client = output.client
logger.info('Launching Query')
logger.info('Query destination: %s (%s)', output, self.write_disposition)
logger.info('Query SQL: %s', query)
job = {
'configuration': {
'query': {
'query': query,
'priority': self.query_mode,
'destinationTable': {
'projectId': output.table.project_id,
'datasetId': output.table.dataset_id,
'tableId': output.table.table_id,
},
'allowLargeResults': True,
'createDisposition': self.create_disposition,
'writeDisposition': self.write_disposition,
'flattenResults': self.flatten_results,
'userDefinedFunctionResources': [{"resourceUri": v} for v in self.udf_resource_uris],
'useLegacySql': self.use_legacy_sql,
}
}
}
bq_client.run_job(output.table.project_id, job, dataset=output.table.dataset)
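# A minimal, illustrative subclass of BigQueryRunQueryTask with hypothetical table
# names; every subclass must at least provide the query text and a BigQueryTarget output.
class _ExampleDailyAggregate(BigQueryRunQueryTask):
    @property
    def query(self):
        # Legacy SQL syntax, since use_legacy_sql defaults to True.
        return ('SELECT country, COUNT(*) AS n '
                'FROM [my-project:my_dataset.events] GROUP BY country')
    def output(self):
        return BigQueryTarget('my-project', 'my_dataset', 'events_by_country')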
class BigQueryCreateViewTask(luigi.Task):
"""
Creates (or updates) a view in BigQuery.
The output of this task needs to be a BigQueryTarget.
Instances of this class should specify the view SQL in the view property.
If a view already exist in BigQuery at output(), it will be updated.
"""
@property
def view(self):
"""The SQL query for the view, in text form."""
raise NotImplementedError()
def complete(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
if not output.exists():
return False
existing_view = output.client.get_view(output.table)
return existing_view == self.view
def run(self):
output = self.output()
assert isinstance(output, BigQueryTarget), 'Output must be a BigQueryTarget, not %s' % (output)
view = self.view
assert view, 'No view was provided'
logger.info('Create view')
logger.info('Destination: %s', output)
logger.info('View SQL: %s', view)
output.client.update_view(output.table, view)
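# A minimal, illustrative subclass of BigQueryCreateViewTask using the same
# hypothetical dataset; the task is considered incomplete (and re-runs) whenever the
# view SQL differs from what is currently stored in BigQuery.
class _ExampleView(BigQueryCreateViewTask):
    @property
    def view(self):
        return 'SELECT * FROM [my-project:my_dataset.events] WHERE country = "SE"'
    def output(self):
        return BigQueryTarget('my-project', 'my_dataset', 'swedish_events_view')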
class ExternalBigQueryTask(MixinBigQueryBulkComplete, luigi.ExternalTask):
"""
An external task for a BigQuery target.
"""
pass
class BigQueryExtractTask(luigi.Task):
"""
Extracts (unloads) a table from BigQuery to GCS.
This tasks requires the input to be exactly one BigQueryTarget while the
output should be one or more GCSTargets from luigi.contrib.gcs depending on
the use of destinationUris property.
"""
@property
def destination_uris(self):
"""
The fully-qualified URIs that point to your data in Google Cloud
Storage. Each URI can contain one '*' wildcard character and it must
come after the 'bucket' name.
Wildcarded destinationUris in GCSQueryTarget might not be resolved
correctly and result in incomplete data. If a GCSQueryTarget is used to
pass wildcarded destinationUris be sure to overwrite this property to
suppress the warning.
"""
return [x.path for x in luigi.task.flatten(self.output())]
@property
def print_header(self):
"""Whether to print the header or not."""
return PrintHeader.TRUE
@property
def field_delimiter(self):
"""
The separator for fields in a CSV file. The separator can be any
ISO-8859-1 single-byte character.
"""
return FieldDelimiter.COMMA
@property
def destination_format(self):
"""
The destination format to use (see :py:class:`DestinationFormat`).
"""
return DestinationFormat.CSV
@property
def compression(self):
"""Whether to use compression."""
return Compression.NONE
def run(self):
input = luigi.task.flatten(self.input())[0]
assert (
isinstance(input, BigQueryTarget) or
(len(input) == 1 and isinstance(input[0], BigQueryTarget))), \
'Input must be exactly one BigQueryTarget, not %s' % (input)
bq_client = input.client
destination_uris = self.destination_uris
assert all(x.startswith('gs://') for x in destination_uris)
logger.info('Launching Extract Job')
logger.info('Extract source: %s', input)
logger.info('Extract destination: %s', destination_uris)
job = {
'configuration': {
'extract': {
'sourceTable': {
'projectId': input.table.project_id,
'datasetId': input.table.dataset_id,
'tableId': input.table.table_id
},
'destinationUris': destination_uris,
'destinationFormat': self.destination_format,
'compression': self.compression
}
}
}
if self.destination_format == 'CSV':
# "Only exports to CSV may specify a field delimiter."
job['configuration']['extract']['printHeader'] = self.print_header
job['configuration']['extract']['fieldDelimiter'] = \
self.field_delimiter
bq_client.run_job(
input.table.project_id,
job,
dataset=input.table.dataset)
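# A minimal, illustrative subclass of BigQueryExtractTask; the upstream task and the
# GCS bucket are hypothetical. The input must be a BigQueryTarget and the output a
# GCS target from luigi.contrib.gcs.
class _ExampleExtract(BigQueryExtractTask):
    def requires(self):
        return _ExampleDailyAggregate()
    def output(self):
        from luigi.contrib.gcs import GCSTarget  # imported lazily; optional dependency
        return GCSTarget('gs://my-bucket/exports/events_by_country.csv')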
# the original inconsistently capitalized aliases, for backwards compatibility
BigqueryClient = BigQueryClient
BigqueryTarget = BigQueryTarget
MixinBigqueryBulkComplete = MixinBigQueryBulkComplete
BigqueryLoadTask = BigQueryLoadTask
BigqueryRunQueryTask = BigQueryRunQueryTask
BigqueryCreateViewTask = BigQueryCreateViewTask
ExternalBigqueryTask = ExternalBigQueryTask
class BigQueryExecutionError(Exception):
def __init__(self, job_id, error_message) -> None:
"""
:param job_id: BigQuery Job ID
:type job_id: str
:param error_message: status['status']['errorResult'] for the failed job
:type error_message: str
"""
super().__init__('BigQuery job {} failed: {}'.format(job_id, error_message))
self.error_message = error_message
self.job_id = job_id
| 1 | 19,970 | Could you please add short description for the default value as well? | spotify-luigi | py |
@@ -744,7 +744,7 @@ func (bc *blockchain) commitBlock(blk *Block) error {
if bc.sf != nil {
ExecuteContracts(blk, bc)
if err := bc.sf.CommitStateChanges(blk.Height(), blk.Transfers, blk.Votes, blk.Executions); err != nil {
- return err
+ logger.Fatal().Err(err).Msgf("Failed to commit state changes on height %d", blk.Height())
}
}
// write smart contract receipt into DB | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"math/big"
"sync"
"github.com/facebookgo/clock"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/blockchain/action"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/state"
)
// Blockchain represents the blockchain data structure and hosts the APIs to access it
type Blockchain interface {
lifecycle.StartStopper
// Balance returns balance of an account
Balance(addr string) (*big.Int, error)
// Nonce returns the nonce if the account exists
Nonce(addr string) (uint64, error)
// CreateState adds a new State with initial balance to the factory
CreateState(addr string, init uint64) (*state.State, error)
// CommitStateChanges updates a State from the given actions
CommitStateChanges(chainHeight uint64, tsf []*action.Transfer, vote []*action.Vote, executions []*action.Execution) error
// Candidates returns the candidate list
Candidates() (uint64, []*state.Candidate)
// CandidatesByHeight returns the candidate list by a given height
CandidatesByHeight(height uint64) ([]*state.Candidate, error)
// For exposing blockchain states
// GetHeightByHash returns Block's height by hash
GetHeightByHash(h hash.Hash32B) (uint64, error)
// GetHashByHeight returns Block's hash by height
GetHashByHeight(height uint64) (hash.Hash32B, error)
// GetBlockByHeight returns Block by height
GetBlockByHeight(height uint64) (*Block, error)
// GetBlockByHash returns Block by hash
GetBlockByHash(h hash.Hash32B) (*Block, error)
// GetTotalTransfers returns the total number of transfers
GetTotalTransfers() (uint64, error)
// GetTotalVotes returns the total number of votes
GetTotalVotes() (uint64, error)
// GetTotalExecutions returns the total number of executions
GetTotalExecutions() (uint64, error)
	// GetTransfersFromAddress returns transfers from address
	GetTransfersFromAddress(address string) ([]hash.Hash32B, error)
	// GetTransfersToAddress returns transfers to address
	GetTransfersToAddress(address string) ([]hash.Hash32B, error)
	// GetTransferByTransferHash returns transfer by transfer hash
	GetTransferByTransferHash(h hash.Hash32B) (*action.Transfer, error)
// GetBlockHashByTransferHash returns Block hash by transfer hash
GetBlockHashByTransferHash(h hash.Hash32B) (hash.Hash32B, error)
	// GetVotesFromAddress returns votes from address
	GetVotesFromAddress(address string) ([]hash.Hash32B, error)
	// GetVotesToAddress returns votes to address
	GetVotesToAddress(address string) ([]hash.Hash32B, error)
	// GetVoteByVoteHash returns vote by vote hash
	GetVoteByVoteHash(h hash.Hash32B) (*action.Vote, error)
// GetBlockHashByVoteHash returns Block hash by vote hash
GetBlockHashByVoteHash(h hash.Hash32B) (hash.Hash32B, error)
// GetExecutionsFromAddress returns executions from address
GetExecutionsFromAddress(address string) ([]hash.Hash32B, error)
// GetExecutionsToAddress returns executions to address
GetExecutionsToAddress(address string) ([]hash.Hash32B, error)
// GetExecutionByExecutionHash returns execution by execution hash
GetExecutionByExecutionHash(h hash.Hash32B) (*action.Execution, error)
// GetBlockHashByExecutionHash returns Block hash by execution hash
GetBlockHashByExecutionHash(h hash.Hash32B) (hash.Hash32B, error)
// GetReceiptByExecutionHash returns the receipt by execution hash
GetReceiptByExecutionHash(h hash.Hash32B) (*Receipt, error)
// GetFactory returns the State Factory
GetFactory() state.Factory
// TipHash returns tip block's hash
TipHash() (hash.Hash32B, error)
// TipHeight returns tip block's height
TipHeight() (uint64, error)
// StateByAddr returns state of a given address
StateByAddr(address string) (*state.State, error)
// For block operations
// MintNewBlock creates a new block with given actions
// Note: the coinbase transfer will be added to the given transfers when minting a new block
MintNewBlock(tsf []*action.Transfer, vote []*action.Vote, executions []*action.Execution, address *iotxaddress.Address, data string) (*Block, error)
// TODO: Merge the MintNewDKGBlock into MintNewBlock
// MintNewDKGBlock creates a new block with given actions and dkg keys
MintNewDKGBlock(tsf []*action.Transfer, vote []*action.Vote, executions []*action.Execution,
producer *iotxaddress.Address, dkgAddress *iotxaddress.DKGAddress, seed []byte, data string) (*Block, error)
	// MintNewDummyBlock creates a new dummy block, used for unreached consensus
MintNewDummyBlock() *Block
// CommitBlock validates and appends a block to the chain
CommitBlock(blk *Block) error
// ValidateBlock validates a new block before adding it to the blockchain
ValidateBlock(blk *Block) error
// For action operations
// Validator returns the current validator object
Validator() Validator
// SetValidator sets the current validator object
SetValidator(val Validator)
// For smart contract operations
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
ExecuteContractRead(*action.Execution) ([]byte, error)
}
// blockchain implements the Blockchain interface
type blockchain struct {
mu sync.RWMutex // mutex to protect utk, tipHeight and tipHash
dao *blockDAO
config *config.Config
genesis *Genesis
chainID uint32
tipHeight uint64
tipHash hash.Hash32B
validator Validator
lifecycle lifecycle.Lifecycle
clk clock.Clock
// used by account-based model
sf state.Factory
}
// Option sets blockchain construction parameter
type Option func(*blockchain, *config.Config) error
// DefaultStateFactoryOption sets blockchain's sf from config
func DefaultStateFactoryOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
sf, err := state.NewFactory(cfg, state.DefaultTrieOption())
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
bc.sf = sf
return nil
}
}
// PrecreatedStateFactoryOption sets blockchain's state.Factory to sf
func PrecreatedStateFactoryOption(sf state.Factory) Option {
return func(bc *blockchain, conf *config.Config) error {
bc.sf = sf
return nil
}
}
// InMemStateFactoryOption sets blockchain's state.Factory as in memory sf
func InMemStateFactoryOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
sf, err := state.NewFactory(cfg, state.InMemTrieOption())
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
bc.sf = sf
return nil
}
}
// PrecreatedDaoOption sets blockchain's dao
func PrecreatedDaoOption(dao *blockDAO) Option {
return func(bc *blockchain, conf *config.Config) error {
bc.dao = dao
return nil
}
}
// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath
func BoltDBDaoOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
bc.dao = newBlockDAO(cfg, db.NewBoltDB(cfg.Chain.ChainDBPath, &cfg.DB))
return nil
}
}
// InMemDaoOption sets blockchain's dao with MemKVStore
func InMemDaoOption() Option {
return func(bc *blockchain, cfg *config.Config) error {
bc.dao = newBlockDAO(cfg, db.NewMemKVStore())
return nil
}
}
// ClockOption overrides the default clock
func ClockOption(clk clock.Clock) Option {
return func(bc *blockchain, conf *config.Config) error {
bc.clk = clk
return nil
}
}
// NewBlockchain creates a new blockchain and DB instance
func NewBlockchain(cfg *config.Config, opts ...Option) Blockchain {
// create the Blockchain
chain := &blockchain{
config: cfg,
genesis: Gen,
clk: clock.New(),
}
for _, opt := range opts {
if err := opt(chain, cfg); err != nil {
logger.Error().Err(err).Msgf("Failed to create blockchain option %s", opt)
return nil
}
}
chain.initValidator()
if chain.dao != nil {
chain.lifecycle.Add(chain.dao)
}
if chain.sf != nil {
chain.lifecycle.Add(chain.sf)
}
if err := chain.Start(context.Background()); err != nil {
logger.Error().Err(err).Msg("Failed to start blockchain")
return nil
}
height, err := chain.TipHeight()
if err != nil {
logger.Error().Err(err).Msg("Failed to get blockchain height")
return nil
}
if height > 0 {
factoryHeight, err := chain.sf.Height()
if err != nil {
logger.Error().Err(err).Msg("Failed to get factory's height")
return nil
}
logger.Info().
Uint64("blockchain height", height).Uint64("factory height", factoryHeight).
Msg("Restarting blockchain")
return chain
}
genesis := NewGenesisBlock(cfg)
if genesis == nil {
logger.Error().Msg("Cannot create genesis block.")
return nil
}
// Genesis block has height 0
if genesis.Header.height != 0 {
logger.Error().
Uint64("Genesis block has height", genesis.Height()).
Msg("Expecting 0")
return nil
}
// add producer into Trie
if chain.sf != nil {
if _, err := chain.sf.LoadOrCreateState(Gen.CreatorAddr, Gen.TotalSupply); err != nil {
logger.Error().Err(err).Msg("Failed to add Creator into StateFactory")
return nil
}
}
// add Genesis block as very first block
if err := chain.CommitBlock(genesis); err != nil {
logger.Error().Err(err).Msg("Failed to commit Genesis block")
return nil
}
return chain
}
func (bc *blockchain) initValidator() { bc.validator = &validator{sf: bc.sf} }
// Start starts the blockchain
func (bc *blockchain) Start(ctx context.Context) (err error) {
if err = bc.lifecycle.OnStart(ctx); err != nil {
return err
}
// get blockchain tip height
bc.mu.Lock()
defer bc.mu.Unlock()
if bc.tipHeight, err = bc.dao.getBlockchainHeight(); err != nil {
return err
}
if bc.tipHeight == 0 {
return nil
}
// get blockchain tip hash
if bc.tipHash, err = bc.dao.getBlockHash(bc.tipHeight); err != nil {
return err
}
// populate state factory
if bc.sf == nil {
return errors.New("statefactory cannot be nil")
}
var startHeight uint64
if factoryHeight, err := bc.sf.Height(); err == nil {
if factoryHeight > bc.tipHeight {
return errors.New("factory is higher than blockchain")
}
startHeight = factoryHeight + 1
}
// If restarting factory from fresh db, first create creator's state
if startHeight == 0 {
if _, err := bc.sf.LoadOrCreateState(Gen.CreatorAddr, Gen.TotalSupply); err != nil {
return err
}
}
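	// Replay blocks from startHeight up to the chain tip so the state factory catches up.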
for i := startHeight; i <= bc.tipHeight; i++ {
blk, err := bc.GetBlockByHeight(i)
if err != nil {
return err
}
if err := bc.sf.CommitStateChanges(blk.Height(), blk.Transfers, blk.Votes, blk.Executions); err != nil {
return err
}
}
return nil
}
// Stop stops the blockchain.
func (bc *blockchain) Stop(ctx context.Context) error { return bc.lifecycle.OnStop(ctx) }
// Balance returns balance of address
func (bc *blockchain) Balance(addr string) (*big.Int, error) {
return bc.sf.Balance(addr)
}
// Nonce returns the nonce if the account exists
func (bc *blockchain) Nonce(addr string) (uint64, error) {
return bc.sf.Nonce(addr)
}
// CreateState adds a new State with initial balance to the factory
func (bc *blockchain) CreateState(addr string, init uint64) (*state.State, error) {
return bc.sf.LoadOrCreateState(addr, init)
}
// CommitStateChanges updates a State from the given actions
func (bc *blockchain) CommitStateChanges(blockHeight uint64, tsf []*action.Transfer, vote []*action.Vote, executions []*action.Execution) error {
return bc.sf.CommitStateChanges(blockHeight, tsf, vote, executions)
}
// Candidates returns the candidate list
func (bc *blockchain) Candidates() (uint64, []*state.Candidate) {
return bc.sf.Candidates()
}
// CandidatesByHeight returns the candidate list by a given height
func (bc *blockchain) CandidatesByHeight(height uint64) ([]*state.Candidate, error) {
return bc.sf.CandidatesByHeight(height)
}
// GetHeightByHash returns block's height by hash
func (bc *blockchain) GetHeightByHash(h hash.Hash32B) (uint64, error) {
return bc.dao.getBlockHeight(h)
}
// GetHashByHeight returns block's hash by height
func (bc *blockchain) GetHashByHeight(height uint64) (hash.Hash32B, error) {
return bc.dao.getBlockHash(height)
}
// GetBlockByHeight returns block from the blockchain hash by height
func (bc *blockchain) GetBlockByHeight(height uint64) (*Block, error) {
hash, err := bc.GetHashByHeight(height)
if err != nil {
return nil, err
}
return bc.GetBlockByHash(hash)
}
// GetBlockByHash returns block from the blockchain hash by hash
func (bc *blockchain) GetBlockByHash(h hash.Hash32B) (*Block, error) {
return bc.dao.getBlock(h)
}
// GetTotalTransfers returns the total number of transfers
func (bc *blockchain) GetTotalTransfers() (uint64, error) {
if !bc.config.Explorer.Enabled {
return 0, errors.New("explorer not enabled")
}
return bc.dao.getTotalTransfers()
}
// GetTotalVotes returns the total number of votes
func (bc *blockchain) GetTotalVotes() (uint64, error) {
if !bc.config.Explorer.Enabled {
return 0, errors.New("explorer not enabled")
}
return bc.dao.getTotalVotes()
}
// GetTotalExecutions returns the total number of executions
func (bc *blockchain) GetTotalExecutions() (uint64, error) {
if !bc.config.Explorer.Enabled {
return 0, errors.New("explorer not enabled")
}
return bc.dao.getTotalExecutions()
}
// GetTransfersFromAddress returns transfers from address
func (bc *blockchain) GetTransfersFromAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getTransfersBySenderAddress(address)
}
// GetTransfersToAddress returns transfers to address
func (bc *blockchain) GetTransfersToAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getTransfersByRecipientAddress(address)
}
// GetTransferByTransferHash returns transfer by transfer hash
func (bc *blockchain) GetTransferByTransferHash(h hash.Hash32B) (*action.Transfer, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
blkHash, err := bc.dao.getBlockHashByTransferHash(h)
if err != nil {
return nil, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return nil, err
}
for _, transfer := range blk.Transfers {
if transfer.Hash() == h {
return transfer, nil
}
}
return nil, errors.Errorf("block %x does not have transfer %x", blkHash, h)
}
// GetBlockHashByTransferHash returns Block hash by transfer hash
func (bc *blockchain) GetBlockHashByTransferHash(h hash.Hash32B) (hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return hash.ZeroHash32B, errors.New("explorer not enabled")
}
return bc.dao.getBlockHashByTransferHash(h)
}
// GetVotesFromAddress returns votes from address
func (bc *blockchain) GetVotesFromAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getVotesBySenderAddress(address)
}
// GetVotesToAddress returns votes to address
func (bc *blockchain) GetVotesToAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getVotesByRecipientAddress(address)
}
// GetVoteByVoteHash returns vote by vote hash
func (bc *blockchain) GetVoteByVoteHash(h hash.Hash32B) (*action.Vote, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
blkHash, err := bc.dao.getBlockHashByVoteHash(h)
if err != nil {
return nil, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return nil, err
}
for _, vote := range blk.Votes {
if vote.Hash() == h {
return vote, nil
}
}
return nil, errors.Errorf("block %x does not have vote %x", blkHash, h)
}
// GetBlockHashByVoteHash returns Block hash by vote hash
func (bc *blockchain) GetBlockHashByVoteHash(h hash.Hash32B) (hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return hash.ZeroHash32B, errors.New("explorer not enabled")
}
return bc.dao.getBlockHashByVoteHash(h)
}
// GetExecutionsFromAddress returns executions from address
func (bc *blockchain) GetExecutionsFromAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getExecutionsByExecutorAddress(address)
}
// GetExecutionsToAddress returns executions to address
func (bc *blockchain) GetExecutionsToAddress(address string) ([]hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getExecutionsByContractAddress(address)
}
// GetExecutionByExecutionHash returns execution by execution hash
func (bc *blockchain) GetExecutionByExecutionHash(h hash.Hash32B) (*action.Execution, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
blkHash, err := bc.dao.getBlockHashByExecutionHash(h)
if err != nil {
return nil, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return nil, err
}
for _, execution := range blk.Executions {
if execution.Hash() == h {
return execution, nil
}
}
return nil, errors.Errorf("block %x does not have execution %x", blkHash, h)
}
// GetBlockHashByExecutionHash returns Block hash by execution hash
func (bc *blockchain) GetBlockHashByExecutionHash(h hash.Hash32B) (hash.Hash32B, error) {
if !bc.config.Explorer.Enabled {
return hash.ZeroHash32B, errors.New("explorer not enabled")
}
return bc.dao.getBlockHashByExecutionHash(h)
}
// GetReceiptByExecutionHash returns the receipt by execution hash
func (bc *blockchain) GetReceiptByExecutionHash(h hash.Hash32B) (*Receipt, error) {
if !bc.config.Explorer.Enabled {
return nil, errors.New("explorer not enabled")
}
return bc.dao.getReceiptByExecutionHash(h)
}
// GetFactory returns the State Factory
func (bc *blockchain) GetFactory() state.Factory {
return bc.sf
}
// TipHash returns tip block's hash
func (bc *blockchain) TipHash() (hash.Hash32B, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.tipHash, nil
}
// TipHeight returns tip block's height
func (bc *blockchain) TipHeight() (uint64, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.tipHeight, nil
}
// ValidateBlock validates a new block before adding it to the blockchain
func (bc *blockchain) ValidateBlock(blk *Block) error {
bc.mu.RLock()
defer bc.mu.RUnlock()
if bc.validator == nil {
panic("no block validator")
}
// replacement logic, used to replace a fake old dummy block
if blk.Height() != 0 && blk.Height() <= bc.tipHeight {
oldDummyBlock, err := bc.GetBlockByHeight(blk.Height())
if err != nil {
return errors.Wrapf(err, "The height of the new block is invalid")
}
if !oldDummyBlock.IsDummyBlock() {
return errors.New("The replaced block is not a dummy block")
}
lastBlock, err := bc.GetBlockByHeight(blk.Height() - 1)
if err != nil {
return errors.Wrapf(err, "Failed to get the last block when replacing the dummy block")
}
return bc.validator.Validate(blk, lastBlock.Height(), lastBlock.HashBlock())
}
return bc.validator.Validate(blk, bc.tipHeight, bc.tipHash)
}
// MintNewBlock creates a new block with given actions
// Note: the coinbase transfer will be added to the given transfers
// when minting a new block
func (bc *blockchain) MintNewBlock(tsf []*action.Transfer, vote []*action.Vote, executions []*action.Execution,
producer *iotxaddress.Address, data string) (*Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
tsf = append(tsf, action.NewCoinBaseTransfer(big.NewInt(int64(bc.genesis.BlockReward)), producer.RawAddress))
blk := NewBlock(bc.chainID, bc.tipHeight+1, bc.tipHash, bc.clk, tsf, vote, executions)
if producer.PrivateKey == keypair.ZeroPrivateKey {
logger.Warn().Msg("Unsigned block...")
return blk, nil
}
blk.Header.DKGID = []byte{}
blk.Header.DKGPubkey = []byte{}
blk.Header.DKGBlockSig = []byte{}
blk.Header.Pubkey = producer.PublicKey
blkHash := blk.HashBlock()
blk.Header.blockSig = crypto.EC283.Sign(producer.PrivateKey, blkHash[:])
return blk, nil
}
// MintNewDKGBlock creates a new block with given actions and dkg keys
// Note: the coinbase transfer will be added to the given transfers
// when minting a new block
func (bc *blockchain) MintNewDKGBlock(tsf []*action.Transfer, vote []*action.Vote, executions []*action.Execution,
producer *iotxaddress.Address, dkgAddress *iotxaddress.DKGAddress, seed []byte, data string) (*Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
tsf = append(tsf, action.NewCoinBaseTransfer(big.NewInt(int64(bc.genesis.BlockReward)), producer.RawAddress))
blk := NewBlock(bc.chainID, bc.tipHeight+1, bc.tipHash, bc.clk, tsf, vote, executions)
if producer.PrivateKey == keypair.ZeroPrivateKey {
logger.Warn().Msg("Unsigned block...")
return blk, nil
}
blk.Header.DKGID = []byte{}
blk.Header.DKGPubkey = []byte{}
blk.Header.DKGBlockSig = []byte{}
if len(dkgAddress.PublicKey) > 0 && len(dkgAddress.PrivateKey) > 0 && len(dkgAddress.ID) > 0 {
blk.Header.DKGID = dkgAddress.ID
blk.Header.DKGPubkey = dkgAddress.PublicKey
var err error
if _, blk.Header.DKGBlockSig, err = crypto.BLS.SignShare(dkgAddress.PrivateKey, seed); err != nil {
return nil, errors.Wrap(err, "Failed to do DKG sign")
}
}
blk.Header.Pubkey = producer.PublicKey
blkHash := blk.HashBlock()
blk.Header.blockSig = crypto.EC283.Sign(producer.PrivateKey, blkHash[:])
return blk, nil
}
// MintNewDummyBlock creates a new dummy block, used for unreached consensus
func (bc *blockchain) MintNewDummyBlock() *Block {
bc.mu.RLock()
defer bc.mu.RUnlock()
blk := NewBlock(bc.chainID, bc.tipHeight+1, bc.tipHash, bc.clk, nil, nil, nil)
blk.Header.Pubkey = keypair.ZeroPublicKey
blk.Header.blockSig = []byte{}
return blk
}
// CommitBlock validates and appends a block to the chain
func (bc *blockchain) CommitBlock(blk *Block) error {
if err := bc.ValidateBlock(blk); err != nil {
return err
}
return bc.commitBlock(blk)
}
// StateByAddr returns the state of an address
func (bc *blockchain) StateByAddr(address string) (*state.State, error) {
if bc.sf != nil {
s, err := bc.sf.State(address)
if err != nil {
logger.Warn().Err(err).Str("Address", address)
return nil, errors.New("account does not exist")
}
return s, nil
}
return nil, errors.New("state factory is nil")
}
// SetValidator sets the current validator object
func (bc *blockchain) SetValidator(val Validator) {
bc.validator = val
}
// Validator gets the current validator object
func (bc *blockchain) Validator() Validator {
return bc.validator
}
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
func (bc *blockchain) ExecuteContractRead(ex *action.Execution) ([]byte, error) {
// use latest block as carrier to run the offline execution
// the block itself is not used
h, _ := bc.TipHeight()
blk, err := bc.GetBlockByHeight(h)
if err != nil {
return nil, errors.Wrap(err, "failed to get block in ExecuteContractRead")
}
blk.Executions = nil
blk.Executions = []*action.Execution{ex}
blk.receipts = nil
ExecuteContracts(blk, bc)
// pull the results from receipt
exHash := ex.Hash()
receipt, ok := blk.receipts[exHash]
if !ok {
return nil, errors.Wrap(err, "failed to get receipt in ExecuteContractRead")
}
return receipt.ReturnValue, nil
}
//======================================
// private functions
//=====================================
// commitBlock commits a block to the chain
func (bc *blockchain) commitBlock(blk *Block) error {
// write block into DB
if err := bc.dao.putBlock(blk); err != nil {
return err
}
// update tip hash and height
bc.mu.Lock()
defer bc.mu.Unlock()
bc.tipHeight = blk.Header.height
bc.tipHash = blk.HashBlock()
// update state factory
if bc.sf != nil {
ExecuteContracts(blk, bc)
if err := bc.sf.CommitStateChanges(blk.Height(), blk.Transfers, blk.Votes, blk.Executions); err != nil {
return err
}
}
// write smart contract receipt into DB
if err := bc.dao.putReceipts(blk); err != nil {
return err
}
logger.Info().Uint64("height", blk.Header.height).Msg("commit a block")
return nil
}
| 1 | 11,854 | Leave a TODO message to say we will fix the log level in the future, once committing the block and the state become a transaction | iotexproject-iotex-core | go |
@@ -107,6 +107,18 @@ public class TableProperties {
public static final String DELETE_PARQUET_COMPRESSION_LEVEL = "write.delete.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
+ public static final String PARQUET_ROW_GROUP_CHECK_MIN_RECORD_COUNT =
+ "write.parquet.row-group-check-min-record-count";
+ public static final String DELETE_PARQUET_ROW_GROUP_CHECK_MIN_RECORD_COUNT =
+ "write.delete.parquet.row-group-check-min-record-count";
+ public static final String PARQUET_ROW_GROUP_CHECK_MIN_RECORD_COUNT_DEFAULT = "100";
+
+ public static final String PARQUET_ROW_GROUP_CHECK_MAX_RECORD_COUNT =
+ "write.parquet.row-group-check-max-record-count";
+ public static final String DELETE_PARQUET_ROW_GROUP_CHECK_MAX_RECORD_COUNT =
+ "write.delete.parquet.row-group-check-max-record-count";
+ public static final String PARQUET_ROW_GROUP_CHECK_MAX_RECORD_COUNT_DEFAULT = "10000";
+
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String DELETE_AVRO_COMPRESSION = "write.delete.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip"; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.Set;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet;
public class TableProperties {
private TableProperties() {
}
/**
* Reserved table property for table format version.
* <p>
* Iceberg will default a new table's format version to the latest stable and recommended version.
* This reserved property keyword allows users to override the Iceberg format version of the table metadata.
* <p>
* If this table property exists when creating a table, the table will use the specified format version.
* If a table updates this property, it will try to upgrade to the specified format version.
* <p>
* Note: incomplete or unstable versions cannot be selected using this property.
*/
public static final String FORMAT_VERSION = "format-version";
/**
* Reserved Iceberg table properties list.
* <p>
* Reserved table properties are only used to control behaviors when creating or updating a table.
 * The values of these properties are not persisted as part of the table metadata.
*/
public static final Set<String> RESERVED_PROPERTIES = ImmutableSet.of(
FORMAT_VERSION
);
public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60 * 1000; // 1 minute
public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 30 * 60 * 1000; // 30 minutes
public static final String COMMIT_NUM_STATUS_CHECKS = "commit.status-check.num-retries";
public static final int COMMIT_NUM_STATUS_CHECKS_DEFAULT = 3;
public static final String COMMIT_STATUS_CHECKS_MIN_WAIT_MS = "commit.status-check.min-wait-ms";
public static final long COMMIT_STATUS_CHECKS_MIN_WAIT_MS_DEFAULT = 1000; // 1 second
public static final String COMMIT_STATUS_CHECKS_MAX_WAIT_MS = "commit.status-check.max-wait-ms";
public static final long COMMIT_STATUS_CHECKS_MAX_WAIT_MS_DEFAULT = 60 * 1000; // 1 minute
public static final String COMMIT_STATUS_CHECKS_TOTAL_WAIT_MS = "commit.status-check.total-timeout-ms";
public static final long COMMIT_STATUS_CHECKS_TOTAL_WAIT_MS_DEFAULT = 30 * 60 * 1000; // 30 minutes
public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8 * 1024 * 1024; // 8 MB
public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;
public static final String MANIFEST_MERGE_ENABLED = "commit.manifest-merge.enabled";
public static final boolean MANIFEST_MERGE_ENABLED_DEFAULT = true;
public static final String DEFAULT_FILE_FORMAT = "write.format.default";
public static final String DELETE_DEFAULT_FILE_FORMAT = "write.delete.format.default";
public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
public static final String DELETE_PARQUET_ROW_GROUP_SIZE_BYTES = "write.delete.parquet.row-group-size-bytes";
public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
public static final String DELETE_PARQUET_PAGE_SIZE_BYTES = "write.delete.parquet.page-size-bytes";
public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
public static final String DELETE_PARQUET_DICT_SIZE_BYTES = "write.delete.parquet.dict-size-bytes";
public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
public static final String DELETE_PARQUET_COMPRESSION = "write.delete.parquet.compression-codec";
public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
public static final String PARQUET_COMPRESSION_LEVEL = "write.parquet.compression-level";
public static final String DELETE_PARQUET_COMPRESSION_LEVEL = "write.delete.parquet.compression-level";
public static final String PARQUET_COMPRESSION_LEVEL_DEFAULT = null;
public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
public static final String DELETE_AVRO_COMPRESSION = "write.delete.avro.compression-codec";
public static final String AVRO_COMPRESSION_DEFAULT = "gzip";
public static final String SPLIT_SIZE = "read.split.target-size";
public static final long SPLIT_SIZE_DEFAULT = 128 * 1024 * 1024; // 128 MB
public static final String METADATA_SPLIT_SIZE = "read.split.metadata-target-size";
public static final long METADATA_SPLIT_SIZE_DEFAULT = 32 * 1024 * 1024; // 32 MB
public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
public static final int SPLIT_LOOKBACK_DEFAULT = 10;
public static final String SPLIT_OPEN_FILE_COST = "read.split.open-file-cost";
public static final long SPLIT_OPEN_FILE_COST_DEFAULT = 4 * 1024 * 1024; // 4MB
public static final String PARQUET_VECTORIZATION_ENABLED = "read.parquet.vectorization.enabled";
public static final boolean PARQUET_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String PARQUET_BATCH_SIZE = "read.parquet.vectorization.batch-size";
public static final int PARQUET_BATCH_SIZE_DEFAULT = 5000;
public static final String ORC_VECTORIZATION_ENABLED = "read.orc.vectorization.enabled";
public static final boolean ORC_VECTORIZATION_ENABLED_DEFAULT = false;
public static final String ORC_BATCH_SIZE = "read.orc.vectorization.batch-size";
public static final int ORC_BATCH_SIZE_DEFAULT = 5000;
public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
/**
* @deprecated Use {@link #WRITE_DATA_LOCATION} instead.
*/
@Deprecated
public static final String OBJECT_STORE_PATH = "write.object-storage.path";
public static final String WRITE_LOCATION_PROVIDER_IMPL = "write.location-provider.impl";
/**
* @deprecated Use {@link #WRITE_DATA_LOCATION} instead.
*/
@Deprecated
public static final String WRITE_FOLDER_STORAGE_LOCATION = "write.folder-storage.path";
/**
* @deprecated will be removed in 0.14.0, use {@link #WRITE_DATA_LOCATION} instead
*/
@Deprecated
public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "data" folder underneath the root path of the table.
public static final String WRITE_DATA_LOCATION = "write.data.path";
// This only applies to files written after this property is set. Files previously written aren't
// relocated to reflect this parameter.
// If not set, defaults to a "metadata" folder underneath the root path of the table.
public static final String WRITE_METADATA_LOCATION = "write.metadata.path";
public static final String WRITE_PARTITION_SUMMARY_LIMIT = "write.summary.partition-limit";
public static final int WRITE_PARTITION_SUMMARY_LIMIT_DEFAULT = 0;
public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
public static final String METADATA_COMPRESSION = "write.metadata.compression-codec";
public static final String METADATA_COMPRESSION_DEFAULT = "none";
public static final String METADATA_PREVIOUS_VERSIONS_MAX = "write.metadata.previous-versions-max";
public static final int METADATA_PREVIOUS_VERSIONS_MAX_DEFAULT = 100;
  // This enables deleting the oldest metadata files after each commit.
public static final String METADATA_DELETE_AFTER_COMMIT_ENABLED = "write.metadata.delete-after-commit.enabled";
public static final boolean METADATA_DELETE_AFTER_COMMIT_ENABLED_DEFAULT = false;
public static final String METRICS_MODE_COLUMN_CONF_PREFIX = "write.metadata.metrics.column.";
public static final String DEFAULT_WRITE_METRICS_MODE = "write.metadata.metrics.default";
public static final String DEFAULT_WRITE_METRICS_MODE_DEFAULT = "truncate(16)";
public static final String DEFAULT_NAME_MAPPING = "schema.name-mapping.default";
public static final String WRITE_AUDIT_PUBLISH_ENABLED = "write.wap.enabled";
public static final String WRITE_AUDIT_PUBLISH_ENABLED_DEFAULT = "false";
public static final String WRITE_TARGET_FILE_SIZE_BYTES = "write.target-file-size-bytes";
public static final String DELETE_TARGET_FILE_SIZE_BYTES = "write.delete.target-file-size-bytes";
public static final long WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT = 512 * 1024 * 1024; // 512 MB
public static final String SPARK_WRITE_PARTITIONED_FANOUT_ENABLED = "write.spark.fanout.enabled";
public static final boolean SPARK_WRITE_PARTITIONED_FANOUT_ENABLED_DEFAULT = false;
public static final String SNAPSHOT_ID_INHERITANCE_ENABLED = "compatibility.snapshot-id-inheritance.enabled";
public static final boolean SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT = false;
public static final String ENGINE_HIVE_ENABLED = "engine.hive.enabled";
public static final boolean ENGINE_HIVE_ENABLED_DEFAULT = false;
public static final String WRITE_DISTRIBUTION_MODE = "write.distribution-mode";
public static final String WRITE_DISTRIBUTION_MODE_NONE = "none";
public static final String WRITE_DISTRIBUTION_MODE_HASH = "hash";
public static final String WRITE_DISTRIBUTION_MODE_RANGE = "range";
/**
* @deprecated will be removed in 0.14.0, use specific modes instead
*/
@Deprecated
public static final String WRITE_DISTRIBUTION_MODE_DEFAULT = WRITE_DISTRIBUTION_MODE_NONE;
public static final String GC_ENABLED = "gc.enabled";
public static final boolean GC_ENABLED_DEFAULT = true;
public static final String MAX_SNAPSHOT_AGE_MS = "history.expire.max-snapshot-age-ms";
public static final long MAX_SNAPSHOT_AGE_MS_DEFAULT = 5 * 24 * 60 * 60 * 1000; // 5 days
public static final String MIN_SNAPSHOTS_TO_KEEP = "history.expire.min-snapshots-to-keep";
public static final int MIN_SNAPSHOTS_TO_KEEP_DEFAULT = 1;
public static final String DELETE_ISOLATION_LEVEL = "write.delete.isolation-level";
public static final String DELETE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String DELETE_MODE = "write.delete.mode";
public static final String DELETE_MODE_DEFAULT = "copy-on-write";
public static final String DELETE_DISTRIBUTION_MODE = "write.delete.distribution-mode";
public static final String UPDATE_ISOLATION_LEVEL = "write.update.isolation-level";
public static final String UPDATE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String UPDATE_MODE = "write.update.mode";
public static final String UPDATE_MODE_DEFAULT = "copy-on-write";
public static final String UPDATE_DISTRIBUTION_MODE = "write.update.distribution-mode";
public static final String MERGE_ISOLATION_LEVEL = "write.merge.isolation-level";
public static final String MERGE_ISOLATION_LEVEL_DEFAULT = "serializable";
public static final String MERGE_MODE = "write.merge.mode";
public static final String MERGE_MODE_DEFAULT = "copy-on-write";
public static final String MERGE_CARDINALITY_CHECK_ENABLED = "write.merge.cardinality-check.enabled";
public static final boolean MERGE_CARDINALITY_CHECK_ENABLED_DEFAULT = true;
public static final String MERGE_DISTRIBUTION_MODE = "write.merge.distribution-mode";
public static final String UPSERT_ENABLED = "write.upsert.enabled";
public static final boolean UPSERT_ENABLED_DEFAULT = false;
}
 | 1 | 42,409 | this can be an integer | apache-iceberg | java
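A side note on the constants above: several defaults that are conceptually numeric are declared as String constants (for example PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"), so every caller has to parse them before use, and the review remark above is most likely pointing at the same pattern in the accompanying patch. The sketch below shows that parse-with-default pattern in isolation; the PropertyParsing class, its propertyAsLong helper, and the props map are illustrative assumptions, not part of the Iceberg class shown above.

import java.util.Map;

final class PropertyParsing {
  // Read a table property that represents a numeric byte count, falling back to a
  // default that is itself declared as a String (as PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT is above).
  static long propertyAsLong(Map<String, String> props, String key, String defaultValue) {
    String value = props.getOrDefault(key, defaultValue);
    return Long.parseLong(value); // a malformed value only surfaces as NumberFormatException at read time
  }

  public static void main(String[] args) {
    // Hypothetical table-properties map; the key matches PARQUET_ROW_GROUP_SIZE_BYTES above.
    Map<String, String> props = Map.of("write.parquet.row-group-size-bytes", "67108864"); // 64 MB
    long rowGroupSize = propertyAsLong(props, "write.parquet.row-group-size-bytes", "134217728");
    System.out.println(rowGroupSize); // prints 67108864
  }
}

Declaring such defaults as long or int constants, as most of the other *_DEFAULT fields here already do, removes the parse step and the possibility of a runtime NumberFormatException.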
@@ -69,6 +69,8 @@ class CompletionView(QTreeView):
{{ color['completion.category.border.top'] }};
border-bottom: 1px solid
{{ color['completion.category.border.bottom'] }};
+ font: {{ font['completion.category'] }};
+
}
QTreeView::item:selected, QTreeView::item:selected:hover { | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Completion view for statusbar command section.
Defines a CompletionView which uses CompletionFilterModel and CompletionModel
subclasses to provide completions.
"""
from PyQt5.QtWidgets import QStyle, QTreeView, QSizePolicy
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QItemSelectionModel
from qutebrowser.config import config, style
from qutebrowser.completion import completiondelegate, completer
from qutebrowser.completion.models import base
from qutebrowser.utils import qtutils, objreg, utils, usertypes
from qutebrowser.commands import cmdexc, cmdutils
class CompletionView(QTreeView):
"""The view showing available completions.
Based on QTreeView but heavily customized so root elements show as category
headers, and children show as flat list.
Attributes:
enabled: Whether showing the CompletionView is enabled.
_win_id: The ID of the window this CompletionView is associated with.
_height: The height to use for the CompletionView.
_height_perc: Either None or a percentage if height should be relative.
_delegate: The item delegate used.
_column_widths: A list of column widths, in percent.
Signals:
resize_completion: Emitted when the completion should be resized.
"""
# Drawing the item foreground will be done by CompletionItemDelegate, so we
# don't define that in this stylesheet.
STYLESHEET = """
QTreeView {
font: {{ font['completion'] }};
background-color: {{ color['completion.bg'] }};
alternate-background-color: {{ color['completion.alternate-bg'] }};
outline: 0;
border: 0px;
}
QTreeView::item:disabled {
background-color: {{ color['completion.category.bg'] }};
border-top: 1px solid
{{ color['completion.category.border.top'] }};
border-bottom: 1px solid
{{ color['completion.category.border.bottom'] }};
}
QTreeView::item:selected, QTreeView::item:selected:hover {
border-top: 1px solid
{{ color['completion.item.selected.border.top'] }};
border-bottom: 1px solid
{{ color['completion.item.selected.border.bottom'] }};
background-color: {{ color['completion.item.selected.bg'] }};
}
QTreeView:item::hover {
border: 0px;
}
QTreeView QScrollBar {
width: {{ config.get('completion', 'scrollbar-width') }}px;
background: {{ color['completion.scrollbar.bg'] }};
}
QTreeView QScrollBar::handle {
background: {{ color['completion.scrollbar.fg'] }};
border: {{ config.get('completion', 'scrollbar-padding') }}px solid
{{ color['completion.scrollbar.bg'] }};
min-height: 10px;
}
QTreeView QScrollBar::sub-line, QScrollBar::add-line {
border: none;
background: none;
}
"""
resize_completion = pyqtSignal()
def __init__(self, win_id, parent=None):
super().__init__(parent)
self._win_id = win_id
objreg.register('completion', self, scope='window', window=win_id)
cmd = objreg.get('status-command', scope='window', window=win_id)
completer_obj = completer.Completer(cmd, win_id, self)
completer_obj.next_prev_item.connect(self.on_next_prev_item)
objreg.register('completer', completer_obj, scope='window',
window=win_id)
self.enabled = config.get('completion', 'show')
objreg.get('config').changed.connect(self.set_enabled)
# FIXME handle new aliases.
# objreg.get('config').changed.connect(self.init_command_completion)
self._column_widths = base.BaseCompletionModel.COLUMN_WIDTHS
self._delegate = completiondelegate.CompletionItemDelegate(self)
self.setItemDelegate(self._delegate)
style.set_register_stylesheet(self)
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
self.setHeaderHidden(True)
self.setAlternatingRowColors(True)
self.setIndentation(0)
self.setItemsExpandable(False)
self.setExpandsOnDoubleClick(False)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
# WORKAROUND
# This is a workaround for weird race conditions with invalid
# item indexes leading to segfaults in Qt.
#
# Some background: http://bugs.quassel-irc.org/issues/663
# The proposed fix there was later reverted because it didn't help.
self.setUniformRowHeights(True)
self.hide()
# FIXME set elidemode
# https://github.com/The-Compiler/qutebrowser/issues/118
def __repr__(self):
return utils.get_repr(self)
def _resize_columns(self):
"""Resize the completion columns based on column_widths."""
width = self.size().width()
pixel_widths = [(width * perc // 100) for perc in self._column_widths]
if self.verticalScrollBar().isVisible():
pixel_widths[-1] -= self.style().pixelMetric(
QStyle.PM_ScrollBarExtent) + 5
for i, w in enumerate(pixel_widths):
self.setColumnWidth(i, w)
def _next_idx(self, upwards):
"""Get the previous/next QModelIndex displayed in the view.
Used by tab_handler.
Args:
upwards: Get previous item, not next.
Return:
A QModelIndex.
"""
idx = self.selectionModel().currentIndex()
if not idx.isValid():
# No item selected yet
if upwards:
return self.model().last_item()
else:
return self.model().first_item()
while True:
idx = self.indexAbove(idx) if upwards else self.indexBelow(idx)
# wrap around if we arrived at beginning/end
if not idx.isValid() and upwards:
return self.model().last_item()
elif not idx.isValid() and not upwards:
idx = self.model().first_item()
self.scrollTo(idx.parent())
return idx
elif idx.parent().isValid():
# Item is a real item, not a category header -> success
return idx
@pyqtSlot(bool)
def on_next_prev_item(self, prev):
"""Handle a tab press for the CompletionView.
Select the previous/next item and write the new text to the
statusbar.
Called from the Completer's next_prev_item signal.
Args:
prev: True for prev item, False for next one.
"""
if not self.isVisible():
# No completion running at the moment, ignore keypress
return
idx = self._next_idx(prev)
qtutils.ensure_valid(idx)
self.selectionModel().setCurrentIndex(
idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows)
def set_model(self, model):
"""Switch completion to a new model.
Called from on_update_completion().
Args:
model: The model to use.
"""
old_model = self.model()
sel_model = self.selectionModel()
self.setModel(model)
if sel_model is not None:
sel_model.deleteLater()
if old_model is not None:
old_model.deleteLater()
for i in range(model.rowCount()):
self.expand(model.index(i, 0))
self._column_widths = model.srcmodel.COLUMN_WIDTHS
self._resize_columns()
self.maybe_resize_completion()
def set_pattern(self, pattern):
"""Set the completion pattern for the current model.
Called from on_update_completion().
Args:
pattern: The filter pattern to set (what the user entered).
"""
self.model().set_pattern(pattern)
self.maybe_resize_completion()
@pyqtSlot()
def maybe_resize_completion(self):
"""Emit the resize_completion signal if the config says so."""
if config.get('completion', 'shrink'):
self.resize_completion.emit()
@config.change_filter('completion', 'show')
def set_enabled(self):
"""Update self.enabled when the config changed."""
self.enabled = config.get('completion', 'show')
@pyqtSlot()
def on_clear_completion_selection(self):
"""Clear the selection model when an item is activated."""
selmod = self.selectionModel()
if selmod is not None:
selmod.clearSelection()
selmod.clearCurrentIndex()
def selectionChanged(self, selected, deselected):
"""Extend selectionChanged to call completers selection_changed."""
super().selectionChanged(selected, deselected)
completer_obj = objreg.get('completer', scope='window',
window=self._win_id)
completer_obj.selection_changed(selected, deselected)
def resizeEvent(self, e):
"""Extend resizeEvent to adjust column size."""
super().resizeEvent(e)
self._resize_columns()
def showEvent(self, e):
"""Adjust the completion size and scroll when it's freshly shown."""
self.resize_completion.emit()
scrollbar = self.verticalScrollBar()
if scrollbar is not None:
scrollbar.setValue(scrollbar.minimum())
super().showEvent(e)
@cmdutils.register(instance='completion', hide=True,
modes=[usertypes.KeyMode.command], scope='window')
def completion_item_del(self):
"""Delete the current completion item."""
if not self.currentIndex().isValid():
raise cmdexc.CommandError("No item selected!")
try:
self.model().srcmodel.delete_cur_item(self)
except NotImplementedError:
raise cmdexc.CommandError("Cannot delete this item.")
| 1 | 15,469 | nitpick: Please remove the blank line here | qutebrowser-qutebrowser | py |
@@ -4,6 +4,7 @@ class Way < ActiveRecord::Base
include ConsistencyValidations
set_table_name 'current_ways'
+ foreign_key 'changeset_id'
belongs_to :changeset
| 1 | class Way < ActiveRecord::Base
require 'xml/libxml'
include ConsistencyValidations
set_table_name 'current_ways'
belongs_to :changeset
has_many :old_ways, :order => 'version'
has_many :way_nodes, :order => 'sequence_id'
has_many :nodes, :through => :way_nodes, :order => 'sequence_id'
has_many :way_tags
has_many :containing_relation_members, :class_name => "RelationMember", :as => :member
has_many :containing_relations, :class_name => "Relation", :through => :containing_relation_members, :source => :relation, :extend => ObjectFinder
validates_presence_of :id, :on => :update
validates_presence_of :changeset_id,:version, :timestamp
validates_uniqueness_of :id
validates_inclusion_of :visible, :in => [ true, false ]
validates_numericality_of :changeset_id, :version, :integer_only => true
validates_numericality_of :id, :on => :update, :integer_only => true
validates_associated :changeset
scope :visible, where(:visible => true)
scope :invisible, where(:visible => false)
# Read in XML as text and return its Way object representation
def self.from_xml(xml, create=false)
begin
p = XML::Parser.string(xml)
doc = p.parse
doc.find('//osm/way').each do |pt|
return Way.from_xml_node(pt, create)
end
raise OSM::APIBadXMLError.new("node", xml, "XML doesn't contain an osm/way element.")
rescue LibXML::XML::Error, ArgumentError => ex
raise OSM::APIBadXMLError.new("way", xml, ex.message)
end
end
def self.from_xml_node(pt, create=false)
way = Way.new
raise OSM::APIBadXMLError.new("way", pt, "Version is required when updating") unless create or not pt['version'].nil?
way.version = pt['version']
raise OSM::APIBadXMLError.new("way", pt, "Changeset id is missing") if pt['changeset'].nil?
way.changeset_id = pt['changeset']
unless create
raise OSM::APIBadXMLError.new("way", pt, "ID is required when updating") if pt['id'].nil?
way.id = pt['id'].to_i
# .to_i will return 0 if there is no number that can be parsed.
# We want to make sure that there is no id with zero anyway
raise OSM::APIBadUserInput.new("ID of way cannot be zero when updating.") if way.id == 0
end
# We don't care about the timestamp or the visibility, as these are either
# set explicitly or implied by the action. The visibility is set to true,
# and manually set to false before the actual delete.
way.visible = true
pt.find('tag').each do |tag|
raise OSM::APIBadXMLError.new("way", pt, "tag is missing key") if tag['k'].nil?
raise OSM::APIBadXMLError.new("way", pt, "tag is missing value") if tag['v'].nil?
way.add_tag_keyval(tag['k'], tag['v'])
end
pt.find('nd').each do |nd|
way.add_nd_num(nd['ref'])
end
return way
end
# Find a way given its ID, and in a single SQL call also grab its nodes
#
# You can't pull in all the tags too unless we put a sequence_id on the way_tags table and have a multipart key
def self.find_eager(id)
way = Way.find(id, :include => {:way_nodes => :node})
#If waytag had a multipart key that was real, you could do this:
#way = Way.find(id, :include => [:way_tags, {:way_nodes => :node}])
end
# Find a way given its ID, and in a single SQL call also grab its nodes and tags
def to_xml
doc = OSM::API.new.get_xml_doc
doc.root << to_xml_node()
return doc
end
def to_xml_node(visible_nodes = nil, changeset_cache = {}, user_display_name_cache = {})
el1 = XML::Node.new 'way'
el1['id'] = self.id.to_s
el1['visible'] = self.visible.to_s
el1['timestamp'] = self.timestamp.xmlschema
el1['version'] = self.version.to_s
el1['changeset'] = self.changeset_id.to_s
if changeset_cache.key?(self.changeset_id)
# use the cache if available
else
changeset_cache[self.changeset_id] = self.changeset.user_id
end
user_id = changeset_cache[self.changeset_id]
if user_display_name_cache.key?(user_id)
# use the cache if available
elsif self.changeset.user.data_public?
user_display_name_cache[user_id] = self.changeset.user.display_name
else
user_display_name_cache[user_id] = nil
end
if not user_display_name_cache[user_id].nil?
el1['user'] = user_display_name_cache[user_id]
el1['uid'] = user_id.to_s
end
# make sure nodes are output in sequence_id order
ordered_nodes = []
self.way_nodes.each do |nd|
if visible_nodes
# if there is a list of visible nodes then use that to weed out deleted nodes
if visible_nodes[nd.node_id]
ordered_nodes[nd.sequence_id] = nd.node_id.to_s
end
else
# otherwise, manually go to the db to check things
if nd.node and nd.node.visible?
ordered_nodes[nd.sequence_id] = nd.node_id.to_s
end
end
end
ordered_nodes.each do |nd_id|
if nd_id and nd_id != '0'
e = XML::Node.new 'nd'
e['ref'] = nd_id
el1 << e
end
end
self.way_tags.each do |tag|
e = XML::Node.new 'tag'
e['k'] = tag.k
e['v'] = tag.v
el1 << e
end
return el1
end
def nds
unless @nds
@nds = Array.new
self.way_nodes.each do |nd|
@nds += [nd.node_id]
end
end
@nds
end
def tags
unless @tags
@tags = {}
self.way_tags.each do |tag|
@tags[tag.k] = tag.v
end
end
@tags
end
def nds=(s)
@nds = s
end
def tags=(t)
@tags = t
end
def add_nd_num(n)
@nds = Array.new unless @nds
@nds << n.to_i
end
def add_tag_keyval(k, v)
@tags = Hash.new unless @tags
# duplicate tags are now forbidden, so we can't allow values
# in the hash to be overwritten.
raise OSM::APIDuplicateTagsError.new("way", self.id, k) if @tags.include? k
@tags[k] = v
end
##
# the integer coords (i.e: unscaled) bounding box of the way, assuming
# straight line segments.
def bbox
lons = nodes.collect { |n| n.longitude }
lats = nodes.collect { |n| n.latitude }
BoundingBox.new(lons.min, lats.min, lons.max, lats.max)
end
def update_from(new_way, user)
Way.transaction do
self.lock!
check_consistency(self, new_way, user)
unless new_way.preconditions_ok?(self.nds)
raise OSM::APIPreconditionFailedError.new("Cannot update way #{self.id}: data is invalid.")
end
self.changeset_id = new_way.changeset_id
self.changeset = new_way.changeset
self.tags = new_way.tags
self.nds = new_way.nds
self.visible = true
save_with_history!
end
end
def create_with_history(user)
check_create_consistency(self, user)
unless self.preconditions_ok?
raise OSM::APIPreconditionFailedError.new("Cannot create way: data is invalid.")
end
self.version = 0
self.visible = true
save_with_history!
end
def preconditions_ok?(old_nodes = [])
return false if self.nds.empty?
if self.nds.length > MAX_NUMBER_OF_WAY_NODES
raise OSM::APITooManyWayNodesError.new(self.id, self.nds.length, MAX_NUMBER_OF_WAY_NODES)
end
# check only the new nodes, for efficiency - old nodes were already checked last time and can't
# be deleted while they're in use.
new_nds = (self.nds - old_nodes).sort.uniq
unless new_nds.empty?
db_nds = Node.where(:id => new_nds, :visible => true)
if db_nds.length < new_nds.length
missing = new_nds - db_nds.collect { |n| n.id }
raise OSM::APIPreconditionFailedError.new("Way #{self.id} requires the nodes with id in (#{missing.join(',')}), which either do not exist, or are not visible.")
end
end
return true
end
def delete_with_history!(new_way, user)
unless self.visible
raise OSM::APIAlreadyDeletedError.new("way", new_way.id)
end
# need to start the transaction here, so that the database can
# provide repeatable reads for the used-by checks. this means it
# shouldn't be possible to get race conditions.
Way.transaction do
self.lock!
check_consistency(self, new_way, user)
rels = Relation.joins(:relation_members).where(:visible => true, :current_relation_members => { :member_type => "Way", :member_id => id }).order(:id)
raise OSM::APIPreconditionFailedError.new("Way #{self.id} is still used by relations #{rels.collect { |r| r.id }.join(",")}.") unless rels.empty?
self.changeset_id = new_way.changeset_id
self.changeset = new_way.changeset
self.tags = []
self.nds = []
self.visible = false
save_with_history!
end
end
# Temporary method to match interface to nodes
def tags_as_hash
return self.tags
end
##
# if any referenced nodes are placeholder IDs (i.e: are negative) then
# this calling this method will fix them using the map from placeholders
# to IDs +id_map+.
def fix_placeholders!(id_map, placeholder_id = nil)
self.nds.map! do |node_id|
if node_id < 0
new_id = id_map[:node][node_id]
raise OSM::APIBadUserInput.new("Placeholder node not found for reference #{node_id} in way #{self.id.nil? ? placeholder_id : self.id}") if new_id.nil?
new_id
else
node_id
end
end
end
private
def save_with_history!
t = Time.now.getutc
# update the bounding box, note that this has to be done both before
# and after the save, so that nodes from both versions are included in the
# bbox. we use a copy of the changeset so that it isn't reloaded
# later in the save.
cs = self.changeset
cs.update_bbox!(bbox) unless nodes.empty?
Way.transaction do
self.version += 1
self.timestamp = t
self.save!
tags = self.tags
WayTag.delete_all(:way_id => self.id)
tags.each do |k,v|
tag = WayTag.new
tag.way_id = self.id
tag.k = k
tag.v = v
tag.save!
end
nds = self.nds
WayNode.delete_all(:way_id => self.id)
sequence = 1
nds.each do |n|
nd = WayNode.new
nd.id = [self.id, sequence]
nd.node_id = n
nd.save!
sequence += 1
end
old_way = OldWay.from_way(self)
old_way.timestamp = t
old_way.save_with_dependencies!
# reload the way so that the nodes array points to the correct
# new set of nodes.
self.reload
# update and commit the bounding box, now that way nodes
# have been updated and we're in a transaction.
cs.update_bbox!(bbox) unless nodes.empty?
# tell the changeset we updated one element only
cs.add_changes! 1
cs.save!
end
end
end
 | 1 | 7,515 | What is this for? The only methods I can see by that name in the Rails docs generate a foreign key name from a model class name, but you seem to be passing a key name as the argument? | openstreetmap-openstreetmap-website | rb
@@ -798,6 +798,12 @@ public class CoreContainer {
SecurityConfHandler.SecurityConfig securityConfig = securityConfHandler.getSecurityConfig(false);
initializeAuthorizationPlugin((Map<String, Object>) securityConfig.getData().get("authorization"));
initializeAuthenticationPlugin((Map<String, Object>) securityConfig.getData().get("authentication"));
+ if (authenticationPlugin != null && authenticationPlugin.plugin.getMetricRegistry() == null) {
+ authenticationPlugin.plugin.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, "/authentication");
+ }
+ if (pkiAuthenticationPlugin != null && pkiAuthenticationPlugin.getMetricRegistry() == null) {
+ pkiAuthenticationPlugin.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, "/authentication/pki");
+ }
}
private static void checkForDuplicateCoreNames(List<CoreDescriptor> cds) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.core;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import org.apache.http.auth.AuthSchemeProvider;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.config.Lookup;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.Directory;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.AuthSchemeRegistryProvider;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.CredentialsProviderProvider;
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.autoscaling.AutoScalingHandler;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Replica.State;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.handler.SnapShooter;
import org.apache.solr.handler.admin.AutoscalingHistoryHandler;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.admin.ConfigSetsHandler;
import org.apache.solr.handler.admin.CoreAdminHandler;
import org.apache.solr.handler.admin.HealthCheckHandler;
import org.apache.solr.handler.admin.InfoHandler;
import org.apache.solr.handler.admin.MetricsCollectorHandler;
import org.apache.solr.handler.admin.MetricsHandler;
import org.apache.solr.handler.admin.MetricsHistoryHandler;
import org.apache.solr.handler.admin.SecurityConfHandler;
import org.apache.solr.handler.admin.SecurityConfHandlerLocal;
import org.apache.solr.handler.admin.SecurityConfHandlerZk;
import org.apache.solr.handler.admin.ZookeeperInfoHandler;
import org.apache.solr.handler.admin.ZookeeperStatusHandler;
import org.apache.solr.handler.component.ShardHandlerFactory;
import org.apache.solr.logging.LogWatcher;
import org.apache.solr.logging.MDCLoggingContext;
import org.apache.solr.metrics.SolrCoreMetricManager;
import org.apache.solr.metrics.SolrMetricManager;
import org.apache.solr.metrics.SolrMetricProducer;
import org.apache.solr.request.SolrRequestHandler;
import org.apache.solr.search.SolrFieldCacheBean;
import org.apache.solr.security.AuthenticationPlugin;
import org.apache.solr.security.AuthorizationPlugin;
import org.apache.solr.security.HttpClientBuilderPlugin;
import org.apache.solr.security.PKIAuthenticationPlugin;
import org.apache.solr.security.PublicKeyHandler;
import org.apache.solr.security.SecurityPluginHolder;
import org.apache.solr.update.SolrCoreState;
import org.apache.solr.update.UpdateShardHandler;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.OrderedExecutor;
import org.apache.solr.util.stats.MetricUtils;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.Objects.requireNonNull;
import static org.apache.solr.common.params.CommonParams.AUTHC_PATH;
import static org.apache.solr.common.params.CommonParams.AUTHZ_PATH;
import static org.apache.solr.common.params.CommonParams.AUTOSCALING_HISTORY_PATH;
import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.CONFIGSETS_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.CORES_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.HEALTH_CHECK_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.INFO_HANDLER_PATH;
import static org.apache.solr.common.params.CommonParams.METRICS_HISTORY_PATH;
import static org.apache.solr.common.params.CommonParams.METRICS_PATH;
import static org.apache.solr.common.params.CommonParams.ZK_PATH;
import static org.apache.solr.common.params.CommonParams.ZK_STATUS_PATH;
import static org.apache.solr.core.CorePropertiesLocator.PROPERTIES_FILENAME;
import static org.apache.solr.security.AuthenticationPlugin.AUTHENTICATION_PLUGIN_PROP;
/**
*
* @since solr 1.3
*/
public class CoreContainer {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
final SolrCores solrCores = new SolrCores(this);
public static class CoreLoadFailure {
public final CoreDescriptor cd;
public final Exception exception;
public CoreLoadFailure(CoreDescriptor cd, Exception loadFailure) {
this.cd = new CoreDescriptor(cd.getName(), cd);
this.exception = loadFailure;
}
}
protected final Map<String, CoreLoadFailure> coreInitFailures = new ConcurrentHashMap<>();
protected CoreAdminHandler coreAdminHandler = null;
protected CollectionsHandler collectionsHandler = null;
protected HealthCheckHandler healthCheckHandler = null;
private InfoHandler infoHandler;
protected ConfigSetsHandler configSetsHandler = null;
private PKIAuthenticationPlugin pkiAuthenticationPlugin;
protected Properties containerProperties;
private ConfigSetService coreConfigService;
protected ZkContainer zkSys = new ZkContainer();
protected ShardHandlerFactory shardHandlerFactory;
private UpdateShardHandler updateShardHandler;
private ExecutorService coreContainerWorkExecutor = ExecutorUtil.newMDCAwareCachedThreadPool(
new DefaultSolrThreadFactory("coreContainerWorkExecutor") );
private final OrderedExecutor replayUpdatesExecutor;
protected LogWatcher logging = null;
private CloserThread backgroundCloser = null;
protected final NodeConfig cfg;
protected final SolrResourceLoader loader;
protected final String solrHome;
protected final CoresLocator coresLocator;
private String hostName;
private final BlobRepository blobRepository = new BlobRepository(this);
private PluginBag<SolrRequestHandler> containerHandlers = new PluginBag<>(SolrRequestHandler.class, null);
private boolean asyncSolrCoreLoad;
protected SecurityConfHandler securityConfHandler;
private SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin;
private SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin;
private BackupRepositoryFactory backupRepoFactory;
protected SolrMetricManager metricManager;
protected String metricTag = Integer.toHexString(hashCode());
protected MetricsHandler metricsHandler;
protected MetricsHistoryHandler metricsHistoryHandler;
protected MetricsCollectorHandler metricsCollectorHandler;
protected AutoscalingHistoryHandler autoscalingHistoryHandler;
// Bits for the state variable.
public final static long LOAD_COMPLETE = 0x1L;
public final static long CORE_DISCOVERY_COMPLETE = 0x2L;
public final static long INITIAL_CORE_LOAD_COMPLETE = 0x4L;
private volatile long status = 0L;
protected AutoScalingHandler autoScalingHandler;
private enum CoreInitFailedAction { fromleader, none }
/**
* This method instantiates a new instance of {@linkplain BackupRepository}.
*
* @param repositoryName The name of the backup repository (Optional).
* If not specified, a default implementation is used.
* @return a new instance of {@linkplain BackupRepository}.
*/
public BackupRepository newBackupRepository(Optional<String> repositoryName) {
BackupRepository repository;
if (repositoryName.isPresent()) {
repository = backupRepoFactory.newInstance(getResourceLoader(), repositoryName.get());
} else {
repository = backupRepoFactory.newInstance(getResourceLoader());
}
return repository;
}
public ExecutorService getCoreZkRegisterExecutorService() {
return zkSys.getCoreZkRegisterExecutorService();
}
public SolrRequestHandler getRequestHandler(String path) {
return RequestHandlerBase.getRequestHandler(path, containerHandlers);
}
public PluginBag<SolrRequestHandler> getRequestHandlers() {
return this.containerHandlers;
}
{
log.debug("New CoreContainer " + System.identityHashCode(this));
}
/**
* Create a new CoreContainer using system properties to detect the solr home
* directory. The container's cores are not loaded.
* @see #load()
*/
public CoreContainer() {
this(new SolrResourceLoader(SolrResourceLoader.locateSolrHome()));
}
/**
* Create a new CoreContainer using the given SolrResourceLoader. The container's
* cores are not loaded.
* @param loader the SolrResourceLoader
* @see #load()
*/
public CoreContainer(SolrResourceLoader loader) {
this(SolrXmlConfig.fromSolrHome(loader, loader.getInstancePath()));
}
/**
* Create a new CoreContainer using the given solr home directory. The container's
* cores are not loaded.
* @param solrHome a String containing the path to the solr home directory
* @see #load()
*/
public CoreContainer(String solrHome) {
this(new SolrResourceLoader(Paths.get(solrHome)));
}
/**
* Create a new CoreContainer using the given SolrResourceLoader,
* configuration and CoresLocator. The container's cores are
* not loaded.
* @param config a ConfigSolr representation of this container's configuration
* @see #load()
*/
public CoreContainer(NodeConfig config) {
this(config, new Properties());
}
public CoreContainer(NodeConfig config, Properties properties) {
this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()));
}
public CoreContainer(NodeConfig config, Properties properties, boolean asyncSolrCoreLoad) {
this(config, properties, new CorePropertiesLocator(config.getCoreRootDirectory()), asyncSolrCoreLoad);
}
public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator) {
this(config, properties, locator, false);
}
public CoreContainer(NodeConfig config, Properties properties, CoresLocator locator, boolean asyncSolrCoreLoad) {
this.loader = config.getSolrResourceLoader();
this.solrHome = loader.getInstancePath().toString();
containerHandlers.put(PublicKeyHandler.PATH, new PublicKeyHandler());
this.cfg = requireNonNull(config);
this.coresLocator = locator;
this.containerProperties = new Properties(properties);
this.asyncSolrCoreLoad = asyncSolrCoreLoad;
this.replayUpdatesExecutor = new OrderedExecutor(
cfg.getReplayUpdatesThreads(),
ExecutorUtil.newMDCAwareCachedThreadPool(
cfg.getReplayUpdatesThreads(),
new DefaultSolrThreadFactory("replayUpdatesExecutor")));
}
private synchronized void initializeAuthorizationPlugin(Map<String, Object> authorizationConf) {
authorizationConf = Utils.getDeepCopy(authorizationConf, 4);
//Initialize the Authorization module
SecurityPluginHolder<AuthorizationPlugin> old = authorizationPlugin;
SecurityPluginHolder<AuthorizationPlugin> authorizationPlugin = null;
if (authorizationConf != null) {
String klas = (String) authorizationConf.get("class");
if (klas == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "class is required for authorization plugin");
}
if (old != null && old.getZnodeVersion() == readVersion(authorizationConf)) {
return;
}
log.info("Initializing authorization plugin: " + klas);
authorizationPlugin = new SecurityPluginHolder<>(readVersion(authorizationConf),
getResourceLoader().newInstance(klas, AuthorizationPlugin.class));
// Read and pass the authorization context to the plugin
authorizationPlugin.plugin.init(authorizationConf);
} else {
log.debug("Security conf doesn't exist. Skipping setup for authorization module.");
}
this.authorizationPlugin = authorizationPlugin;
if (old != null) {
try {
old.plugin.close();
} catch (Exception e) {
}
}
}
private synchronized void initializeAuthenticationPlugin(Map<String, Object> authenticationConfig) {
authenticationConfig = Utils.getDeepCopy(authenticationConfig, 4);
String pluginClassName = null;
if (authenticationConfig != null) {
if (authenticationConfig.containsKey("class")) {
pluginClassName = String.valueOf(authenticationConfig.get("class"));
} else {
throw new SolrException(ErrorCode.SERVER_ERROR, "No 'class' specified for authentication in ZK.");
}
}
if (pluginClassName != null) {
log.debug("Authentication plugin class obtained from security.json: "+pluginClassName);
} else if (System.getProperty(AUTHENTICATION_PLUGIN_PROP) != null) {
pluginClassName = System.getProperty(AUTHENTICATION_PLUGIN_PROP);
log.debug("Authentication plugin class obtained from system property '" +
AUTHENTICATION_PLUGIN_PROP + "': " + pluginClassName);
} else {
log.debug("No authentication plugin used.");
}
SecurityPluginHolder<AuthenticationPlugin> old = authenticationPlugin;
SecurityPluginHolder<AuthenticationPlugin> authenticationPlugin = null;
// Initialize the plugin
if (pluginClassName != null) {
log.info("Initializing authentication plugin: " + pluginClassName);
authenticationPlugin = new SecurityPluginHolder<>(readVersion(authenticationConfig),
getResourceLoader().newInstance(pluginClassName,
AuthenticationPlugin.class,
null,
new Class[]{CoreContainer.class},
new Object[]{this}));
}
if (authenticationPlugin != null) {
authenticationPlugin.plugin.init(authenticationConfig);
setupHttpClientForAuthPlugin(authenticationPlugin.plugin);
}
this.authenticationPlugin = authenticationPlugin;
try {
if (old != null) old.plugin.close();
} catch (Exception e) {/*do nothing*/ }
}
private void setupHttpClientForAuthPlugin(Object authcPlugin) {
if (authcPlugin instanceof HttpClientBuilderPlugin) {
// Setup HttpClient for internode communication
SolrHttpClientBuilder builder = ((HttpClientBuilderPlugin) authcPlugin).getHttpClientBuilder(HttpClientUtil.getHttpClientBuilder());
// The default http client of the core container's shardHandlerFactory has already been created and
// configured using the default httpclient configurer. We need to reconfigure it using the plugin's
// http client configurer to set it up for internode communication.
log.debug("Reconfiguring HttpClient settings.");
SolrHttpClientContextBuilder httpClientBuilder = new SolrHttpClientContextBuilder();
if (builder.getCredentialsProviderProvider() != null) {
httpClientBuilder.setDefaultCredentialsProvider(new CredentialsProviderProvider() {
@Override
public CredentialsProvider getCredentialsProvider() {
return builder.getCredentialsProviderProvider().getCredentialsProvider();
}
});
}
if (builder.getAuthSchemeRegistryProvider() != null) {
httpClientBuilder.setAuthSchemeRegistryProvider(new AuthSchemeRegistryProvider() {
@Override
public Lookup<AuthSchemeProvider> getAuthSchemeRegistry() {
return builder.getAuthSchemeRegistryProvider().getAuthSchemeRegistry();
}
});
}
HttpClientUtil.setHttpClientRequestContextBuilder(httpClientBuilder);
} else {
if (pkiAuthenticationPlugin != null) {
//this happened due to an authc plugin reload. no need to register the pkiAuthc plugin again
if(pkiAuthenticationPlugin.isInterceptorRegistered()) return;
log.info("PKIAuthenticationPlugin is managing internode requests");
setupHttpClientForAuthPlugin(pkiAuthenticationPlugin);
pkiAuthenticationPlugin.setInterceptorRegistered();
}
}
}
private static int readVersion(Map<String, Object> conf) {
if (conf == null) return -1;
Map meta = (Map) conf.get("");
if (meta == null) return -1;
Number v = (Number) meta.get("v");
return v == null ? -1 : v.intValue();
}
/**
* This method allows subclasses to construct a CoreContainer
* without any default init behavior.
*
* @param testConstructor pass (Object)null.
* @lucene.experimental
*/
protected CoreContainer(Object testConstructor) {
solrHome = null;
loader = null;
coresLocator = null;
cfg = null;
containerProperties = null;
replayUpdatesExecutor = null;
}
public static CoreContainer createAndLoad(Path solrHome) {
return createAndLoad(solrHome, solrHome.resolve(SolrXmlConfig.SOLR_XML_FILE));
}
/**
* Create a new CoreContainer and load its cores
* @param solrHome the solr home directory
* @param configFile the file containing this container's configuration
* @return a loaded CoreContainer
*/
public static CoreContainer createAndLoad(Path solrHome, Path configFile) {
SolrResourceLoader loader = new SolrResourceLoader(solrHome);
CoreContainer cc = new CoreContainer(SolrXmlConfig.fromFile(loader, configFile));
try {
cc.load();
} catch (Exception e) {
cc.shutdown();
throw e;
}
return cc;
}
public Properties getContainerProperties() {
return containerProperties;
}
public PKIAuthenticationPlugin getPkiAuthenticationPlugin() {
return pkiAuthenticationPlugin;
}
public SolrMetricManager getMetricManager() {
return metricManager;
}
public MetricsHandler getMetricsHandler() {
return metricsHandler;
}
public MetricsHistoryHandler getMetricsHistoryHandler() {
return metricsHistoryHandler;
}
public OrderedExecutor getReplayUpdatesExecutor() {
return replayUpdatesExecutor;
}
//-------------------------------------------------------------------
// Initialization / Cleanup
//-------------------------------------------------------------------
/**
* Load the cores defined for this CoreContainer
*/
public void load() {
log.debug("Loading cores into CoreContainer [instanceDir={}]", loader.getInstancePath());
// add the sharedLib to the shared resource loader before initializing cfg based plugins
String libDir = cfg.getSharedLibDirectory();
if (libDir != null) {
Path libPath = loader.getInstancePath().resolve(libDir);
try {
loader.addToClassLoader(SolrResourceLoader.getURLs(libPath));
loader.reloadLuceneSPI();
} catch (IOException e) {
if (!libDir.equals("lib")) { // Don't complain if default "lib" dir does not exist
log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
}
}
}
metricManager = new SolrMetricManager(loader, cfg.getMetricsConfig());
coreContainerWorkExecutor = MetricUtils.instrumentedExecutorService(
coreContainerWorkExecutor, null,
metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
SolrMetricManager.mkName("coreContainerWorkExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
shardHandlerFactory = ShardHandlerFactory.newInstance(cfg.getShardHandlerFactoryPluginInfo(), loader);
if (shardHandlerFactory instanceof SolrMetricProducer) {
SolrMetricProducer metricProducer = (SolrMetricProducer) shardHandlerFactory;
metricProducer.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, "httpShardHandler");
}
updateShardHandler = new UpdateShardHandler(cfg.getUpdateShardHandlerConfig());
updateShardHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, "updateShardHandler");
solrCores.load(loader);
logging = LogWatcher.newRegisteredLogWatcher(cfg.getLogWatcherConfig(), loader);
hostName = cfg.getNodeName();
zkSys.initZooKeeper(this, solrHome, cfg.getCloudConfig());
if(isZooKeeperAware()) pkiAuthenticationPlugin = new PKIAuthenticationPlugin(this, zkSys.getZkController().getNodeName(),
(PublicKeyHandler) containerHandlers.get(PublicKeyHandler.PATH));
MDCLoggingContext.setNode(this);
securityConfHandler = isZooKeeperAware() ? new SecurityConfHandlerZk(this) : new SecurityConfHandlerLocal(this);
reloadSecurityProperties();
this.backupRepoFactory = new BackupRepositoryFactory(cfg.getBackupRepositoryPlugins());
createHandler(ZK_PATH, ZookeeperInfoHandler.class.getName(), ZookeeperInfoHandler.class);
createHandler(ZK_STATUS_PATH, ZookeeperStatusHandler.class.getName(), ZookeeperStatusHandler.class);
collectionsHandler = createHandler(COLLECTIONS_HANDLER_PATH, cfg.getCollectionsHandlerClass(), CollectionsHandler.class);
healthCheckHandler = createHandler(HEALTH_CHECK_HANDLER_PATH, cfg.getHealthCheckHandlerClass(), HealthCheckHandler.class);
infoHandler = createHandler(INFO_HANDLER_PATH, cfg.getInfoHandlerClass(), InfoHandler.class);
coreAdminHandler = createHandler(CORES_HANDLER_PATH, cfg.getCoreAdminHandlerClass(), CoreAdminHandler.class);
configSetsHandler = createHandler(CONFIGSETS_HANDLER_PATH, cfg.getConfigSetsHandlerClass(), ConfigSetsHandler.class);
// metricsHistoryHandler uses metricsHandler, so create it first
metricsHandler = new MetricsHandler(this);
containerHandlers.put(METRICS_PATH, metricsHandler);
metricsHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, METRICS_PATH);
createMetricsHistoryHandler();
autoscalingHistoryHandler = createHandler(AUTOSCALING_HISTORY_PATH, AutoscalingHistoryHandler.class.getName(), AutoscalingHistoryHandler.class);
metricsCollectorHandler = createHandler(MetricsCollectorHandler.HANDLER_PATH, MetricsCollectorHandler.class.getName(), MetricsCollectorHandler.class);
// may want to add some configuration here in the future
metricsCollectorHandler.init(null);
containerHandlers.put(AUTHZ_PATH, securityConfHandler);
securityConfHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, AUTHZ_PATH);
containerHandlers.put(AUTHC_PATH, securityConfHandler);
PluginInfo[] metricReporters = cfg.getMetricsConfig().getMetricReporters();
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.node);
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jvm);
metricManager.loadReporters(metricReporters, loader, this, null, null, SolrInfoBean.Group.jetty);
coreConfigService = ConfigSetService.createConfigSetService(cfg, loader, zkSys.zkController);
containerProperties.putAll(cfg.getSolrProperties());
// initialize gauges for reporting the number of cores and disk total/free
String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.node);
String metricTag = Integer.toHexString(hashCode());
metricManager.registerGauge(null, registryName, () -> solrCores.getCores().size(),
metricTag,true, "loaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
metricManager.registerGauge(null, registryName, () -> solrCores.getLoadedCoreNames().size() - solrCores.getCores().size(),
metricTag,true, "lazy", SolrInfoBean.Category.CONTAINER.toString(), "cores");
metricManager.registerGauge(null, registryName, () -> solrCores.getAllCoreNames().size() - solrCores.getLoadedCoreNames().size(),
metricTag,true, "unloaded", SolrInfoBean.Category.CONTAINER.toString(), "cores");
Path dataHome = cfg.getSolrDataHome() != null ? cfg.getSolrDataHome() : cfg.getCoreRootDirectory();
metricManager.registerGauge(null, registryName, () -> dataHome.toFile().getTotalSpace(),
metricTag,true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
metricManager.registerGauge(null, registryName, () -> dataHome.toFile().getUsableSpace(),
metricTag,true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs");
metricManager.registerGauge(null, registryName, () -> dataHome.toAbsolutePath().toString(),
metricTag,true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs");
metricManager.registerGauge(null, registryName, () -> {
try {
return org.apache.lucene.util.IOUtils.spins(dataHome.toAbsolutePath());
} catch (IOException e) {
// default to spinning
return true;
}
},
metricTag,true, "spins", SolrInfoBean.Category.CONTAINER.toString(), "fs");
metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getTotalSpace(),
metricTag,true, "totalSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toFile().getUsableSpace(),
metricTag,true, "usableSpace", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
metricManager.registerGauge(null, registryName, () -> cfg.getCoreRootDirectory().toAbsolutePath().toString(),
metricTag,true, "path", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
metricManager.registerGauge(null, registryName, () -> {
try {
return org.apache.lucene.util.IOUtils.spins(cfg.getCoreRootDirectory().toAbsolutePath());
} catch (IOException e) {
// default to spinning
return true;
}
},
metricTag,true, "spins", SolrInfoBean.Category.CONTAINER.toString(), "fs", "coreRoot");
// add version information
metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getSpecificationVersion(),
metricTag,true, "specification", SolrInfoBean.Category.CONTAINER.toString(), "version");
metricManager.registerGauge(null, registryName, () -> this.getClass().getPackage().getImplementationVersion(),
metricTag,true, "implementation", SolrInfoBean.Category.CONTAINER.toString(), "version");
SolrFieldCacheBean fieldCacheBean = new SolrFieldCacheBean();
fieldCacheBean.initializeMetrics(metricManager, registryName, metricTag, null);
if (isZooKeeperAware()) {
metricManager.loadClusterReporters(metricReporters, this);
}
// setup executor to load cores in parallel
ExecutorService coreLoadExecutor = MetricUtils.instrumentedExecutorService(
ExecutorUtil.newMDCAwareFixedThreadPool(
cfg.getCoreLoadThreadCount(isZooKeeperAware()),
new DefaultSolrThreadFactory("coreLoadExecutor")), null,
metricManager.registry(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node)),
SolrMetricManager.mkName("coreLoadExecutor", SolrInfoBean.Category.CONTAINER.toString(), "threadPool"));
final List<Future<SolrCore>> futures = new ArrayList<>();
try {
List<CoreDescriptor> cds = coresLocator.discover(this);
if (isZooKeeperAware()) {
//sort the cores if it is in SolrCloud. In standalone node the order does not matter
CoreSorter coreComparator = new CoreSorter().init(this);
cds = new ArrayList<>(cds);//make a copy
Collections.sort(cds, coreComparator::compare);
}
checkForDuplicateCoreNames(cds);
status |= CORE_DISCOVERY_COMPLETE;
for (final CoreDescriptor cd : cds) {
if (cd.isTransient() || !cd.isLoadOnStartup()) {
solrCores.addCoreDescriptor(cd);
} else if (asyncSolrCoreLoad) {
solrCores.markCoreAsLoading(cd);
}
if (cd.isLoadOnStartup()) {
futures.add(coreLoadExecutor.submit(() -> {
SolrCore core;
try {
if (zkSys.getZkController() != null) {
zkSys.getZkController().throwErrorIfReplicaReplaced(cd);
}
solrCores.waitAddPendingCoreOps(cd.getName());
core = createFromDescriptor(cd, false, false);
} finally {
solrCores.removeFromPendingOps(cd.getName());
if (asyncSolrCoreLoad) {
solrCores.markCoreAsNotLoading(cd);
}
}
try {
zkSys.registerInZk(core, true, false);
} catch (RuntimeException e) {
SolrException.log(log, "Error registering SolrCore", e);
}
return core;
}));
}
}
// Start the background thread
backgroundCloser = new CloserThread(this, solrCores, cfg);
backgroundCloser.start();
} finally {
if (asyncSolrCoreLoad && futures != null) {
coreContainerWorkExecutor.submit(() -> {
try {
for (Future<SolrCore> future : futures) {
try {
future.get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
} catch (ExecutionException e) {
log.error("Error waiting for SolrCore to be loaded on startup", e.getCause());
}
}
} finally {
ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
}
});
} else {
ExecutorUtil.shutdownAndAwaitTermination(coreLoadExecutor);
}
}
if (isZooKeeperAware()) {
zkSys.getZkController().checkOverseerDesignate();
// initialize this handler here when SolrCloudManager is ready
autoScalingHandler = new AutoScalingHandler(getZkController().getSolrCloudManager(), loader);
containerHandlers.put(AutoScalingHandler.HANDLER_PATH, autoScalingHandler);
autoScalingHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, AutoScalingHandler.HANDLER_PATH);
}
// This is a bit redundant but these are two distinct concepts for all they're accomplished at the same time.
status |= LOAD_COMPLETE | INITIAL_CORE_LOAD_COMPLETE;
}
// MetricsHistoryHandler supports both cloud and standalone configs
private void createMetricsHistoryHandler() {
PluginInfo plugin = cfg.getMetricsConfig().getHistoryHandler();
Map<String, Object> initArgs;
if (plugin != null && plugin.initArgs != null) {
initArgs = plugin.initArgs.asMap(5);
initArgs.put(MetricsHistoryHandler.ENABLE_PROP, plugin.isEnabled());
} else {
initArgs = new HashMap<>();
}
String name;
SolrCloudManager cloudManager;
SolrClient client;
if (isZooKeeperAware()) {
name = getZkController().getNodeName();
cloudManager = getZkController().getSolrCloudManager();
client = new CloudSolrClient.Builder(Collections.singletonList(getZkController().getZkServerAddress()), Optional.empty())
.withHttpClient(updateShardHandler.getDefaultHttpClient()).build();
} else {
name = getNodeConfig().getNodeName();
if (name == null || name.isEmpty()) {
name = "localhost";
}
cloudManager = null;
client = new EmbeddedSolrServer(this, CollectionAdminParams.SYSTEM_COLL) {
@Override
public void close() throws IOException {
// do nothing - we close the container ourselves
}
};
// enable local metrics unless specifically set otherwise
if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_NODES_PROP)) {
initArgs.put(MetricsHistoryHandler.ENABLE_NODES_PROP, true);
}
if (!initArgs.containsKey(MetricsHistoryHandler.ENABLE_REPLICAS_PROP)) {
initArgs.put(MetricsHistoryHandler.ENABLE_REPLICAS_PROP, true);
}
}
metricsHistoryHandler = new MetricsHistoryHandler(name, metricsHandler,
client, cloudManager, initArgs);
containerHandlers.put(METRICS_HISTORY_PATH, metricsHistoryHandler);
metricsHistoryHandler.initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, METRICS_HISTORY_PATH);
}
public void securityNodeChanged() {
log.info("Security node changed, reloading security.json");
reloadSecurityProperties();
}
/**
* Make sure securityConfHandler is initialized
*/
private void reloadSecurityProperties() {
SecurityConfHandler.SecurityConfig securityConfig = securityConfHandler.getSecurityConfig(false);
initializeAuthorizationPlugin((Map<String, Object>) securityConfig.getData().get("authorization"));
initializeAuthenticationPlugin((Map<String, Object>) securityConfig.getData().get("authentication"));
}
private static void checkForDuplicateCoreNames(List<CoreDescriptor> cds) {
Map<String, Path> addedCores = Maps.newHashMap();
for (CoreDescriptor cd : cds) {
final String name = cd.getName();
if (addedCores.containsKey(name))
throw new SolrException(ErrorCode.SERVER_ERROR,
String.format(Locale.ROOT, "Found multiple cores with the name [%s], with instancedirs [%s] and [%s]",
name, addedCores.get(name), cd.getInstanceDir()));
addedCores.put(name, cd.getInstanceDir());
}
}
private volatile boolean isShutDown = false;
public boolean isShutDown() {
return isShutDown;
}
/**
* Stops all cores.
*/
public void shutdown() {
log.info("Shutting down CoreContainer instance="
+ System.identityHashCode(this));
isShutDown = true;
ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor);
replayUpdatesExecutor.shutdownAndAwaitTermination();
if (metricsHistoryHandler != null) {
IOUtils.closeQuietly(metricsHistoryHandler.getSolrClient());
metricsHistoryHandler.close();
}
if (metricManager != null) {
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node));
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm));
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty));
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.node), metricTag);
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jvm), metricTag);
metricManager.unregisterGauges(SolrMetricManager.getRegistryName(SolrInfoBean.Group.jetty), metricTag);
}
if (isZooKeeperAware()) {
cancelCoreRecoveries();
zkSys.zkController.publishNodeAsDown(zkSys.zkController.getNodeName());
try {
zkSys.zkController.removeEphemeralLiveNode();
} catch (Exception e) {
log.warn("Error removing live node. Continuing to close CoreContainer", e);
}
if (metricManager != null) {
metricManager.closeReporters(SolrMetricManager.getRegistryName(SolrInfoBean.Group.cluster));
}
}
try {
if (coreAdminHandler != null) coreAdminHandler.shutdown();
} catch (Exception e) {
log.warn("Error shutting down CoreAdminHandler. Continuing to close CoreContainer.", e);
}
try {
// First wake up the closer thread, it'll terminate almost immediately since it checks isShutDown.
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // wake up anyone waiting
}
if (backgroundCloser != null) { // Doesn't seem right, but tests get in here without initializing the core.
try {
while (true) {
backgroundCloser.join(15000);
if (backgroundCloser.isAlive()) {
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // there is a race we have to protect against
}
} else {
break;
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
if (log.isDebugEnabled()) {
log.debug("backgroundCloser thread was interrupted before finishing");
}
}
}
// Now clear all the cores that are being operated upon.
solrCores.close();
// It's still possible that one of the pending dynamic load operation is waiting, so wake it up if so.
// Since all the pending operations queues have been drained, there should be nothing to do.
synchronized (solrCores.getModifyLock()) {
solrCores.getModifyLock().notifyAll(); // wake up the thread
}
} finally {
try {
if (shardHandlerFactory != null) {
shardHandlerFactory.close();
}
} finally {
try {
if (updateShardHandler != null) {
updateShardHandler.close();
}
} finally {
// we want to close zk stuff last
zkSys.close();
}
}
}
// It should be safe to close the authorization plugin at this point.
try {
if(authorizationPlugin != null) {
authorizationPlugin.plugin.close();
}
} catch (IOException e) {
log.warn("Exception while closing authorization plugin.", e);
}
// It should be safe to close the authentication plugin at this point.
try {
if(authenticationPlugin != null) {
authenticationPlugin.plugin.close();
authenticationPlugin = null;
}
} catch (Exception e) {
log.warn("Exception while closing authentication plugin.", e);
}
org.apache.lucene.util.IOUtils.closeWhileHandlingException(loader); // best effort
}
public void cancelCoreRecoveries() {
List<SolrCore> cores = solrCores.getCores();
// we must cancel without holding the cores sync
// make sure we wait for any recoveries to stop
for (SolrCore core : cores) {
try {
core.getSolrCoreState().cancelRecovery();
} catch (Exception e) {
SolrException.log(log, "Error canceling recovery for core", e);
}
}
}
@Override
protected void finalize() throws Throwable {
try {
if(!isShutDown){
log.error("CoreContainer was not close prior to finalize(), indicates a bug -- POSSIBLE RESOURCE LEAK!!! instance=" + System.identityHashCode(this));
}
} finally {
super.finalize();
}
}
public CoresLocator getCoresLocator() {
return coresLocator;
}
protected SolrCore registerCore(CoreDescriptor cd, SolrCore core, boolean registerInZk, boolean skipRecovery) {
if( core == null ) {
throw new RuntimeException( "Can not register a null core." );
}
if (isShutDown) {
core.close();
throw new IllegalStateException("This CoreContainer has been closed");
}
SolrCore old = solrCores.putCore(cd, core);
/*
* set both the name of the descriptor and the name of the
* core, since the descriptors name is used for persisting.
*/
core.setName(cd.getName());
coreInitFailures.remove(cd.getName());
if( old == null || old == core) {
log.debug( "registering core: " + cd.getName() );
if (registerInZk) {
zkSys.registerInZk(core, false, skipRecovery);
}
return null;
}
else {
log.debug( "replacing core: " + cd.getName() );
old.close();
if (registerInZk) {
zkSys.registerInZk(core, false, skipRecovery);
}
return old;
}
}
/**
* Creates a new core, publishing the core state to the cluster
* @param coreName the core name
* @param parameters the core parameters
* @return the newly created core
*/
public SolrCore create(String coreName, Map<String, String> parameters) {
return create(coreName, cfg.getCoreRootDirectory().resolve(coreName), parameters, false);
}
/**
* Creates a new core in a specified instance directory, publishing the core state to the cluster
* @param coreName the core name
* @param instancePath the instance directory
* @param parameters the core parameters
* @return the newly created core
*/
public SolrCore create(String coreName, Path instancePath, Map<String, String> parameters, boolean newCollection) {
CoreDescriptor cd = new CoreDescriptor(coreName, instancePath, parameters, getContainerProperties(), isZooKeeperAware());
// TODO: There's a race here, isn't there?
// Since the core descriptor is removed when a core is unloaded, it should never be anywhere when a core is created.
if (getAllCoreNames().contains(coreName)) {
log.warn("Creating a core with existing name is not allowed");
// TODO: Shouldn't this be a BAD_REQUEST?
throw new SolrException(ErrorCode.SERVER_ERROR, "Core with name '" + coreName + "' already exists.");
}
boolean preExisitingZkEntry = false;
try {
if (getZkController() != null) {
if (!Overseer.isLegacy(getZkController().getZkStateReader())) {
if (cd.getCloudDescriptor().getCoreNodeName() == null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "non legacy mode coreNodeName missing " + parameters.toString());
}
}
preExisitingZkEntry = getZkController().checkIfCoreNodeNameAlreadyExists(cd);
}
// Much of the logic in core handling pre-supposes that the core.properties file already exists, so create it
// first and clean it up if there's an error.
coresLocator.create(this, cd);
SolrCore core = null;
try {
solrCores.waitAddPendingCoreOps(cd.getName());
core = createFromDescriptor(cd, true, newCollection);
coresLocator.persist(this, cd); // Write out the current core properties in case anything changed when the core was created
} finally {
solrCores.removeFromPendingOps(cd.getName());
}
return core;
} catch (Exception ex) {
// First clean up any core descriptor, there should never be an existing core.properties file for any core that
// failed to be created on-the-fly.
coresLocator.delete(this, cd);
if (isZooKeeperAware() && !preExisitingZkEntry) {
try {
getZkController().unregister(coreName, cd);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
SolrException.log(log, null, e);
} catch (KeeperException e) {
SolrException.log(log, null, e);
} catch (Exception e) {
SolrException.log(log, null, e);
}
}
Throwable tc = ex;
Throwable c = null;
do {
tc = tc.getCause();
if (tc != null) {
c = tc;
}
} while (tc != null);
String rootMsg = "";
if (c != null) {
rootMsg = " Caused by: " + c.getMessage();
}
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Error CREATEing SolrCore '" + coreName + "': " + ex.getMessage() + rootMsg, ex);
}
}
/**
* Creates a new core based on a CoreDescriptor.
*
* @param dcore a core descriptor
* @param publishState publish core state to the cluster if true
*
* WARNING: Any call to this method should be surrounded by a try/finally block
* that calls solrCores.waitAddPendingCoreOps(...) and solrCores.removeFromPendingOps(...)
*
* <pre>
* <code>
* try {
* solrCores.waitAddPendingCoreOps(dcore.getName());
* createFromDescriptor(...);
* } finally {
* solrCores.removeFromPendingOps(dcore.getName());
* }
* </code>
* </pre>
*
* Trying to put the waitAddPending... in this method results in Bad Things Happening due to race conditions.
* getCore() depends on getting the core returned _if_ it's in the pending list due to some other thread opening it.
   * If the core is not in the pending list and not loaded, then getCore() calls this method. Anything that checked
   * whether the core was loaded _or_ in pending ops and then, based on that result, called createFromDescriptor would
   * introduce a race condition; see getCore() for the place it would be a problem.
*
* @return the newly created core
*/
@SuppressWarnings("resource")
private SolrCore createFromDescriptor(CoreDescriptor dcore, boolean publishState, boolean newCollection) {
if (isShutDown) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "Solr has been shutdown.");
}
SolrCore core = null;
try {
MDCLoggingContext.setCoreDescriptor(this, dcore);
SolrIdentifierValidator.validateCoreName(dcore.getName());
if (zkSys.getZkController() != null) {
zkSys.getZkController().preRegister(dcore, publishState);
}
ConfigSet coreConfig = getConfigSet(dcore);
dcore.setConfigSetTrusted(coreConfig.isTrusted());
log.info("Creating SolrCore '{}' using configuration from {}, trusted={}", dcore.getName(), coreConfig.getName(), dcore.isConfigSetTrusted());
try {
core = new SolrCore(this, dcore, coreConfig);
} catch (SolrException e) {
core = processCoreCreateException(e, dcore, coreConfig);
}
// always kick off recovery if we are in non-Cloud mode
if (!isZooKeeperAware() && core.getUpdateHandler().getUpdateLog() != null) {
core.getUpdateHandler().getUpdateLog().recoverFromLog();
}
registerCore(dcore, core, publishState, newCollection);
return core;
} catch (Exception e) {
coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
if (e instanceof ZkController.NotInClusterStateException && !newCollection) {
        // this mostly happens when the core was deleted while this node was down
unload(dcore.getName(), true, true, true);
throw e;
}
solrCores.removeCoreDescriptor(dcore);
final SolrException solrException = new SolrException(ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e);
if(core != null && !core.isClosed())
IOUtils.closeQuietly(core);
throw solrException;
} catch (Throwable t) {
SolrException e = new SolrException(ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t);
coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
solrCores.removeCoreDescriptor(dcore);
if(core != null && !core.isClosed())
IOUtils.closeQuietly(core);
throw t;
} finally {
MDCLoggingContext.clear();
}
}
public boolean isSharedFs(CoreDescriptor cd) {
try (SolrCore core = this.getCore(cd.getName())) {
if (core != null) {
return core.getDirectoryFactory().isSharedStorage();
} else {
ConfigSet configSet = getConfigSet(cd);
return DirectoryFactory.loadDirectoryFactory(configSet.getSolrConfig(), this, null).isSharedStorage();
}
}
}
private ConfigSet getConfigSet(CoreDescriptor cd) {
return coreConfigService.getConfig(cd);
}
/**
* Take action when we failed to create a SolrCore. If error is due to corrupt index, try to recover. Various recovery
* strategies can be specified via system properties "-DCoreInitFailedAction={fromleader, none}"
*
* @see CoreInitFailedAction
*
* @param original
* the problem seen when loading the core the first time.
* @param dcore
* core descriptor for the core to create
* @param coreConfig
* core config for the core to create
   * @return a replacement SolrCore, created after the index has been reset, if recovery was possible
* @throws SolrException
* rethrows the original exception if we will not attempt to recover, throws a new SolrException with the
* original exception as a suppressed exception if there is a second problem creating the solr core.
*/
private SolrCore processCoreCreateException(SolrException original, CoreDescriptor dcore, ConfigSet coreConfig) {
// Traverse full chain since CIE may not be root exception
Throwable cause = original;
while ((cause = cause.getCause()) != null) {
if (cause instanceof CorruptIndexException) {
break;
}
}
// If no CorruptIndexException, nothing we can try here
if (cause == null) throw original;
CoreInitFailedAction action = CoreInitFailedAction.valueOf(System.getProperty(CoreInitFailedAction.class.getSimpleName(), "none"));
log.debug("CorruptIndexException while creating core, will attempt to repair via {}", action);
switch (action) {
case fromleader: // Recovery from leader on a CorruptedIndexException
if (isZooKeeperAware()) {
CloudDescriptor desc = dcore.getCloudDescriptor();
try {
Replica leader = getZkController().getClusterState()
.getCollection(desc.getCollectionName())
.getSlice(desc.getShardId())
.getLeader();
if (leader != null && leader.getState() == State.ACTIVE) {
log.info("Found active leader, will attempt to create fresh core and recover.");
resetIndexDirectory(dcore, coreConfig);
// the index of this core is emptied, its term should be set to 0
getZkController().getShardTerms(desc.getCollectionName(), desc.getShardId()).setTermToZero(desc.getCoreNodeName());
return new SolrCore(this, dcore, coreConfig);
}
} catch (SolrException se) {
se.addSuppressed(original);
throw se;
}
}
throw original;
case none:
throw original;
default:
log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
action, Arrays.asList(CoreInitFailedAction.values()));
throw original;
}
}
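  // Illustrative note (not part of the original source): the recovery strategy chosen in
  // processCoreCreateException() above comes solely from a JVM system property named after the
  // CoreInitFailedAction enum. For example, starting the node with
  //
  //   -DCoreInitFailedAction=fromleader
  //
  // makes a CorruptIndexException reset the index directory and recover from the shard leader,
  // while the default value ("none") simply rethrows the original failure.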
/**
   * Write a new index directory for a SolrCore, but do so without loading it.
*/
private void resetIndexDirectory(CoreDescriptor dcore, ConfigSet coreConfig) {
SolrConfig config = coreConfig.getSolrConfig();
String registryName = SolrMetricManager.getRegistryName(SolrInfoBean.Group.core, dcore.getName());
DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(config, this, registryName);
String dataDir = SolrCore.findDataDir(df, null, config, dcore);
String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
SolrCore.modifyIndexProps(df, dataDir, config, tmpIdxDirName);
// Free the directory object that we had to create for this
Directory dir = null;
try {
dir = df.get(dataDir, DirContext.META_DATA, config.indexConfig.lockType);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
} finally {
try {
df.release(dir);
df.doneWithDirectory(dir);
} catch (IOException e) {
SolrException.log(log, e);
}
}
}
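  // Orientation comment (hedged, not in the original source): the modifyIndexProps() call above
  // rewrites <dataDir>/index.properties so that its "index" property points at the freshly
  // generated directory name, e.g. something like
  //
  //   index=index.20190301120000000
  //
  // so the next load of this core starts from an empty index instead of the corrupt one.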
/**
* @return a Collection of registered SolrCores
*/
public Collection<SolrCore> getCores() {
return solrCores.getCores();
}
/**
* Gets the cores that are currently loaded, i.e. cores that have
* 1: loadOnStartup=true and are either not-transient or, if transient, have been loaded and have not been aged out
* 2: loadOnStartup=false and have been loaded but are either non-transient or have not been aged out.
*
* Put another way, this will not return any names of cores that are lazily loaded but have not been called for yet
* or are transient and either not loaded or have been swapped out.
*
*/
public Collection<String> getLoadedCoreNames() {
return solrCores.getLoadedCoreNames();
}
/** This method is currently experimental.
*
   * @return a Collection of the names that a specific core object is mapped to; there may be more than one.
*/
public Collection<String> getNamesForCore(SolrCore core) {
return solrCores.getNamesForCore(core);
}
/**
* get a list of all the cores that are currently known, whether currently loaded or not
* @return a list of all the available core names in either permanent or transient cores
*
*/
public Collection<String> getAllCoreNames() {
return solrCores.getAllCoreNames();
}
/**
   * Returns an immutable Map of Exceptions that occurred when initializing
   * SolrCores (either at startup, or due to runtime requests to create cores)
* keyed off of the name (String) of the SolrCore that had the Exception
* during initialization.
* <p>
* While the Map returned by this method is immutable and will not change
* once returned to the client, the source data used to generate this Map
* can be changed as various SolrCore operations are performed:
* </p>
* <ul>
* <li>Failed attempts to create new SolrCores will add new Exceptions.</li>
* <li>Failed attempts to re-create a SolrCore using a name already contained in this Map will replace the Exception.</li>
* <li>Failed attempts to reload a SolrCore will cause an Exception to be added to this list -- even though the existing SolrCore with that name will continue to be available.</li>
   * <li>Successful attempts to re-create a SolrCore using a name already contained in this Map will remove the Exception.</li>
* <li>Registering an existing SolrCore with a name already contained in this Map (ie: ALIAS or SWAP) will remove the Exception.</li>
* </ul>
*/
public Map<String, CoreLoadFailure> getCoreInitFailures() {
return ImmutableMap.copyOf(coreInitFailures);
}
// ---------------- Core name related methods ---------------
private CoreDescriptor reloadCoreDescriptor(CoreDescriptor oldDesc) {
if (oldDesc == null) {
return null;
}
CorePropertiesLocator cpl = new CorePropertiesLocator(null);
CoreDescriptor ret = cpl.buildCoreDescriptor(oldDesc.getInstanceDir().resolve(PROPERTIES_FILENAME), this);
// Ok, this little jewel is all because we still create core descriptors on the fly from lists of properties
// in tests particularly. Theoretically, there should be _no_ way to create a CoreDescriptor in the new world
// of core discovery without writing the core.properties file out first.
//
// TODO: remove core.properties from the conf directory in test files, it's in a bad place there anyway.
if (ret == null) {
oldDesc.loadExtraProperties(); // there may be changes to extra properties that we need to pick up.
return oldDesc;
}
// The CloudDescriptor bit here is created in a very convoluted way, requiring access to private methods
// in ZkController. When reloading, this behavior is identical to what used to happen where a copy of the old
// CoreDescriptor was just re-used.
if (ret.getCloudDescriptor() != null) {
ret.getCloudDescriptor().reload(oldDesc.getCloudDescriptor());
}
return ret;
}
/**
* Recreates a SolrCore.
* While the new core is loading, requests will continue to be dispatched to
* and processed by the old core
*
* @param name the name of the SolrCore to reload
*/
public void reload(String name) {
SolrCore core = solrCores.getCoreFromAnyList(name, false);
if (core != null) {
// The underlying core properties files may have changed, we don't really know. So we have a (perhaps) stale
// CoreDescriptor and we need to reload it from the disk files
CoreDescriptor cd = reloadCoreDescriptor(core.getCoreDescriptor());
solrCores.addCoreDescriptor(cd);
try {
solrCores.waitAddPendingCoreOps(cd.getName());
ConfigSet coreConfig = coreConfigService.getConfig(cd);
log.info("Reloading SolrCore '{}' using configuration from {}", cd.getName(), coreConfig.getName());
SolrCore newCore = core.reload(coreConfig);
registerCore(cd, newCore, false, false);
if (getZkController() != null) {
DocCollection docCollection = getZkController().getClusterState().getCollection(cd.getCollectionName());
Replica replica = docCollection.getReplica(cd.getCloudDescriptor().getCoreNodeName());
assert replica != null;
if (replica.getType() == Replica.Type.TLOG) { //TODO: needed here?
getZkController().stopReplicationFromLeader(core.getName());
if (!cd.getCloudDescriptor().isLeader()) {
getZkController().startReplicationFromLeader(newCore.getName(), true);
}
} else if(replica.getType() == Replica.Type.PULL) {
getZkController().stopReplicationFromLeader(core.getName());
getZkController().startReplicationFromLeader(newCore.getName(), false);
}
}
} catch (SolrCoreState.CoreIsClosedException e) {
throw e;
} catch (Exception e) {
coreInitFailures.put(cd.getName(), new CoreLoadFailure(cd, e));
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to reload core [" + cd.getName() + "]", e);
}
finally {
solrCores.removeFromPendingOps(cd.getName());
}
} else {
CoreLoadFailure clf = coreInitFailures.get(name);
if (clf != null) {
try {
solrCores.waitAddPendingCoreOps(clf.cd.getName());
createFromDescriptor(clf.cd, true, false);
} finally {
solrCores.removeFromPendingOps(clf.cd.getName());
}
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name );
}
}
}
/**
* Swaps two SolrCore descriptors.
*/
public void swap(String n0, String n1) {
if( n0 == null || n1 == null ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Can not swap unnamed cores." );
}
solrCores.swap(n0, n1);
coresLocator.swap(this, solrCores.getCoreDescriptor(n0), solrCores.getCoreDescriptor(n1));
log.info("swapped: " + n0 + " with " + n1);
}
/**
* Unload a core from this container, leaving all files on disk
* @param name the name of the core to unload
*/
public void unload(String name) {
unload(name, false, false, false);
}
/**
* Unload a core from this container, optionally removing the core's data and configuration
*
* @param name the name of the core to unload
* @param deleteIndexDir if true, delete the core's index on close
* @param deleteDataDir if true, delete the core's data directory on close
* @param deleteInstanceDir if true, delete the core's instance directory on close
*/
public void unload(String name, boolean deleteIndexDir, boolean deleteDataDir, boolean deleteInstanceDir) {
CoreDescriptor cd = solrCores.getCoreDescriptor(name);
if (name != null) {
// check for core-init errors first
CoreLoadFailure loadFailure = coreInitFailures.remove(name);
if (loadFailure != null) {
// getting the index directory requires opening a DirectoryFactory with a SolrConfig, etc,
// which we may not be able to do because of the init error. So we just go with what we
// can glean from the CoreDescriptor - datadir and instancedir
SolrCore.deleteUnloadedCore(loadFailure.cd, deleteDataDir, deleteInstanceDir);
// If last time around we didn't successfully load, make sure that all traces of the coreDescriptor are gone.
if (cd != null) {
solrCores.removeCoreDescriptor(cd);
coresLocator.delete(this, cd);
}
return;
}
}
if (cd == null) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot unload non-existent core [" + name + "]");
}
boolean close = solrCores.isLoadedNotPendingClose(name);
SolrCore core = solrCores.remove(name);
solrCores.removeCoreDescriptor(cd);
coresLocator.delete(this, cd);
if (core == null) {
// transient core
SolrCore.deleteUnloadedCore(cd, deleteDataDir, deleteInstanceDir);
return;
}
// delete metrics specific to this core
metricManager.removeRegistry(core.getCoreMetricManager().getRegistryName());
if (zkSys.getZkController() != null) {
// cancel recovery in cloud mode
core.getSolrCoreState().cancelRecovery();
if (cd.getCloudDescriptor().getReplicaType() == Replica.Type.PULL
|| cd.getCloudDescriptor().getReplicaType() == Replica.Type.TLOG) {
// Stop replication if this is part of a pull/tlog replica before closing the core
zkSys.getZkController().stopReplicationFromLeader(name);
}
}
core.unloadOnClose(cd, deleteIndexDir, deleteDataDir, deleteInstanceDir);
if (close)
core.closeAndWait();
if (zkSys.getZkController() != null) {
try {
zkSys.getZkController().unregister(name, cd);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVER_ERROR, "Interrupted while unregistering core [" + name + "] from cloud state");
} catch (KeeperException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error unregistering core [" + name + "] from cloud state", e);
}
}
}
public void rename(String name, String toName) {
SolrIdentifierValidator.validateCoreName(toName);
try (SolrCore core = getCore(name)) {
if (core != null) {
String oldRegistryName = core.getCoreMetricManager().getRegistryName();
String newRegistryName = SolrCoreMetricManager.createRegistryName(core, toName);
metricManager.swapRegistries(oldRegistryName, newRegistryName);
// The old coreDescriptor is obsolete, so remove it. registerCore will put it back.
CoreDescriptor cd = core.getCoreDescriptor();
solrCores.removeCoreDescriptor(cd);
cd.setProperty("name", toName);
solrCores.addCoreDescriptor(cd);
core.setName(toName);
registerCore(cd, core, true, false);
SolrCore old = solrCores.remove(name);
coresLocator.rename(this, old.getCoreDescriptor(), core.getCoreDescriptor());
}
}
}
/**
* Get the CoreDescriptors for all cores managed by this container
* @return a List of CoreDescriptors
*/
public List<CoreDescriptor> getCoreDescriptors() {
return solrCores.getCoreDescriptors();
}
public CoreDescriptor getCoreDescriptor(String coreName) {
return solrCores.getCoreDescriptor(coreName);
}
public Path getCoreRootDirectory() {
return cfg.getCoreRootDirectory();
}
/**
* Gets a core by name and increase its refcount.
*
* @see SolrCore#close()
* @param name the core name
* @return the core if found, null if a SolrCore by this name does not exist
* @exception SolrCoreInitializationException if a SolrCore with this name failed to be initialized
*/
public SolrCore getCore(String name) {
// Do this in two phases since we don't want to lock access to the cores over a load.
SolrCore core = solrCores.getCoreFromAnyList(name, true);
// If a core is loaded, we're done just return it.
if (core != null) {
return core;
}
// If it's not yet loaded, we can check if it's had a core init failure and "do the right thing"
CoreDescriptor desc = solrCores.getCoreDescriptor(name);
// if there was an error initializing this core, throw a 500
// error with the details for clients attempting to access it.
CoreLoadFailure loadFailure = getCoreInitFailures().get(name);
if (null != loadFailure) {
throw new SolrCoreInitializationException(name, loadFailure.exception);
}
// This is a bit of awkwardness where SolrCloud and transient cores don't play nice together. For transient cores,
// we have to allow them to be created at any time there hasn't been a core load failure (use reload to cure that).
// But for TestConfigSetsAPI.testUploadWithScriptUpdateProcessor, this needs to _not_ try to load the core if
    // the core is null and there was an error. If you change this, be sure to run both TestConfigSetsAPI and
// TestLazyCores
if (desc == null || zkSys.getZkController() != null) return null;
// This will put an entry in pending core ops if the core isn't loaded. Here's where moving the
// waitAddPendingCoreOps to createFromDescriptor would introduce a race condition.
core = solrCores.waitAddPendingCoreOps(name);
if (isShutDown) return null; // We're quitting, so stop. This needs to be after the wait above since we may come off
// the wait as a consequence of shutting down.
try {
if (core == null) {
if (zkSys.getZkController() != null) {
zkSys.getZkController().throwErrorIfReplicaReplaced(desc);
}
core = createFromDescriptor(desc, true, false); // This should throw an error if it fails.
}
core.open();
}
finally {
solrCores.removeFromPendingOps(name);
}
return core;
}
public BlobRepository getBlobRepository(){
return blobRepository;
}
/**
* If using asyncSolrCoreLoad=true, calling this after {@link #load()} will
* not return until all cores have finished loading.
*
* @param timeoutMs timeout, upon which method simply returns
*/
public void waitForLoadingCoresToFinish(long timeoutMs) {
solrCores.waitForLoadingCoresToFinish(timeoutMs);
}
public void waitForLoadingCore(String name, long timeoutMs) {
solrCores.waitForLoadingCoreToFinish(name, timeoutMs);
}
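  // Usage sketch (hypothetical caller; the timeout value is illustrative only): a container
  // started with asyncSolrCoreLoad=true can block until its startup cores are available with
  //
  //   CoreContainer cc = ...;
  //   cc.load();
  //   cc.waitForLoadingCoresToFinish(30 * 1000); // returns as soon as all cores finish loading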
// ---------------- CoreContainer request handlers --------------
protected <T> T createHandler(String path, String handlerClass, Class<T> clazz) {
T handler = loader.newInstance(handlerClass, clazz, null, new Class[] { CoreContainer.class }, new Object[] { this });
if (handler instanceof SolrRequestHandler) {
containerHandlers.put(path, (SolrRequestHandler)handler);
}
if (handler instanceof SolrMetricProducer) {
((SolrMetricProducer)handler).initializeMetrics(metricManager, SolrInfoBean.Group.node.toString(), metricTag, path);
}
return handler;
}
public CoreAdminHandler getMultiCoreHandler() {
return coreAdminHandler;
}
public CollectionsHandler getCollectionsHandler() {
return collectionsHandler;
}
public HealthCheckHandler getHealthCheckHandler() { return healthCheckHandler; }
public InfoHandler getInfoHandler() {
return infoHandler;
}
public ConfigSetsHandler getConfigSetsHandler() {
return configSetsHandler;
}
public String getHostName() {
return this.hostName;
}
/**
* Gets the alternate path for multicore handling:
* This is used in case there is a registered unnamed core (aka name is "") to
* declare an alternate way of accessing named cores.
* This can also be used in a pseudo single-core environment so admins can prepare
* a new version before swapping.
*/
public String getManagementPath() {
return cfg.getManagementPath();
}
public LogWatcher getLogging() {
return logging;
}
/**
* Determines whether the core is already loaded or not but does NOT load the core
*
*/
public boolean isLoaded(String name) {
return solrCores.isLoaded(name);
}
public boolean isLoadedNotPendingClose(String name) {
return solrCores.isLoadedNotPendingClose(name);
}
/**
* Gets a solr core descriptor for a core that is not loaded. Note that if the caller calls this on a
* loaded core, the unloaded descriptor will be returned.
*
* @param cname - name of the unloaded core descriptor to load. NOTE:
* @return a coreDescriptor. May return null
*/
public CoreDescriptor getUnloadedCoreDescriptor(String cname) {
return solrCores.getUnloadedCoreDescriptor(cname);
}
public String getSolrHome() {
return solrHome;
}
public boolean isZooKeeperAware() {
return zkSys.getZkController() != null;
}
public ZkController getZkController() {
return zkSys.getZkController();
}
public NodeConfig getConfig() {
return cfg;
}
/** The default ShardHandlerFactory used to communicate with other solr instances */
public ShardHandlerFactory getShardHandlerFactory() {
return shardHandlerFactory;
}
public UpdateShardHandler getUpdateShardHandler() {
return updateShardHandler;
}
public SolrResourceLoader getResourceLoader() {
return loader;
}
public boolean isCoreLoading(String name) {
return solrCores.isCoreLoading(name);
}
public AuthorizationPlugin getAuthorizationPlugin() {
return authorizationPlugin == null ? null : authorizationPlugin.plugin;
}
public AuthenticationPlugin getAuthenticationPlugin() {
return authenticationPlugin == null ? null : authenticationPlugin.plugin;
}
public NodeConfig getNodeConfig() {
return cfg;
}
public long getStatus() {
return status;
}
  // Occasionally we need to access the transient cache handler in places other than coreContainer.
public TransientSolrCoreCache getTransientCache() {
return solrCores.getTransientCacheHandler();
}
/**
*
* @param cd CoreDescriptor, presumably a deficient one
* @param prop The property that needs to be repaired.
   * @return true if we were able to successfully persist the repaired coreDescriptor, false otherwise.
*
* See SOLR-11503, This can be removed when there's no chance we'll need to upgrade a
* Solr installation created with legacyCloud=true from 6.6.1 through 7.1
*/
public boolean repairCoreProperty(CoreDescriptor cd, String prop) {
// So far, coreNodeName is the only property that we need to repair, this may get more complex as other properties
// are added.
if (CoreDescriptor.CORE_NODE_NAME.equals(prop) == false) {
throw new SolrException(ErrorCode.SERVER_ERROR,
String.format(Locale.ROOT,"The only supported property for repair is currently [%s]",
CoreDescriptor.CORE_NODE_NAME));
}
// Try to read the coreNodeName from the cluster state.
String coreName = cd.getName();
DocCollection coll = getZkController().getZkStateReader().getClusterState().getCollection(cd.getCollectionName());
for (Replica rep : coll.getReplicas()) {
if (coreName.equals(rep.getCoreName())) {
log.warn("Core properties file for node {} found with no coreNodeName, attempting to repair with value {}. See SOLR-11503. " +
"This message should only appear if upgrading from collections created Solr 6.6.1 through 7.1.",
rep.getCoreName(), rep.getName());
cd.getCloudDescriptor().setCoreNodeName(rep.getName());
coresLocator.persist(this, cd);
return true;
}
}
log.error("Could not repair coreNodeName in core.properties file for core {}", coreName);
return false;
}
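  // Sketch of the repair performed above (property values are hypothetical): a core.properties
  // file that lacks its coreNodeName, e.g.
  //
  //   name=collection1_shard1_replica_n1
  //   collection=collection1
  //   shard=shard1
  //
  // has the matching replica's name from cluster state added as
  //
  //   coreNodeName=core_node2
  //
  // and is then written back out via coresLocator.persist().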
/**
   * @param solrCore the core against which we check if there has been a tragic exception
   * @return whether this Solr core has a tragic exception
*/
public boolean checkTragicException(SolrCore solrCore) {
Throwable tragicException;
try {
tragicException = solrCore.getSolrCoreState().getTragicException();
} catch (IOException e) {
// failed to open an indexWriter
tragicException = e;
}
if (tragicException != null) {
if (isZooKeeperAware()) {
getZkController().giveupLeadership(solrCore.getCoreDescriptor(), tragicException);
}
}
return tragicException != null;
}
}
class CloserThread extends Thread {
CoreContainer container;
SolrCores solrCores;
NodeConfig cfg;
CloserThread(CoreContainer container, SolrCores solrCores, NodeConfig cfg) {
this.container = container;
this.solrCores = solrCores;
this.cfg = cfg;
}
// It's important that this be the _only_ thread removing things from pendingDynamicCloses!
// This is single-threaded, but I tried a multi-threaded approach and didn't see any performance gains, so
// there's no good justification for the complexity. I suspect that the locking on things like DefaultSolrCoreState
  // essentially creates a single-threaded process anyway.
@Override
public void run() {
while (! container.isShutDown()) {
synchronized (solrCores.getModifyLock()) { // need this so we can wait and be awoken.
try {
solrCores.getModifyLock().wait();
} catch (InterruptedException e) {
// Well, if we've been told to stop, we will. Otherwise, continue on and check to see if there are
// any cores to close.
}
}
for (SolrCore removeMe = solrCores.getCoreToClose();
removeMe != null && !container.isShutDown();
removeMe = solrCores.getCoreToClose()) {
try {
removeMe.close();
} finally {
solrCores.removeFromPendingOps(removeMe.getName());
}
}
}
}
}
| 1 | 27,769 | Is this second check necessary? we know that just after the plugin was created its metricRegistry is null, it's set only after `initializeMetrics` has been called. | apache-lucene-solr | java |
@@ -130,6 +130,16 @@ TalkActionResult_t TalkActions::playerSaySpell(Player* player, SpeakClasses type
}
}
+ if (it->second.fromLua) {
+ if (it->second.getNeedAccess() && !player->getGroup()->access) {
+ return TALKACTION_CONTINUE;
+ }
+
+ if (player->getAccountType() < it->second.getRequiredAccountType()) {
+ return TALKACTION_BREAK;
+ }
+ }
+
if (it->second.executeSay(player, words, param, type)) {
return TALKACTION_CONTINUE;
} else { | 1 | /**
* The Forgotten Server - a free and open-source MMORPG server emulator
* Copyright (C) 2019 Mark Samman <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "otpch.h"
#include "player.h"
#include "talkaction.h"
#include "pugicast.h"
TalkActions::TalkActions()
: scriptInterface("TalkAction Interface")
{
scriptInterface.initState();
}
TalkActions::~TalkActions()
{
clear(false);
}
void TalkActions::clear(bool fromLua)
{
for (auto it = talkActions.begin(); it != talkActions.end(); ) {
if (fromLua == it->second.fromLua) {
it = talkActions.erase(it);
} else {
++it;
}
}
reInitState(fromLua);
}
LuaScriptInterface& TalkActions::getScriptInterface()
{
return scriptInterface;
}
std::string TalkActions::getScriptBaseName() const
{
return "talkactions";
}
Event_ptr TalkActions::getEvent(const std::string& nodeName)
{
if (strcasecmp(nodeName.c_str(), "talkaction") != 0) {
return nullptr;
}
return Event_ptr(new TalkAction(&scriptInterface));
}
bool TalkActions::registerEvent(Event_ptr event, const pugi::xml_node&)
{
TalkAction_ptr talkAction{static_cast<TalkAction*>(event.release())}; // event is guaranteed to be a TalkAction
std::vector<std::string> words = talkAction->getWordsMap();
for (size_t i = 0; i < words.size(); i++) {
if (i == words.size() - 1) {
talkActions.emplace(words[i], std::move(*talkAction));
} else {
talkActions.emplace(words[i], *talkAction);
}
}
return true;
}
bool TalkActions::registerLuaEvent(TalkAction* event)
{
TalkAction_ptr talkAction{ event };
std::vector<std::string> words = talkAction->getWordsMap();
for (size_t i = 0; i < words.size(); i++) {
if (i == words.size() - 1) {
talkActions.emplace(words[i], std::move(*talkAction));
} else {
talkActions.emplace(words[i], *talkAction);
}
}
return true;
}
TalkActionResult_t TalkActions::playerSaySpell(Player* player, SpeakClasses type, const std::string& words) const
{
size_t wordsLength = words.length();
for (auto it = talkActions.begin(); it != talkActions.end(); ) {
const std::string& talkactionWords = it->first;
size_t talkactionLength = talkactionWords.length();
if (wordsLength < talkactionLength || strncasecmp(words.c_str(), talkactionWords.c_str(), talkactionLength) != 0) {
++it;
continue;
}
std::string param;
if (wordsLength != talkactionLength) {
param = words.substr(talkactionLength);
if (param.front() != ' ') {
++it;
continue;
}
trim_left(param, ' ');
std::string separator = it->second.getSeparator();
if (separator != " ") {
if (!param.empty()) {
if (param != separator) {
++it;
continue;
} else {
param.erase(param.begin());
}
}
}
}
if (it->second.executeSay(player, words, param, type)) {
return TALKACTION_CONTINUE;
} else {
return TALKACTION_BREAK;
}
}
return TALKACTION_CONTINUE;
}
bool TalkAction::configureEvent(const pugi::xml_node& node)
{
pugi::xml_attribute wordsAttribute = node.attribute("words");
if (!wordsAttribute) {
std::cout << "[Error - TalkAction::configureEvent] Missing words for talk action or spell" << std::endl;
return false;
}
pugi::xml_attribute separatorAttribute = node.attribute("separator");
if (separatorAttribute) {
separator = pugi::cast<char>(separatorAttribute.value());
}
for (auto word : explodeString(wordsAttribute.as_string(), ";")) {
setWords(word);
}
return true;
}
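// Illustrative XML entry matching the attributes parsed above (the script attribute and the
// concrete values are hypothetical and are handled elsewhere, not by this method):
//   <talkaction words="!serverinfo;/serverinfo" separator="," script="serverinfo.lua" />
// Each ';'-separated word is stored via setWords() and later registered as its own map entry,
// so any of the spellings triggers the same talkaction.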
std::string TalkAction::getScriptEventName() const
{
return "onSay";
}
bool TalkAction::executeSay(Player* player, const std::string& words, const std::string& param, SpeakClasses type) const
{
//onSay(player, words, param, type)
if (!scriptInterface->reserveScriptEnv()) {
std::cout << "[Error - TalkAction::executeSay] Call stack overflow" << std::endl;
return false;
}
ScriptEnvironment* env = scriptInterface->getScriptEnv();
env->setScriptId(scriptId, scriptInterface);
lua_State* L = scriptInterface->getLuaState();
scriptInterface->pushFunction(scriptId);
LuaScriptInterface::pushUserdata<Player>(L, player);
LuaScriptInterface::setMetatable(L, -1, "Player");
LuaScriptInterface::pushString(L, words);
LuaScriptInterface::pushString(L, param);
lua_pushnumber(L, type);
return scriptInterface->callFunction(4);
}
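// Hypothetical Lua-side handler matching the arguments pushed above (not part of this file;
// the message constant is only illustrative):
//   function onSay(player, words, param, type)
//       player:sendTextMessage(MESSAGE_STATUS_CONSOLE_BLUE, "You said: " .. param)
//       return false -- callFunction(4) then returns false -> TALKACTION_BREAK in playerSaySpell()
//   end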
| 1 | 18,294 | shouldn't this be `return TALKACTION_BREAK;` as the player does not meet the required group access? | otland-forgottenserver | cpp |
@@ -72,6 +72,8 @@ public class CliqueMiningAcceptanceTest extends AcceptanceTestBase {
cluster.stopNode(minerNode2);
cluster.stopNode(minerNode3);
minerNode1.verify(net.awaitPeerCount(0));
+ minerNode1.verify(clique.blockIsCreatedByProposer(minerNode1));
+
minerNode1.verify(clique.noNewBlockCreated(minerNode1));
}
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.tests.acceptance.clique;
import org.hyperledger.besu.tests.acceptance.dsl.AcceptanceTestBase;
import org.hyperledger.besu.tests.acceptance.dsl.account.Account;
import org.hyperledger.besu.tests.acceptance.dsl.node.BesuNode;
import java.io.IOException;
import org.junit.Test;
public class CliqueMiningAcceptanceTest extends AcceptanceTestBase {
@Test
public void shouldMineTransactionsOnSingleNode() throws IOException {
final BesuNode minerNode = besu.createCliqueNode("miner1");
cluster.start(minerNode);
final Account sender = accounts.createAccount("account1");
final Account receiver = accounts.createAccount("account2");
minerNode.execute(accountTransactions.createTransfer(sender, 50));
cluster.verify(sender.balanceEquals(50));
minerNode.execute(accountTransactions.createIncrementalTransfers(sender, receiver, 1));
cluster.verify(receiver.balanceEquals(1));
minerNode.execute(accountTransactions.createIncrementalTransfers(sender, receiver, 2));
cluster.verify(receiver.balanceEquals(3));
}
@Test
public void shouldMineTransactionsOnMultipleNodes() throws IOException {
final BesuNode minerNode1 = besu.createCliqueNode("miner1");
final BesuNode minerNode2 = besu.createCliqueNode("miner2");
final BesuNode minerNode3 = besu.createCliqueNode("miner3");
cluster.start(minerNode1, minerNode2, minerNode3);
final Account sender = accounts.createAccount("account1");
final Account receiver = accounts.createAccount("account2");
minerNode1.execute(accountTransactions.createTransfer(sender, 50));
cluster.verify(sender.balanceEquals(50));
minerNode2.execute(accountTransactions.createIncrementalTransfers(sender, receiver, 1));
cluster.verify(receiver.balanceEquals(1));
minerNode3.execute(accountTransactions.createIncrementalTransfers(sender, receiver, 2));
cluster.verify(receiver.balanceEquals(3));
}
@Test
public void shouldStallMiningWhenInsufficientValidators() throws IOException {
final BesuNode minerNode1 = besu.createCliqueNode("miner1");
final BesuNode minerNode2 = besu.createCliqueNode("miner2");
final BesuNode minerNode3 = besu.createCliqueNode("miner3");
cluster.start(minerNode1, minerNode2, minerNode3);
cluster.stopNode(minerNode2);
cluster.stopNode(minerNode3);
minerNode1.verify(net.awaitPeerCount(0));
minerNode1.verify(clique.noNewBlockCreated(minerNode1));
}
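  // Background for the stall expectation above (an informal, approximate summary of the Clique
  // rules, not an assertion about exact constants): a signer may only seal one of every
  // floor(SIGNER_COUNT / 2) + 1 consecutive blocks, so with two of the three validators stopped
  // the remaining signer quickly runs out of turns and block production halts.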
@Test
public void shouldStillMineWhenANodeFailsAndHasSufficientValidators() throws IOException {
final BesuNode minerNode1 = besu.createCliqueNode("miner1");
final BesuNode minerNode2 = besu.createCliqueNode("miner2");
final BesuNode minerNode3 = besu.createCliqueNode("miner3");
cluster.start(minerNode1, minerNode2, minerNode3);
cluster.verifyOnActiveNodes(blockchain.reachesHeight(minerNode1, 1, 85));
cluster.stopNode(minerNode3);
cluster.verifyOnActiveNodes(net.awaitPeerCount(1));
cluster.verifyOnActiveNodes(blockchain.reachesHeight(minerNode1, 2));
cluster.verifyOnActiveNodes(clique.blockIsCreatedByProposer(minerNode1));
cluster.verifyOnActiveNodes(clique.blockIsCreatedByProposer(minerNode2));
}
}
| 1 | 24,534 | does this still work if minerNode1 has already proposed a block before 2 & 3 are stopped? | hyperledger-besu | java |
@@ -91,6 +91,12 @@ var _ topicreconciler.Interface = (*Reconciler)(nil)
func (r *Reconciler) ReconcileKind(ctx context.Context, topic *v1.Topic) reconciler.Event {
ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("topic", topic)))
+ if v, present := topic.Annotations[v1.LoggingE2ETestAnnotation]; present {
+ // This is added purely for the TestCloudLogging E2E tests, which verify that the log line
+ // is written if this annotation is present.
+ logging.FromContext(ctx).Desugar().Error("Adding log line for the TestCloudLogging E2E tests", zap.String(v1.LoggingE2EFieldName, v))
+ }
+
topic.Status.InitializeConditions()
topic.Status.ObservedGeneration = topic.Generation
| 1 | /*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package topic
import (
"context"
"encoding/json"
"fmt"
"cloud.google.com/go/pubsub"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"github.com/google/knative-gcp/pkg/tracing"
"github.com/google/knative-gcp/pkg/utils"
"knative.dev/pkg/logging"
"knative.dev/pkg/reconciler"
tracingconfig "knative.dev/pkg/tracing/config"
servingv1 "knative.dev/serving/pkg/apis/serving/v1"
servinglisters "knative.dev/serving/pkg/client/listers/serving/v1"
gstatus "google.golang.org/grpc/status"
"github.com/google/knative-gcp/pkg/apis/configs/dataresidency"
v1 "github.com/google/knative-gcp/pkg/apis/intevents/v1"
topicreconciler "github.com/google/knative-gcp/pkg/client/injection/reconciler/intevents/v1/topic"
listers "github.com/google/knative-gcp/pkg/client/listers/intevents/v1"
"github.com/google/knative-gcp/pkg/reconciler/identity"
"github.com/google/knative-gcp/pkg/reconciler/intevents"
"github.com/google/knative-gcp/pkg/reconciler/intevents/topic/resources"
reconcilerutilspubsub "github.com/google/knative-gcp/pkg/reconciler/utils/pubsub"
)
const (
resourceGroup = "topics.internal.events.cloud.google.com"
deleteTopicFailed = "TopicDeleteFailed"
deleteWorkloadIdentityFailed = "WorkloadIdentityDeleteFailed"
reconciledPublisherFailedReason = "PublisherReconcileFailed"
reconciledSuccessReason = "TopicReconciled"
reconciledTopicFailedReason = "TopicReconcileFailed"
workloadIdentityFailed = "WorkloadIdentityReconcileFailed"
)
// Reconciler implements controller.Reconciler for Topic resources.
type Reconciler struct {
*intevents.PubSubBase
// identity reconciler for reconciling workload identity.
*identity.Identity
// data residency store
dataresidencyStore *dataresidency.Store
// topicLister index properties about topics.
topicLister listers.TopicLister
// serviceLister index properties about services.
serviceLister servinglisters.ServiceLister
// serviceAccountLister for reading serviceAccounts.
serviceAccountLister corev1listers.ServiceAccountLister
publisherImage string
tracingConfig *tracingconfig.Config
// createClientFn is the function used to create the Pub/Sub client that interacts with Pub/Sub.
// This is needed so that we can inject a mock client for UTs purposes.
createClientFn reconcilerutilspubsub.CreateFn
}
// Check that our Reconciler implements Interface.
var _ topicreconciler.Interface = (*Reconciler)(nil)
func (r *Reconciler) ReconcileKind(ctx context.Context, topic *v1.Topic) reconciler.Event {
ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("topic", topic)))
topic.Status.InitializeConditions()
topic.Status.ObservedGeneration = topic.Generation
// If topic doesn't have ownerReference and ServiceAccountName is provided, reconcile workload identity.
// Otherwise, its owner will reconcile workload identity.
if (topic.OwnerReferences == nil || len(topic.OwnerReferences) == 0) && topic.Spec.ServiceAccountName != "" {
if _, err := r.Identity.ReconcileWorkloadIdentity(ctx, topic.Spec.Project, topic); err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, workloadIdentityFailed, "Failed to reconcile Pub/Sub topic workload identity: %s", err.Error())
}
}
if err := r.reconcileTopic(ctx, topic); err != nil {
topic.Status.MarkNoTopic(reconciledTopicFailedReason, "Failed to reconcile Pub/Sub topic: %s", err.Error())
return reconciler.NewEvent(corev1.EventTypeWarning, reconciledTopicFailedReason, "Failed to reconcile Pub/Sub topic: %s", err.Error())
}
topic.Status.MarkTopicReady()
// Set the topic being used.
topic.Status.TopicID = topic.Spec.Topic
// If enablePublisher is false, then skip creating the publisher.
if enablePublisher := topic.Spec.EnablePublisher; enablePublisher != nil && !*enablePublisher {
return reconciler.NewEvent(corev1.EventTypeNormal, reconciledSuccessReason, `Topic reconciled: "%s/%s"`, topic.Namespace, topic.Name)
}
err, svc := r.reconcilePublisher(ctx, topic)
if err != nil {
topic.Status.MarkPublisherNotDeployed(reconciledPublisherFailedReason, "Failed to reconcile Publisher: %s", err.Error())
return reconciler.NewEvent(corev1.EventTypeWarning, reconciledPublisherFailedReason, "Failed to reconcile Publisher: %s", err.Error())
}
// Update the topic.
topic.Status.PropagatePublisherStatus(&svc.Status)
return reconciler.NewEvent(corev1.EventTypeNormal, reconciledSuccessReason, `Topic reconciled: "%s/%s"`, topic.Namespace, topic.Name)
}
func (r *Reconciler) reconcileTopic(ctx context.Context, topic *v1.Topic) error {
if topic.Status.ProjectID == "" {
projectID, err := utils.ProjectIDOrDefault(topic.Spec.Project)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
return err
}
// Set the projectID in the status.
topic.Status.ProjectID = projectID
}
// Auth to GCP is handled by having the GOOGLE_APPLICATION_CREDENTIALS environment variable
// pointing at a credential file.
client, err := r.createClientFn(ctx, topic.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return err
}
defer client.Close()
t := client.Topic(topic.Spec.Topic)
exists, err := t.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
return err
}
if !exists {
if topic.Spec.PropagationPolicy == v1.TopicPolicyNoCreateNoDelete {
logging.FromContext(ctx).Desugar().Error("Topic does not exist and the topic policy doesn't allow creation")
return fmt.Errorf("Topic %q does not exist and the topic policy doesn't allow creation", topic.Spec.Topic)
} else {
topicConfig := &pubsub.TopicConfig{}
if r.dataresidencyStore != nil {
if dataresidencyCfg := r.dataresidencyStore.Load(); dataresidencyCfg != nil {
if dataresidencyCfg.DataResidencyDefaults.ComputeAllowedPersistenceRegions(topicConfig) {
r.Logger.Debugw("Updated Topic Config AllowedPersistenceRegions for topic reconciler", zap.Any("topicConfig", *topicConfig))
}
}
}
// Create a new topic with the given name.
t, err = client.CreateTopicWithConfig(ctx, topic.Spec.Topic, topicConfig)
if err != nil {
// For some reason (maybe some cache invalidation thing), sometimes t.Exists returns that the topic
// doesn't exist but it actually does. When we try to create it again, it fails with an AlreadyExists
// reason. We check for that error here. If it happens, then return nil.
if st, ok := gstatus.FromError(err); !ok {
logging.FromContext(ctx).Desugar().Error("Failed from Pub/Sub client while creating topic", zap.Error(err))
return err
} else if st.Code() != codes.AlreadyExists {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub topic", zap.Error(err))
return err
}
return nil
}
}
}
return nil
}
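// Illustrative Topic manifest showing the propagation policy this reconciler branches on.
// The field spellings below follow the constant names used in this file but are shown only
// as an assumption for orientation, not as authoritative API documentation:
//
//	apiVersion: internal.events.cloud.google.com/v1
//	kind: Topic
//	metadata:
//	  name: example-topic
//	spec:
//	  topic: example-pubsub-topic
//	  propagationPolicy: CreateDelete   # created in reconcileTopic, deleted again in FinalizeKind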
// deleteTopic looks at the status.TopicID and if non-empty,
// hence indicating that we have created a topic successfully,
// remove it.
func (r *Reconciler) deleteTopic(ctx context.Context, topic *v1.Topic) error {
if topic.Status.TopicID == "" {
return nil
}
// At this point the project ID should have been populated in the status.
// Querying Pub/Sub as the topic could have been deleted outside the cluster (e.g, through gcloud).
client, err := r.createClientFn(ctx, topic.Status.ProjectID)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create Pub/Sub client", zap.Error(err))
return err
}
defer client.Close()
t := client.Topic(topic.Status.TopicID)
exists, err := t.Exists(ctx)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to verify Pub/Sub topic exists", zap.Error(err))
return err
}
if exists {
// Delete the topic.
if err := t.Delete(ctx); err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to delete Pub/Sub topic", zap.Error(err))
return err
}
}
return nil
}
func (r *Reconciler) reconcilePublisher(ctx context.Context, topic *v1.Topic) (error, *servingv1.Service) {
name := resources.GeneratePublisherName(topic)
existing, err := r.serviceLister.Services(topic.Namespace).Get(name)
if err != nil {
if !apierrors.IsNotFound(err) {
logging.FromContext(ctx).Desugar().Error("Unable to get an existing publisher", zap.Error(err))
return err, nil
}
existing = nil
} else if !metav1.IsControlledBy(existing, topic) {
p, _ := json.Marshal(existing)
logging.FromContext(ctx).Desugar().Error("Topic does not own publisher service", zap.Any("publisher", p))
return fmt.Errorf("Topic %q does not own publisher service: %q", topic.Name, name), nil
}
tracingCfg, err := tracing.ConfigToJSON(r.tracingConfig)
if err != nil {
logging.FromContext(ctx).Desugar().Error("Error serializing tracing config", zap.Error(err))
}
desired := resources.MakePublisher(&resources.PublisherArgs{
Image: r.publisherImage,
Topic: topic,
Labels: resources.GetLabels(controllerAgentName, topic.Name),
TracingConfig: tracingCfg,
})
svc := existing
if existing == nil {
svc, err = r.ServingClientSet.ServingV1().Services(topic.Namespace).Create(ctx, desired, metav1.CreateOptions{})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to create publisher", zap.Error(err))
return err, nil
}
} else if !equality.Semantic.DeepEqual(&existing.Spec, &desired.Spec) {
existing.Spec = desired.Spec
svc, err = r.ServingClientSet.ServingV1().Services(topic.Namespace).Update(ctx, existing, metav1.UpdateOptions{})
if err != nil {
logging.FromContext(ctx).Desugar().Error("Failed to update publisher", zap.Any("publisher", existing), zap.Error(err))
return err, nil
}
}
return nil, svc
}
func (r *Reconciler) UpdateFromTracingConfigMap(cfg *corev1.ConfigMap) {
if cfg == nil {
r.Logger.Error("Tracing ConfigMap is nil")
return
}
delete(cfg.Data, "_example")
tracingCfg, err := tracingconfig.NewTracingConfigFromConfigMap(cfg)
if err != nil {
r.Logger.Warnw("failed to create tracing config from configmap", zap.String("cfg.Name", cfg.Name))
return
}
r.tracingConfig = tracingCfg
r.Logger.Debugw("Updated Tracing config", zap.Any("tracingCfg", r.tracingConfig))
// TODO: requeue all Topics. See https://github.com/google/knative-gcp/issues/457.
}
func (r *Reconciler) FinalizeKind(ctx context.Context, topic *v1.Topic) reconciler.Event {
// If the topic doesn't have an ownerReference, and
// the k8s ServiceAccount exists, is bound to the default GCP ServiceAccount, and has only one ownerReference,
// remove the corresponding GCP ServiceAccount IAM policy binding.
// There is no need to delete the k8s ServiceAccount; it is handled automatically by k8s Garbage Collection.
if (topic.OwnerReferences == nil || len(topic.OwnerReferences) == 0) && topic.Spec.ServiceAccountName != "" {
if err := r.Identity.DeleteWorkloadIdentity(ctx, topic.Spec.Project, topic); err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, deleteWorkloadIdentityFailed, "Failed to delete Pub/Sub topic workload identity: %s", err.Error())
}
}
if topic.Spec.PropagationPolicy == v1.TopicPolicyCreateDelete {
logging.FromContext(ctx).Desugar().Debug("Deleting Pub/Sub topic")
if err := r.deleteTopic(ctx, topic); err != nil {
return reconciler.NewEvent(corev1.EventTypeWarning, deleteTopicFailed, "Failed to delete Pub/Sub topic: %s", err.Error())
}
}
return nil
}
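// The review note below asks for a way to make the data-residency defaulting a
// feature that can be switched off in production. A minimal sketch of such a gate,
// reusing only the store and config calls already used by the reconciler above;
// the `enabled` flag itself is hypothetical and not part of this code:
func (r *Reconciler) applyDataResidencyDefaults(topicConfig *pubsub.TopicConfig, enabled bool) bool {
	if !enabled || r.dataresidencyStore == nil {
		return false
	}
	if dataresidencyCfg := r.dataresidencyStore.Load(); dataresidencyCfg != nil {
		// Returns true when the allowed persistence regions were actually set.
		return dataresidencyCfg.DataResidencyDefaults.ComputeAllowedPersistenceRegions(topicConfig)
	}
	return false
}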
| 1 | 18,627 | I am wondering if we can either make this a feature or have a way to turn it off in production. My concern is that we might be adding more of these kind of code in the future. | google-knative-gcp | go |
@@ -960,11 +960,8 @@ class DataFrame(_Frame, Generic[T]):
# sum 12.0 NaN
#
# Aggregated output is usually pretty much small. So it is fine to directly use pandas API.
- pdf = kdf.to_pandas().transpose().reset_index()
- pdf = pdf.groupby(['level_1']).apply(
- lambda gpdf: gpdf.drop('level_1', 1).set_index('level_0').transpose()
- ).reset_index(level=1)
- pdf = pdf.drop(columns='level_1')
+ pdf = kdf.to_pandas().stack(level=1)
+ pdf.index = pdf.index.droplevel()
pdf.columns.names = [None]
pdf.index.names = [None]
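For reference, a minimal pandas-only sketch (not part of the patch) of the reshape this change performs; the input mirrors the aggregated layout shown in the context comment above:

import pandas as pd

# Aggregated output with the function names as the second column level.
pdf = pd.DataFrame(
    [[12.0, 1.0, 2.0, 8.0]],
    columns=pd.MultiIndex.from_tuples(
        [("A", "sum"), ("A", "min"), ("B", "min"), ("B", "max")]))

pdf = pdf.stack(level=1)           # move the function names into the row index
pdf.index = pdf.index.droplevel()  # drop the artificial outer row label (0)
print(pdf)
#         A    B
# max   NaN  8.0
# min   1.0  2.0
# sum  12.0  NaN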
| 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar, Iterable, Dict
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType)
from pyspark.sql.utils import AnalysisException
from pyspark.sql.window import Window
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function, align_diff_frames
from databricks.koalas.generic import _Frame
from databricks.koalas.internal import _InternalFrame, IndexMap
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.utils import column_index_level, scol_for
from databricks.koalas.typedef import as_spark_type, as_python_type
from databricks.koalas.plot import KoalasFramePlotMethods
from databricks.koalas.config import get_option
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in __repr__ and _repr_html_ in DataFrame.
# Two patterns basically seek the footer string from Pandas' repr output.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which return the same
results. Also reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.floordiv(10)
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.rfloordiv(10)
angles degrees
circle NaN 0
triangle 3.0 0
rectangle 2.0 0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
T = TypeVar('T')
if (3, 5) <= sys.version_info < (3, 7):
from typing import GenericMeta
# This is a workaround to support variadic generic in DataFrame in Python 3.5+.
# See https://github.com/python/typing/issues/193
# We wrap the input params by a tuple to mimic variadic generic.
old_getitem = GenericMeta.__getitem__ # type: ignore
def new_getitem(self, params):
if hasattr(self, "is_dataframe"):
return old_getitem(self, Tuple[params])
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
class DataFrame(_Frame, Generic[T]):
"""
Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, a Spark DataFrame, or a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, _InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(data)
elif isinstance(data, spark.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(_InternalFrame(data))
elif isinstance(data, ks.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_dataframe()
super(DataFrame, self).__init__(data._internal)
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
@property
def _sdf(self) -> spark.DataFrame:
return self._internal.sdf
def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=False):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equal the
number of columns.
Parameters
----------
sfun : either a 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
axis : used only for sanity check because Series only supports the index axis.
name : original pandas API name.
axis : axis to apply. 0 or 1, or 'index' or 'columns'.
numeric_only : boolean, default False
If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
from databricks.koalas import Series
if axis in ('index', 0, None):
exprs = []
num_args = len(signature(sfun).parameters)
for idx in self._internal.column_index:
col_sdf = self._internal.scol_for(idx)
col_type = self._internal.spark_type_for(idx)
is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
min_or_max = sfun.__name__ in ('min', 'max')
keep_column = not numeric_only or is_numeric_or_boolean or min_or_max
if keep_column:
if isinstance(col_type, BooleanType) and not min_or_max:
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(str(idx) if len(idx) > 1 else idx[0]))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
if self._internal.column_index_level > 1:
pdf.columns = pd.MultiIndex.from_tuples(self._internal.column_index)
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
# TODO: return Koalas series.
return row # Return first row as a Series
elif axis in ('columns', 1):
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only)
if len(pdf) <= limit:
return Series(pser)
@pandas_udf(returnType=as_spark_type(pser.dtype.type))
def calculate_columns_axis(*cols):
return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only)
df = self._sdf.select(calculate_columns_axis(*self._internal.data_scols).alias("0"))
return DataFrame(df)["0"]
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
# Arithmetic Operators
def _map_series_op(self, op, other):
if not isinstance(other, DataFrame) and is_sequence(other):
raise ValueError(
"%s with a sequence is currently not supported; "
"however, got %s." % (op, type(other)))
if isinstance(other, DataFrame) and self is not other:
if self._internal.column_index_level != other._internal.column_index_level:
raise ValueError('cannot join with no overlapping index names')
# Different DataFrames
def apply_op(kdf, this_column_index, that_column_index):
for this_idx, that_idx in zip(this_column_index, that_column_index):
yield (getattr(kdf[this_idx], op)(kdf[that_idx]), this_idx)
return align_diff_frames(apply_op, self, other, fillna=True, how="full")
else:
# DataFrame and Series
applied = []
for idx in self._internal.column_index:
applied.append(getattr(self[idx], op)(other))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0]
for c in applied],
column_index=[c._internal.column_index[0]
for c in applied])
return DataFrame(internal)
def __add__(self, other):
return self._map_series_op("add", other)
def __radd__(self, other):
return self._map_series_op("radd", other)
def __div__(self, other):
return self._map_series_op("div", other)
def __rdiv__(self, other):
return self._map_series_op("rdiv", other)
def __truediv__(self, other):
return self._map_series_op("truediv", other)
def __rtruediv__(self, other):
return self._map_series_op("rtruediv", other)
def __mul__(self, other):
return self._map_series_op("mul", other)
def __rmul__(self, other):
return self._map_series_op("rmul", other)
def __sub__(self, other):
return self._map_series_op("sub", other)
def __rsub__(self, other):
return self._map_series_op("rsub", other)
def __pow__(self, other):
return self._map_series_op("pow", other)
def __rpow__(self, other):
return self._map_series_op("rpow", other)
def __mod__(self, other):
return self._map_series_op("mod", other)
def __rmod__(self, other):
return self._map_series_op("rmod", other)
def __floordiv__(self, other):
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other):
return self._map_series_op("rfloordiv", other)
def add(self, other):
return self + other
# create accessor for plot
plot = CachedAccessor("plot", KoalasFramePlotMethods)
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = KoalasFramePlotMethods.hist.__doc__
add.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name='+',
equiv='dataframe + other',
reverse='radd')
def radd(self, other):
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name="+",
equiv="other + dataframe",
reverse='add')
def div(self, other):
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rdiv')
divide = div
def rdiv(self, other):
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='div')
def truediv(self, other):
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rtruediv')
def rtruediv(self, other):
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='truediv')
def mul(self, other):
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="dataframe * other",
reverse='rmul')
multiply = mul
def rmul(self, other):
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="other * dataframe",
reverse='mul')
def sub(self, other):
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="dataframe - other",
reverse='rsub')
subtract = sub
def rsub(self, other):
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="other - dataframe",
reverse='sub')
def mod(self, other):
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='dataframe % other',
reverse='rmod')
def rmod(self, other):
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='other % dataframe',
reverse='mod')
def pow(self, other):
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power of series',
op_name='**',
equiv='dataframe ** other',
reverse='rpow')
def rpow(self, other):
return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power',
op_name='**',
equiv='other ** dataframe',
reverse='pow')
def floordiv(self, other):
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='dataframe // other',
reverse='rfloordiv')
def rfloordiv(self, other):
return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='other // dataframe',
reverse='floordiv')
# Comparison Operators
def __eq__(self, other):
return self._map_series_op("eq", other)
def __ne__(self, other):
return self._map_series_op("ne", other)
def __lt__(self, other):
return self._map_series_op("lt", other)
def __le__(self, other):
return self._map_series_op("le", other)
def __ge__(self, other):
return self._map_series_op("ge", other)
def __gt__(self, other):
return self._map_series_op("gt", other)
def eq(self, other):
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False None
c False True
d False None
"""
return self == other
equals = eq
def gt(self, other):
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False None
c True False
d True None
"""
return self > other
def ge(self, other):
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True None
c True True
d True None
"""
return self >= other
def lt(self, other):
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False None
c False False
d False None
"""
return self < other
def le(self, other):
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True None
c False True
d False None
"""
return self <= other
def ne(self, other):
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True None
c True False
d True None
"""
return self != other
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: unlike pandas, it is required for `func` to specify a return type hint.
See https://docs.python.org/3/library/typing.html. For instance, as below:
>>> def function() -> int:
... return 1
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].apply(func))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
# TODO: Series support is not implemented yet.
# TODO: not all arguments are implemented comparing to Pandas' for now.
def aggregate(self, func: Union[List[str], Dict[str, List[str]]]):
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : dict or a list
a dict mapping from column name (string) to
aggregate functions (list of strings).
If a list is given, the aggregation is performed against
all columns.
Returns
-------
DataFrame
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1.0 2.0 3.0
1 4.0 5.0 6.0
2 7.0 8.0 9.0
3 NaN NaN NaN
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])[['A', 'B', 'C']]
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']]
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
"""
from databricks.koalas.groupby import GroupBy
if isinstance(func, list):
if all((isinstance(f, str) for f in func)):
func = dict([
(column, func) for column in self.columns])
else:
raise ValueError("If the given function is a list, it "
"should only contains function names as strings.")
if not isinstance(func, dict) or \
not all(isinstance(key, str) and
(isinstance(value, str) or
isinstance(value, list) and all(isinstance(v, str) for v in value))
for key, value in func.items()):
raise ValueError("aggs must be a dict mapping from column name (string) to aggregate "
"functions (list of strings).")
kdf = DataFrame(GroupBy._spark_groupby(self, func, ())) # type: DataFrame
# The codes below basically converts:
#
# A B
# sum min min max
# 0 12.0 1.0 2.0 8.0
#
# to:
# A B
# max NaN 8.0
# min 1.0 2.0
# sum 12.0 NaN
#
# Aggregated output is usually pretty much small. So it is fine to directly use pandas API.
pdf = kdf.to_pandas().transpose().reset_index()
pdf = pdf.groupby(['level_1']).apply(
lambda gpdf: gpdf.drop('level_1', 1).set_index('level_0').transpose()
).reset_index(level=1)
pdf = pdf.drop(columns='level_1')
pdf.columns.names = [None]
pdf.index.names = [None]
return DataFrame(pdf[list(func.keys())])
agg = aggregate
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self) -> Iterable:
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def items(self) -> Iterable:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default ‘NaN’
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns’ elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By
default, ‘l’ will be used for all columns except columns of numbers, which default
to ‘r’.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to ‘ascii’ on
Python 2 and ‘utf-8’ on Python 3.
decimal : str, default ‘.’
Character recognized as decimal separator, e.g. ‘,’ in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default ‘l’
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from databricks.koalas.config import get_option, set_option
>>> set_option('compute.max_rows', 1000)
>>> ks.DataFrame({'a': range(1001)}).transpose() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Current DataFrame has more than the given limit 1000 rows.
Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option'
to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
pdf = self.head(max_compute_count + 1)._to_internal_pandas()
if len(pdf) > max_compute_count:
raise ValueError(
"Current DataFrame has more then the given limit {0} rows. "
"Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' "
"to retrieve to retrieve more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive."
.format(max_compute_count))
return DataFrame(pdf.transpose())
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +------+------+------+------+------+
# |index1|index2|(a,x1)|(a,x2)|(b,x3)|
# +------+------+------+------+------+
# | y1| z1| 1| 0| 0|
# | y2| z2| 0| 50| 0|
# | y3| z3| 3| 2| 1|
# +------+------+------+------+------+
#
# Output of `exploded_df` becomes as below:
#
# +-----------------+-----------------+-----------------+-----+
# | index|__index_level_0__|__index_level_1__|value|
# +-----------------+-----------------+-----------------+-----+
# |{"a":["y1","z1"]}| a| x1| 1|
# |{"a":["y1","z1"]}| a| x2| 0|
# |{"a":["y1","z1"]}| b| x3| 0|
# |{"a":["y2","z2"]}| a| x1| 0|
# |{"a":["y2","z2"]}| a| x2| 50|
# |{"a":["y2","z2"]}| b| x3| 0|
# |{"a":["y3","z3"]}| a| x1| 3|
# |{"a":["y3","z3"]}| a| x2| 2|
# |{"a":["y3","z3"]}| b| x3| 1|
# +-----------------+-----------------+-----------------+-----+
internal_index_column = "__index_level_{}__".format
pairs = F.explode(F.array(*[
F.struct(
[F.lit(col).alias(internal_index_column(i)) for i, col in enumerate(idx)] +
[self[idx]._scol.alias("value")]
) for idx in self._internal.column_index]))
exploded_df = self._sdf.withColumn("pairs", pairs).select(
[F.to_json(F.struct(F.array([scol.cast('string')
for scol in self._internal.index_scols])
.alias('a'))).alias('index'),
F.col("pairs.*")])
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
internal_index_columns = [internal_index_column(i)
for i in range(self._internal.column_index_level)]
pivoted_df = exploded_df.groupBy(internal_index_columns).pivot('index')
transposed_df = pivoted_df.agg(F.first(F.col("value")))
new_data_columns = list(filter(lambda x: x not in internal_index_columns,
transposed_df.columns))
internal = self._internal.copy(
sdf=transposed_df,
data_columns=new_data_columns,
index_map=[(col, None) for col in internal_index_columns],
column_index=[tuple(json.loads(col)['a']) for col in new_data_columns],
column_index_names=None)
return DataFrame(internal)
T = property(transpose)
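# Illustrative only: a small PySpark sketch of the explode-then-pivot transpose
# described in the comments above. It assumes an active SparkSession passed in
# as `spark`; the column names are made up for the example, and F is the
# pyspark.sql.functions module imported at the top of this file.
def _example_transpose_with_pivot(spark):
    sdf = spark.createDataFrame([("y1", 1, 0), ("y2", 0, 50)], ["index1", "x1", "x2"])
    pairs = F.explode(F.array(
        F.struct(F.lit("x1").alias("key"), F.col("x1").alias("value")),
        F.struct(F.lit("x2").alias("key"), F.col("x2").alias("value"))))
    exploded = sdf.withColumn("pair", pairs).select("index1", "pair.key", "pair.value")
    return exploded.groupBy("key").pivot("index1").agg(F.first("value"))
    # +---+---+---+
    # |key| y1| y2|
    # +---+---+---+
    # | x1|  1|  0|
    # | x2|  0| 50|
    # +---+---+---+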
def transform(self, func):
"""
Call ``func`` on self producing a Series with transformed values
and that has the same length as its input.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
Koalas uses return type hint and does not try to infer the type.
.. note:: the series within ``func`` is actually a pandas series, and
the length of each series is not guaranteed.
Parameters
----------
func : function
Function to use for transforming the data. It must work when pandas Series
is passed.
Returns
-------
DataFrame
A DataFrame that must have the same length as self.
Raises
------
Exception : If the returned DataFrame has a different length than self.
Examples
--------
>>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
>>> df.transform(square)
A B
0 0 1
1 1 4
2 4 9
You can omit the type hint and let Koalas infer its type.
>>> df.transform(lambda x: x ** 2)
A B
0 0 1
1 1 4
2 4 9
For multi-index columns:
>>> df.columns = [('X', 'A'), ('X', 'B')]
>>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
>>> df.transform(lambda x: x ** 2) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
"""
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
transformed = pdf.transform(func)
kdf = DataFrame(transformed)
if len(pdf) <= limit:
return kdf
applied = []
for input_idx, output_idx in zip(
self._internal.column_index, kdf._internal.column_index):
wrapped = ks.pandas_wraps(
func,
return_col=as_python_type(kdf[output_idx].spark_type))
applied.append(wrapped(self[input_idx]).rename(input_idx))
else:
wrapped = ks.pandas_wraps(func)
applied = []
for idx in self._internal.column_index:
applied.append(wrapped(self[idx]).rename(idx))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
Also support for MultiIndex
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df
a b
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('a')
name class
0 falcon bird
1 parrot bird
2 lion mammal
3 monkey mammal
>>> df
b
max_speed
0 389.0
1 24.0
2 80.5
3 NaN
"""
result = self[item]
self._internal = self.drop(item)._internal
return result
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._internal.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._internal.column_index) == 0 or self._sdf.rdd.isEmpty()
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
.. note:: currently it collects top 1000 rows and return its
pandas `pandas.io.formats.style.Styler` instance.
Examples
--------
>>> ks.range(1001).style # doctest: +ELLIPSIS
<pandas.io.formats.style.Styler object at ...>
"""
max_results = get_option('compute.max_rows')
pdf = self.head(max_results + 1).to_pandas()
if len(pdf) > max_results:
warnings.warn(
"'style' property will only use top %s rows." % max_results, UserWarning)
return pdf.head(max_results).style
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, (str, tuple)):
keys = [keys]
else:
keys = list(keys)
columns = set(self.columns)
for key in keys:
if key not in columns:
raise KeyError(key)
keys = [key if isinstance(key, tuple) else (key,) for key in keys]
if drop:
column_index = [idx for idx in self._internal.column_index if idx not in keys]
else:
column_index = self._internal.column_index
if append:
index_map = self._internal.index_map + [(self._internal.column_name_for(idx), idx)
for idx in keys]
else:
index_map = [(self._internal.column_name_for(idx), idx) for idx in keys]
internal = self._internal.copy(index_map=index_map,
column_index=column_index,
data_columns=[self._internal.column_name_for(idx)
for idx in column_index])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = ks.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1,
... col_fill='species') # doctest: +NORMALIZE_WHITESPACE
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1,
... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
multi_index = len(self._internal.index_map) > 1
def rename(index):
if multi_index:
return ('level_{}'.format(index),)
else:
if ('index',) not in self._internal.column_index:
return ('index',)
else:
return ('level_{}'.format(index),)
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._internal.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._internal.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._internal.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._internal.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))

else:
raise KeyError('Level {} must be same as name ({})'
               .format(l, self._internal.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._internal.index_map.copy()
for i in idx:
info = self._internal.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
new_data_scols = [
self._internal.scol_for(column).alias(str(name)) for column, name in new_index_map]
if len(index_map) > 0:
index_scols = [scol_for(self._sdf, column) for column, _ in index_map]
sdf = self._sdf.select(
index_scols + new_data_scols + self._internal.data_scols)
else:
sdf = self._sdf.select(new_data_scols + self._internal.data_scols)
# Now, the new internal Spark columns are named the same as the index names.
new_index_map = [(column, name) for column, name in new_index_map]
index_map = [('__index_level_0__', None)]
sdf = _InternalFrame.attach_default_index(sdf)
if drop:
new_index_map = []
internal = self._internal.copy(
sdf=sdf,
data_columns=[str(name) for _, name in new_index_map] + self._internal.data_columns,
index_map=index_map,
column_index=None)
if self._internal.column_index_level > 1:
column_depth = len(self._internal.column_index[0])
if col_level >= column_depth:
raise IndexError('Too many levels: Index has only {} levels, not {}'
.format(column_depth, col_level + 1))
if any(col_level + len(name) > column_depth for _, name in new_index_map):
raise ValueError('Item must have length equal to number of levels.')
columns = pd.MultiIndex.from_tuples(
[tuple(([col_fill] * col_level)
+ list(name)
+ ([col_fill] * (column_depth - (len(name) + col_level))))
for _, name in new_index_map]
+ self._internal.column_index)
else:
columns = [name for _, name in new_index_map] + self._internal.column_index
if inplace:
self._internal = internal
self.columns = columns
else:
kdf = DataFrame(internal)
kdf.columns = columns
return kdf
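# --- Hedged illustration of the rename() fallback above: a single unnamed index comes
# back as a column named 'index' (or 'level_0' if a column named 'index' already exists),
# while unnamed MultiIndex levels become 'level_0', 'level_1', and so on.
#
#   ks.DataFrame({'a': [1, 2]}).reset_index().columns        # ['index', 'a']
#   ks.DataFrame({'index': [1, 2]}).reset_index().columns    # ['level_0', 'index']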
def isnull(self):
"""
Detects missing values for items in the current DataFrame.
Return a boolean same-sized DataFrame indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
DataFrame.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current DataFrame.
This function takes a DataFrame and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
DataFrame.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
# TODO: add freq and axis parameter
def shift(self, periods=1, fill_value=None):
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].shift(periods, fill_value))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
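# --- Rough sketch of the kind of Spark expression a per-column shift relies on; this is
# an assumption for illustration only, since the actual logic lives in Series.shift.
# The Window has no partitionBy, which is why the note above warns about a single partition.
#
#   from pyspark.sql import Window, functions as F
#   w = Window.orderBy(F.monotonically_increasing_id())   # global ordering, one partition
#   shifted = F.lag(F.col('Col1'), 3).over(w)              # periods=3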
# TODO: axis should also support 1 or 'columns' (only 0 or 'index' is supported at the moment)
def diff(self, periods: int = 1, axis: Union[int, str] = 0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].diff(periods))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
# TODO: axis should also support 1 or 'columns' (only 0 or 'index' is supported at the moment)
def nunique(self, axis: Union[int, str] = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : int, default 0 or 'index'
Can only be set to 0 at the moment.
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amount of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
res = self._sdf.select([self[column]._nunique(dropna, approx, rsd)
for column in self.columns])
return res.toPandas().T.iloc[:, 0]
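# --- Hedged sketch of what the approximate path presumably builds on: Spark's
# HyperLogLog-based approx_count_distinct, parameterised by the relative standard
# deviation `rsd`, versus the exact countDistinct.
#
#   from pyspark.sql import functions as F
#   exact = F.countDistinct(F.col('A'))
#   approx = F.approx_count_distinct(F.col('A'), rsd=0.05)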
def round(self, decimals=0):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ks.Series):
decimals_list = [(k if isinstance(k, tuple) else (k,), v)
for k, v in decimals._to_internal_pandas().items()]
elif isinstance(decimals, dict):
decimals_list = [(k if isinstance(k, tuple) else (k,), v)
for k, v in decimals.items()]
elif isinstance(decimals, int):
decimals_list = [(k, decimals) for k in self._internal.column_index]
else:
raise ValueError("decimals must be an integer, a dict-like or a Series")
sdf = self._sdf
for idx, decimal in decimals_list:
if idx in self._internal.column_index:
col = self._internal.column_name_for(idx)
sdf = sdf.withColumn(col, F.round(scol_for(sdf, col), decimal))
return DataFrame(self._internal.copy(sdf=sdf))
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates,
by default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
... columns = ['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 1 1 1
2 1 1 1
3 3 4 5
>>> df.duplicated().sort_index()
0 False
1 True
2 True
3 False
Name: 0, dtype: bool
Mark duplicates as ``True`` except for the last occurrence.
>>> df.duplicated(keep='last').sort_index()
0 True
1 True
2 False
3 False
Name: 0, dtype: bool
Mark all duplicates as ``True``.
>>> df.duplicated(keep=False).sort_index()
0 True
1 True
2 True
3 False
Name: 0, dtype: bool
"""
from databricks.koalas.series import _col
if len(self._internal.index_names) > 1:
raise ValueError("Now we don't support multi-index Now.")
if subset is None:
group_cols = self._internal.data_columns
else:
group_cols = subset
diff = set(subset).difference(set(self._internal.data_columns))
if len(diff) > 0:
raise KeyError(', '.join(diff))
sdf = self._sdf
index = self._internal.index_columns[0]
if self._internal.index_names[0] is not None:
name = self._internal.index_names[0]
else:
name = '0'
if keep == 'first' or keep == 'last':
if keep == 'first':
ord_func = spark.functions.asc
else:
ord_func = spark.functions.desc
window = Window.partitionBy(group_cols).orderBy(ord_func(index)).rowsBetween(
Window.unboundedPreceding, Window.currentRow)
sdf = sdf.withColumn(name, F.row_number().over(window) > 1)
elif not keep:
window = Window.partitionBy(group_cols).orderBy(F.col(index).desc())\
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
sdf = sdf.withColumn(name, F.count(F.col(index)).over(window) > 1)
else:
raise ValueError("'keep' only support 'first', 'last' and False")
return _col(DataFrame(_InternalFrame(sdf=sdf.select(index, name), data_columns=[name],
index_map=self._internal.index_map)))
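# --- Hedged usage sketch (not in the original docstring): `subset` restricts which
# columns are compared, so rows count as duplicates when only those columns repeat.
#
#   df.duplicated(subset=['a'])
#   df.duplicated(subset=['a', 'b'], keep='last')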
def to_koalas(self):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
col1 col2
0 1 3
1 2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
return DataFrame(self)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached, which gets uncached after execution goes out of the context.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use the `unpersist` function
>>> df.unpersist()
"""
return _CachedDataFrame(self._internal)
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
**options):
"""
Write the DataFrame into a Spark table.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the table exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2, 3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
self.to_spark().write.saveAsTable(name=name, format=format, mode=mode,
partitionBy=partition_cols, options=options)
def to_delta(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None, **options):
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2, 3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2019-01-01"')
"""
self.to_spark_io(
path=path, mode=mode, format="delta", partition_cols=partition_cols, options=options)
def to_parquet(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
compression: Optional[str] = None):
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2, 3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
self.to_spark().write.parquet(
path=path, mode=mode, partitionBy=partition_cols, compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
mode: str = 'error', partition_cols: Union[str, List[str], None] = None,
**options):
"""Write the DataFrame out to a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2, 3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
self.to_spark().write.save(
path=path, format=format, mode=mode, partitionBy=partition_cols, options=options)
def to_spark(self):
"""
Return the current DataFrame as a Spark DataFrame.
.. note:: Index information is lost, so if the index columns are not also present
as actual data columns, they are dropped.
See Also
--------
DataFrame.to_koalas
"""
return self._internal.spark_df
def to_pandas(self):
"""
Return a pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.pandas_df.copy()
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = list(kwargs.items())
sdf = self._sdf
for (name, c) in pairs:
if isinstance(c, Series):
sdf = sdf.withColumn(name, c._scol)
elif isinstance(c, Column):
sdf = sdf.withColumn(name, c)
else:
sdf = sdf.withColumn(name, F.lit(c))
data_columns = set(self._internal.data_columns)
adding_columns = [name for name, _ in pairs if name not in data_columns]
level = self._internal.column_index_level
adding_column_index = [tuple([col, *([''] * (level - 1))]) for col in adding_columns]
internal = self._internal.copy(
sdf=sdf,
data_columns=(self._internal.data_columns + adding_columns),
column_index=(self._internal.column_index + adding_column_index))
return DataFrame(internal)
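# --- Hedged illustration of the note above: since Koalas computes all items first and
# assigns them afterwards, a column created in the same assign() call cannot be
# referenced by another item of that call (the second lambda below is expected to fail).
#
#   df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32,
#             temp_k=lambda x: x.temp_f + 459.67)   # 'temp_f' is not a column yet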
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None, exclude: list = None,
columns: list = None, coerce_float: bool = False, nrows: int = None) \
-> 'DataFrame':
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float,
nrows))
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
"""
return DataFrame(self._internal.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
columns = [subset]
else:
columns = list(subset)
invalids = [column for column in columns
if column not in self._internal.data_columns]
if len(invalids) > 0:
raise KeyError(invalids)
else:
columns = list(self.columns)
cnt = reduce(lambda x, y: x + y,
[F.when(self[column].notna()._scol, 1).otherwise(0)
for column in columns],
F.lit(0))
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(columns))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
# TODO: add 'limit' when value parameter exists
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
"""Fill NA/NaN values.
.. note:: the current implementation of the 'method' parameter in fillna uses Spark's
Window without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. Alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series.
pad / ffill: propagate last valid observation forward to next valid.
backfill / bfill: use NEXT valid observation to fill gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
sdf = self._sdf
if value is not None:
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if (value is None) and (method is None):
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
value = {self._internal.column_name_for(key): value for key, value in value.items()}
if limit is not None:
raise ValueError('limit parameter for value is not supported now')
sdf = sdf.fillna(value)
internal = self._internal.copy(sdf=sdf)
else:
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].fillna(value=value, method=method, axis=axis,
inplace=False, limit=limit))
sdf = self._sdf.select(self._internal.index_scols + [col._scol for col in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[col._internal.data_columns[0]
for col in applied],
column_index=[col._internal.column_index[0]
for col in applied])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
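# --- Hedged sketch of the dict branch above in raw Spark terms: the column-name keys
# and fill values are handed to Spark's DataFrame.fillna directly.
#
#   sdf = kdf.to_spark()
#   sdf.fillna({'A': 0, 'B': 1})   # per-column fills, like fillna(value={'A': 0, 'B': 1})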
# TODO: add 'downcast' when value parameter exists
def bfill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> df.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
# TODO: add 'downcast' when value parameter exists
def ffill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> df.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
def replace(self, to_replace=None, value=None, subset=None, inplace=False,
limit=None, regex=False, method='pad'):
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, or list
Value to be replaced. If the value is a dict, then value is ignored and
to_replace must be a mapping from column name (string) to replacement value.
The value to be replaced must be an int, float, or string.
value : int, float, string, or list
Value to use to replace holes. The replacement value must be an int, float,
or string. If value is a list, it should be of the same length as to_replace.
subset : string, list
Optional list of column names to consider. Columns specified in subset that
do not have matching data type are ignored. For example, if value is a string,
and subset contains a non-string column, then the non-string column is simply ignored.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Replacing value by specifying column
>>> df.replace('Mjolnir', 'Stormbuster', subset='weapon')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict like `to_replace`
>>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']},
... columns=['A', 'B', 'C'])
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
Notes
-----
One difference between this implementation and pandas is that it is necessary
to specify the column name when passing a dictionary as the `to_replace`
parameter. Calling `replace` on the index, such as `df.replace({0: 10, 1: 100})`, will
throw an error. Instead, specify the column name, e.g. `df.replace({'A': {0: 10, 1: 100}})`.
"""
if method != 'pad':
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
if value is not None and not isinstance(value, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(value)))
if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(to_replace)))
if isinstance(value, list) and isinstance(to_replace, list):
if len(value) != len(to_replace):
raise ValueError('Length of to_replace and value must be same')
sdf = self._sdf.select(self._internal.data_columns)
if isinstance(to_replace, dict) and value is None and \
(not any(isinstance(i, dict) for i in to_replace.values())):
sdf = sdf.replace(to_replace, value, subset)
elif isinstance(to_replace, dict):
for df_column, replacement in to_replace.items():
if isinstance(replacement, dict):
sdf = sdf.replace(replacement, subset=df_column)
else:
sdf = sdf.withColumn(df_column,
F.when(scol_for(sdf, df_column) == replacement, value)
.otherwise(scol_for(sdf, df_column)))
else:
sdf = sdf.replace(to_replace, value, subset)
kdf = DataFrame(sdf)
if inplace:
self._internal = kdf._internal
else:
return kdf
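# --- Hedged sketch of the branches above in raw Spark terms (illustrative only):
#
#   sdf = kdf.to_spark()
#   sdf.replace('Ironman', 'War-Machine', subset=['name'])   # scalar to_replace/value
#   sdf.replace(['A', 'B'], ['AA', 'BB'], subset=['C'])       # list-to-list replacement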
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
numeric_columns = [(c, self._internal.scol_for(c)) for c in self.columns
if isinstance(self._internal.spark_type_for(c), numeric_types)]
if lower is not None:
numeric_columns = [(c, F.when(scol < lower, lower).otherwise(scol).alias(c))
for c, scol in numeric_columns]
if upper is not None:
numeric_columns = [(c, F.when(scol > upper, upper).otherwise(scol).alias(c))
for c, scol in numeric_columns]
nonnumeric_columns = [self._internal.scol_for(c) for c in self.columns
if not isinstance(self._internal.spark_type_for(c), numeric_types)]
sdf = self._sdf.select([scol for _, scol in numeric_columns] + nonnumeric_columns)
return ks.DataFrame(sdf)[list(self.columns)]
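# --- Hedged illustration of the numeric-only behaviour described in the Notes: only
# columns with numeric Spark types are clipped; other columns pass through unchanged.
#
#   kdf = ks.DataFrame({'A': [0, 2, 4], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
#   kdf.clip(1, 3)   # 'A' becomes [1, 2, 3]; the string column 'B' is returned as-is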
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._internal.copy(sdf=self._sdf.limit(n)))
# TODO: support multi-index columns
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
It should be either a string or a list of fewer than three columns.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
The list should contain strings.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If a dict is passed, the key is the column to aggregate and the
value is the aggregate function given as a string. In that case the
resulting pivot table will have columns concatenated by "_", where
the first part is the value of columns and the second part is the
column name in values.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table # doctest: +NORMALIZE_WHITESPACE
C large small
A B
foo one 4.0 1
two NaN 6
bar two 7.0 6
one 4.0 5
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table # doctest: +NORMALIZE_WHITESPACE
C large small
A B
foo one 4 1
two 0 6
bar two 7 6
one 4 5
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values = ['D'], index =['C'],
... columns="A", aggfunc={'D':'mean'})
>>> table # doctest: +NORMALIZE_WHITESPACE
A bar foo
C
small 5.5 2.333333
large 5.5 2.000000
The next example aggregates on multiple values.
>>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
... aggfunc={'D': 'mean', 'E': 'sum'})
>>> table # doctest: +NORMALIZE_WHITESPACE
D E
A bar foo bar foo
C
small 5.5 2.333333 17 13
large 5.5 2.000000 15 9
"""
if not isinstance(columns, str):
raise ValueError("columns should be string.")
if not isinstance(values, str) and not isinstance(values, list):
raise ValueError('values should be a string or a list of columns.')
if not isinstance(aggfunc, str) and (not isinstance(aggfunc, dict) or not all(
isinstance(key, str) and isinstance(value, str) for key, value in aggfunc.items())):
raise ValueError("aggfunc must be a dict mapping from column name (string) "
"to aggregate functions (string).")
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError("pivot_table doesn't support aggfunc"
" as dict and without index.")
if isinstance(values, list) and index is None:
raise NotImplementedError("values can't be a list without index.")
if isinstance(values, list) and len(values) > 2:
raise NotImplementedError("values more than two is not supported yet!")
if columns not in self.columns.values:
raise ValueError("Wrong columns {}.".format(columns))
if isinstance(values, list):
if not all(isinstance(self._internal.spark_type_for(col), NumericType)
for col in values):
raise TypeError('values should be a numeric type.')
elif not isinstance(self._internal.spark_type_for(values), NumericType):
raise TypeError('values should be a numeric type.')
if isinstance(aggfunc, str):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(values, aggfunc))]
elif isinstance(aggfunc, dict):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'.format(key, value))
for key, value in aggfunc.items()]
agg_columns = [key for key, value in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
if index is None:
sdf = self._sdf.groupBy().pivot(pivot_col=columns).agg(*agg_cols)
elif isinstance(index, list):
sdf = self._sdf.groupBy(index).pivot(pivot_col=columns).agg(*agg_cols)
else:
raise ValueError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
if index is not None:
if isinstance(values, list):
data_columns = [column for column in sdf.columns if column not in index]
if len(values) == 2:
# If we have two values, Spark returns column names in the
# format '<pivot value>_<value column>', where the pivot values
# come from the `columns` column and the value columns are the
# list passed to pivot_table().
# E.g. if the pivot values are 2 and 3 and values is ['b', 'e'],
# the resulting columns are ['2_b', '2_e', '3_b', '3_e'].
# We sort the columns of the Spark DataFrame by the value column.
data_columns.sort(key=lambda x: x.split('_', 1)[1])
sdf = sdf.select(index + data_columns)
index_map = [(column, (column,)) for column in index]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns,
index_map=index_map)
kdf = DataFrame(internal)
# We build the MultiIndex from the list of columns returned by Spark.
tuples = [(name.split('_')[1], self.dtypes[columns].type(name.split('_')[0]))
for name in kdf._internal.data_columns]
kdf.columns = pd.MultiIndex.from_tuples(tuples, names=[None, columns])
else:
index_map = [(column, (column,)) for column in index]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns,
index_map=index_map, column_index_names=[columns])
kdf = DataFrame(internal)
return kdf
else:
data_columns = [column for column in sdf.columns if column not in index]
index_map = [(column, (column,)) for column in index]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns, index_map=index_map,
column_index_names=[columns])
return DataFrame(internal)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
sdf = sdf.withColumn(columns, F.lit(index_values))
data_columns = [column for column in sdf.columns if column not in [columns]]
index_map = [(column, (column,)) for column in [columns]]
internal = _InternalFrame(sdf=sdf, data_columns=data_columns, index_map=index_map,
column_index_names=[columns])
return DataFrame(internal)
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation.
Parameters
----------
index : string, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string
Column to use to make new frame's columns.
values : string, object or a list of the previous
Column(s) to use for populating new frame's values.
Returns
-------
DataFrame
Returns reshaped DataFrame.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
Examples
--------
>>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
... columns=['foo', 'bar', 'baz', 'zoo'])
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0
3 4.0 NaN NaN
4 NaN 5.0 NaN
5 NaN NaN 6.0
Notice that, unlike pandas which raises a ValueError when duplicated values are found,
Koalas' pivot still works with the first value it meets during the operation, because pivot
is an expensive operation and it is preferable to execute permissively rather than fail fast
when processing large data.
>>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1.0 NaN NaN
two NaN 3.0 4.0
"""
if columns is None:
raise ValueError("columns should be set.")
if values is None:
raise ValueError("values should be set.")
should_use_existing_index = index is not None
if should_use_existing_index:
index = [index]
else:
index = self._internal.index_columns
df = self.pivot_table(
index=index, columns=columns, values=values, aggfunc='first')
if should_use_existing_index:
return df
else:
index_columns = df._internal.index_columns
# Note that the existing indexing column won't exist in the pivoted DataFrame.
internal = df._internal.copy(
index_map=[(index_column, None) for index_column in index_columns])
return DataFrame(internal)
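# Illustrative sketch (comments only): pivot() above delegates to pivot_table() with
# aggfunc='first', so the two calls below are expected to be roughly equivalent.
# The DataFrame and column names are hypothetical; `ks` is assumed to be imported.
#
#   kdf = ks.DataFrame({'foo': ['one', 'one', 'two'],
#                       'bar': ['A', 'B', 'A'],
#                       'baz': [1, 2, 3]})
#   a = kdf.pivot(index='foo', columns='bar', values='baz')
#   b = kdf.pivot_table(index=['foo'], columns='bar', values='baz', aggfunc='first')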
@property
def columns(self):
"""The column labels of the DataFrame."""
if self._internal.column_index_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_index)
else:
columns = pd.Index([idx[0] for idx in self._internal.column_index])
if self._internal.column_index_names is not None:
columns.names = self._internal.column_index_names
return columns
@columns.setter
def columns(self, columns):
if isinstance(columns, pd.MultiIndex):
column_index = columns.tolist()
old_names = self._internal.column_index
if len(old_names) != len(column_index):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(column_index)))
column_index_names = columns.names
data_columns = [str(idx) if len(idx) > 1 else idx[0] for idx in column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
self._internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
else:
old_names = self._internal.column_index
if len(old_names) != len(columns):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(columns)))
column_index = [col if isinstance(col, tuple) else (col,) for col in columns]
if isinstance(columns, pd.Index):
column_index_names = columns.names
else:
column_index_names = None
data_columns = [str(idx) if len(idx) > 1 else idx[0] for idx in column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
self._internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
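# Illustrative sketch (comments only): assigning to `columns` rebuilds the underlying
# Spark DataFrame with aliased columns, as done in the setter above. Names are
# hypothetical; `ks` and `pd` are assumed to be imported.
#
#   kdf = ks.DataFrame({'x': [1, 2], 'y': [3, 4]})
#   kdf.columns = ['x2', 'y2']                                            # plain labels
#   kdf.columns = pd.MultiIndex.from_tuples([('a', 'x2'), ('a', 'y2')])   # MultiIndex labels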
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[col].dtype for col in self._internal.data_columns],
index=self._internal.data_columns)
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
TypeError: string dtypes are not allowed, use 'object' instead
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=set(include).intersection(set(exclude))))
# Handle Spark types
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except:
pass
# Handle Pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except:
pass
columns = []
column_index = []
for idx in self._internal.column_index:
if len(include) > 0:
should_include = (
infer_dtype_from_object(self[idx].dtype.name) in include_numpy_type or
self._internal.spark_type_for(idx) in include_spark_type)
else:
should_include = not (
infer_dtype_from_object(self[idx].dtype.name) in exclude_numpy_type or
self._internal.spark_type_for(idx) in exclude_spark_type)
if should_include:
columns.append(self._internal.column_name_for(idx))
column_index.append(idx)
return DataFrame(self._internal.copy(
sdf=self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(col) for col in columns]),
data_columns=columns, column_index=column_index))
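# Illustrative sketch (comments only): include/exclude accept both numpy/pandas dtype
# names and Spark SQL DDL type strings, both of which are resolved above. The frame
# is hypothetical; `ks` and `np` are assumed to be imported.
#
#   kdf = ks.DataFrame({'a': [1, 2], 'b': [1.0, 2.0], 'c': ['x', 'y']})
#   kdf.select_dtypes(include=['int64'])     # numpy/pandas dtype name
#   kdf.select_dtypes(include=['string'])    # Spark SQL DDL type string
#   kdf.select_dtypes(exclude=[np.number])   # exclude all numeric columns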
def count(self, axis=None):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or ‘index’, 1 or ‘columns’}, default 0
If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are
generated for each row.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
Name: 0, dtype: int64
"""
return self._reduce_for_stat_function(
_Frame._count_expr, name="count", axis=axis, numeric_only=False)
def drop(self, labels=None, axis=1,
columns: Union[str, Tuple[str, ...], List[str], List[Tuple[str, ...]]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Also support for MultiIndex
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
a b
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('a') # doctest: +NORMALIZE_WHITESPACE
b
z w
0 5 7
1 6 8
Notes
-----
Currently only axis = 1 is supported in this function;
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [(columns,)] # type: ignore
elif isinstance(columns, tuple):
columns = [columns]
else:
columns = [col if isinstance(col, tuple) else (col,) # type: ignore
for col in columns]
drop_column_index = set(idx for idx in self._internal.column_index
for col in columns
if idx[:len(col)] == col)
if len(drop_column_index) == 0:
raise KeyError(columns)
cols, idxes = zip(*((column, idx)
for column, idx
in zip(self._internal.data_columns, self._internal.column_index)
if idx not in drop_column_index))
internal = self._internal.copy(
sdf=self._sdf.select(
self._internal.index_scols + [self._internal.scol_for(idx) for idx in idxes]),
data_columns=list(cols),
column_index=list(idxes))
return DataFrame(internal)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def _sort(self, by: List[Column], ascending: Union[bool, List[bool]],
inplace: bool, na_position: str):
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
kdf = DataFrame(self._internal.copy(sdf=self._sdf.sort(*by))) # type: ks.DataFrame
if inplace:
self._internal = kdf._internal
return None
else:
return kdf
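# Illustrative sketch (comments only): the mapper above corresponds to Spark's
# null-ordering sort expressions; in plain PySpark (with `sdf` a Spark DataFrame and
# `F` as pyspark.sql.functions) it is roughly
#
#   sdf.orderBy(F.col('col1').asc_nulls_last())    # ascending=True,  na_position='last'
#   sdf.orderBy(F.col('col1').desc_nulls_first())  # ascending=False, na_position='first'
#
# The _jc-based lambdas are used because these Column helpers were only added to the
# PySpark API in Spark 2.4 (SPARK-23847).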
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
by = [self[colname]._scol for colname in by]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
def sort_index(self, axis: int = 0,
level: Optional[Union[int, List[int]]] = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {‘first’, ‘last’}, default ‘last’
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is supported at the moment.")
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_scols
elif is_list_like(level):
by = [self._internal.index_scols[l] for l in level] # type: ignore
else:
by = [self._internal.index_scols[level]]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
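# Illustrative sketch (comments only): nlargest() is a convenience wrapper, so for a
# hypothetical `kdf` the two expressions below should return the same rows.
#
#   kdf.nlargest(3, columns='X')
#   kdf.sort_values(by='X', ascending=False).head(3)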
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns as in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._internal.index_columns.copy()
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self._internal.scol_for(col)
.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self._internal.scol_for(col).isin(list(values)).alias(col)
for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._internal.copy(sdf=self._sdf.select(_select_columns)))
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
# TODO: support multi-index columns
def merge(self, right: 'DataFrame', how: str = 'inner',
on: Optional[Union[str, List[str]]] = None,
left_on: Optional[Union[str, List[str]]] = None,
right_on: Optional[Union[str, List[str]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
5 baz 3 baz 7
1 foo 1 foo 5
2 foo 1 foo 8
3 foo 5 foo 5
4 foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda o: o if o is None or is_list_like(o) else [o]
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = self._internal.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._internal.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = common
right_keys = common
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = self._internal.spark_internal_df.alias('left_table')
right_table = right._internal.spark_internal_df.alias('right_table')
left_key_columns = [scol_for(left_table, col) for col in left_keys] # type: ignore
right_key_columns = [scol_for(right_table, col) for col in right_keys] # type: ignore
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(self._internal.data_columns)
& set(right._internal.data_columns))
left_index_columns = set(self._internal.index_columns)
right_index_columns = set(right._internal.index_columns)
exprs = []
for col in left_table.columns:
if col in left_index_columns:
continue
scol = scol_for(left_table, col)
if col in duplicate_columns:
if col in left_keys and col in right_keys:
right_scol = scol_for(right_table, col)
if how == 'right':
scol = right_scol
elif how == 'full':
scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
else:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
exprs.append(scol)
for col in right_table.columns:
if col in right_index_columns:
continue
scol = scol_for(right_table, col)
if col in duplicate_columns:
if col in left_keys and col in right_keys:
continue
else:
col = col + right_suffix
scol = scol.alias(col)
exprs.append(scol)
# Retain indices if they are used for joining
if left_index:
if right_index:
exprs.extend(['left_table.`{}`'.format(col) for col in left_index_columns])
exprs.extend(['right_table.`{}`'.format(col) for col in right_index_columns])
index_map = self._internal.index_map + [idx for idx in right._internal.index_map
if idx not in self._internal.index_map]
else:
exprs.extend(['right_table.`{}`'.format(col) for col in right_index_columns])
index_map = right._internal.index_map
elif right_index:
exprs.extend(['left_table.`{}`'.format(col) for col in left_index_columns])
index_map = self._internal.index_map
else:
index_map = []
selected_columns = joined_table.select(*exprs)
# Merge left and right indices after the join by replacing missing values in the left index
# with values from the right index and dropping
if (how == 'right' or how == 'full') and right_index:
for left_index_col, right_index_col in zip(self._internal.index_columns,
right._internal.index_columns):
selected_columns = selected_columns.withColumn(
'left_table.' + left_index_col,
F.when(F.col('left_table.`{}`'.format(left_index_col)).isNotNull(),
F.col('left_table.`{}`'.format(left_index_col)))
.otherwise(F.col('right_table.`{}`'.format(right_index_col)))
).withColumnRenamed(
'left_table.' + left_index_col, left_index_col
).drop(F.col('left_table.`{}`'.format(left_index_col)))
if not (left_index and not right_index):
for right_index_col in right_index_columns:
if right_index_col in left_index_columns:
selected_columns = \
selected_columns.drop(F.col('right_table.`{}`'.format(right_index_col)))
if index_map:
data_columns = [c for c in selected_columns.columns
if c not in [idx[0] for idx in index_map]]
internal = _InternalFrame(
sdf=selected_columns, data_columns=data_columns, index_map=index_map)
return DataFrame(internal)
else:
return DataFrame(selected_columns)
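# Illustrative sketch (comments only): common call shapes handled by merge() above.
# `left`, `right` and the column names are hypothetical.
#
#   left.merge(right, on='key', how='inner')                            # shared key column
#   left.merge(right, left_on='lkey', right_on='rkey', suffixes=('_l', '_r'))
#   left.merge(right, left_index=True, right_index=True, how='outer')   # index-to-index join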
def join(self, right: 'DataFrame', on: Optional[Union[str, List[str]]] = None,
how: str = 'left', lsuffix: str = '', rsuffix: str = '') -> 'DataFrame':
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame’s index (or column if on is specified).
* right: use `right`’s index.
* outer: form union of `left` frame’s index (or column if on is specified) with
right’s index, and sort it lexicographically.
* inner: form intersection of `left` frame’s index (or column if on is specified)
with `right`’s index, preserving the order of the `left`’s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> kdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> kdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
>>> join_kdf.sort_values(by=join_kdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
>>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right’s index but we can use any column in df. This method preserves the
original DataFrame’s index in the result.
>>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
>>> join_kdf.sort_index()
key A B
0 K3 A3 None
1 K0 A0 B0
2 K1 A1 B1
3 K2 A2 B2
"""
if isinstance(right, ks.Series):
common = list(self.columns.intersection([right.name]))
else:
common = list(self.columns.intersection(right.columns))
if len(common) > 0 and not lsuffix and not rsuffix:
raise ValueError(
"columns overlap but no suffix specified: "
"{rename}".format(rename=common))
if on:
self = self.set_index(on)
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix)).reset_index()
else:
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix))
return join_kdf
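# Illustrative sketch (comments only): join(on='key') above is implemented roughly as
#
#   self.set_index('key') \
#       .merge(right, left_index=True, right_index=True, how=how,
#              suffixes=(lsuffix, rsuffix)) \
#       .reset_index()
#
# while join() without `on` merges directly on both indexes.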
def append(self, other: 'DataFrame', ignore_index: bool = False,
verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise ValueError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_scols
if len(index_scols) != len(other._internal.index_scols):
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
if (self._sdf.select(index_scols)
.intersect(other._sdf.select(other._internal.index_scols))
.count()) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return concat([self, other], ignore_index=ignore_index)
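# Illustrative sketch (comments only): after the index checks above, append() is
# effectively a two-frame concat, i.e.
#
#   df.append(other, ignore_index=True)
#
# behaves like
#
#   ks.concat([df, other], ignore_index=True)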
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: 'DataFrame', join: str = 'left', overwrite: bool = True):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
If `other` contains None, the corresponding values are not updated in the original dataframe.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
if join != 'left':
raise NotImplementedError("Only left join is supported")
if isinstance(other, ks.Series):
other = DataFrame(other)
update_columns = list(set(self._internal.data_columns)
.intersection(set(other._internal.data_columns)))
update_sdf = self.join(other[update_columns], rsuffix='_new')._sdf
for column_name in update_columns:
old_col = scol_for(update_sdf, column_name)
new_col = scol_for(update_sdf, column_name + '_new')
if overwrite:
update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col)
.otherwise(new_col))
else:
update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col)
.otherwise(old_col))
internal = self._internal.copy(sdf=update_sdf.select([scol_for(update_sdf, col)
for col in self._internal.columns]))
self._internal = internal
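# Illustrative sketch (comments only): per overlapping column, the `overwrite` flag maps
# to the two F.when expressions used above, i.e. roughly
#
#   overwrite=True:  F.when(new_col.isNull(), old_col).otherwise(new_col)
#   overwrite=False: F.when(old_col.isNull(), new_col).otherwise(old_col)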
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
Please call this function using named argument by specifying the ``frac`` argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends not only on the seed, but also on how the data is distributed across
machines and, to some extent, on network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(self._internal.copy(sdf=sdf))
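# Illustrative sketch (comments only): sample() maps directly onto
# pyspark.sql.DataFrame.sample; for a hypothetical `kdf`:
#
#   kdf.sample(frac=0.1, random_state=42)
#   kdf.sample(frac=1.5, replace=True)   # fractions above 1 generally need replace=True
#
# As noted in the docstring, the seed alone does not make the sampled rows
# deterministic across different cluster layouts.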
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.items():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.items():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._internal.index_scols + list(map(lambda ser: ser._scol, results)))
return DataFrame(self._internal.copy(sdf=sdf))
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
data_columns = [prefix + self._internal.column_name_for(idx)
for idx in self._internal.column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
column_index = [tuple([prefix + i for i in idx]) for idx in self._internal.column_index]
internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index)
return DataFrame(internal)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
data_columns = [self._internal.column_name_for(idx) + suffix
for idx in self._internal.column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
column_index = [tuple([i + suffix for i in idx]) for idx in self._internal.column_index]
internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index)
return DataFrame(internal)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
data_columns = []
for col in self.columns:
kseries = self[col]
spark_type = kseries.spark_type
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
data_columns.append(kseries.name)
elif isinstance(spark_type, NumericType):
exprs.append(kseries._scol)
data_columns.append(kseries.name)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
internal = _InternalFrame(sdf=sdf.replace("stddev", "std", subset='summary'),
data_columns=data_columns,
index_map=[('summary', None)])
return DataFrame(internal).astype('float64')
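# Illustrative sketch (comments only): describe() builds on Spark's DataFrame.summary();
# for a hypothetical `sdf` with numeric columns it is roughly
#
#   sdf.select('numeric1', 'numeric2') \
#      .summary('count', 'mean', 'stddev', 'min', '25%', '50%', '75%', 'max')
#
# with 'stddev' renamed to 'std' to match the pandas output.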
def _cum(self, func, skipna: bool):
# This is used for cummin, cummax, cumsum, etc.
if func == F.min:
func = "cummin"
elif func == F.max:
func = "cummax"
elif func == F.sum:
func = "cumsum"
elif func.__name__ == "cumprod":
func = "cumprod"
applied = []
for column in self.columns:
applied.append(getattr(self[column], func)(skipna))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
>>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_values(['a', 'b'])
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
"""
if subset is None:
subset = self._internal.data_columns
elif not isinstance(subset, list):
subset = [subset]
sdf = self._sdf.drop_duplicates(subset=subset)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reindex(self, labels: Optional[Any] = None, index: Optional[Any] = None,
columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None,
copy: Optional[bool] = True, fill_value: Optional[Any] = None) -> 'DataFrame':
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ks.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
if axis in ('index', 0, None):
index = labels
elif axis in ('columns', 1):
columns = labels
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
if index is not None and not is_list_like(index):
raise TypeError("Index must be called with a collection of some kind, "
"%s was passed" % type(index))
if columns is not None and not is_list_like(columns):
raise TypeError("Columns must be called with a collection of some kind, "
"%s was passed" % type(columns))
df = self.copy()
if index is not None:
df = DataFrame(df._reindex_index(index))
if columns is not None:
df = DataFrame(df._reindex_columns(columns))
# Process missing values.
if fill_value is not None:
df = df.fillna(fill_value)
# Copy
if copy:
return df.copy()
else:
self._internal = df._internal
return self
def _reindex_index(self, index):
# When axis is index, we can mimic pandas' behavior with a right outer join.
index_column = self._internal.index_columns
assert len(index_column) <= 1, "Index should be single column or not set."
if len(index_column) == 1:
kser = ks.Series(list(index))
index_column = index_column[0]
labels = kser._kdf._sdf.select(kser._scol.alias(index_column))
else:
index_column = None
labels = ks.Series(index).to_frame()._sdf
joined_df = self._sdf.join(labels, on=index_column, how="right")
new_data_columns = filter(lambda x: x not in index_column, joined_df.columns)
if index_column is not None:
index_map = [(index_column, None)] # type: List[IndexMap]
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns),
index_map=index_map)
else:
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns))
return internal
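# Illustrative sketch (comments only): the right outer join above keeps exactly the
# requested labels; in plain PySpark terms it is roughly
#
#   self._sdf.join(labels_sdf, on=index_column, how='right')
#
# where `labels_sdf` (hypothetical name) holds the new index values and rows missing
# from the original frame come back as nulls.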
def _reindex_columns(self, columns):
level = self._internal.column_index_level
if level > 1:
label_columns = list(columns)
for col in label_columns:
if not isinstance(col, tuple):
raise TypeError('Expected tuple, got {}'.format(type(col)))
else:
label_columns = [(col,) for col in columns]
for col in label_columns:
if len(col) != level:
raise ValueError("shape (1,{}) doesn't match the shape (1,{})"
.format(len(col), level))
scols, columns, idx = [], [], []
null_columns = False
for label in label_columns:
if label in self._internal.column_index:
scols.append(self._internal.scol_for(label))
columns.append(self._internal.column_name_for(label))
else:
scols.append(F.lit(np.nan).alias(str(label)))
columns.append(str(label))
null_columns = True
idx.append(label)
if null_columns:
sdf = self._sdf.select(self._internal.index_scols + list(scols))
return self._internal.copy(sdf=sdf, data_columns=columns, column_index=idx)
def melt(self, id_vars=None, value_vars=None, var_name='variable',
value_name='value'):
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ks.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> ks.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ks.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
if id_vars is None:
id_vars = []
if not isinstance(id_vars, (list, tuple, np.ndarray)):
id_vars = list(id_vars)
data_columns = self._internal.data_columns
if value_vars is None:
value_vars = []
if not isinstance(value_vars, (list, tuple, np.ndarray)):
value_vars = list(value_vars)
if len(value_vars) == 0:
value_vars = data_columns
data_columns = [data_column for data_column in data_columns if data_column not in id_vars]
sdf = self._sdf
pairs = F.explode(F.array(*[
F.struct(*(
[F.lit(column).alias(var_name)] +
[self._internal.scol_for(column).alias(value_name)])
) for column in data_columns if column in value_vars]))
columns = (id_vars +
[F.col("pairs.%s" % var_name), F.col("pairs.%s" % value_name)])
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(exploded_df)
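The implementation above unpivots by exploding an array of (variable, value) structs. A standalone PySpark sketch of that pattern on a toy frame (illustrative only; the column names are made up):

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([("a", 1, 2), ("b", 3, 4)], ["A", "B", "C"])

# One (variable, value) struct per melted column, exploded into rows.
pairs = F.explode(F.array(*[
    F.struct(F.lit(c).alias("variable"), F.col(c).alias("value"))
    for c in ["B", "C"]
]))

sdf.withColumn("pairs", pairs).select("A", "pairs.variable", "pairs.value").show()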
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty)
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
Name: all, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
column_index = self._internal.column_index
for idx in column_index:
col = self[idx]._scol
all_col = F.min(F.coalesce(col.cast('boolean'), F.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.any, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
internal_index_column = "__index_level_{}__".format
value_column = "value"
cols = []
for idx, applied_col in zip(column_index, applied):
cols.append(F.struct(
[F.lit(col).alias(internal_index_column(i)) for i, col in enumerate(idx)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_index_names is None
else (self._internal.column_index_names[i],))
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(internal_index_column(i), index_column_name(i))
for i in range(self._internal.column_index_level)],
column_index=None,
column_index_names=None)
return DataFrame(internal)[value_column].rename("all")
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return a boolean.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
Name: any, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
column_index = self._internal.column_index
for idx in column_index:
col = self[idx]._scol
all_col = F.max(F.coalesce(col.cast('boolean'), F.lit(False)))
applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.all, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
internal_index_column = "__index_level_{}__".format
value_column = "value"
cols = []
for idx, applied_col in zip(column_index, applied):
cols.append(F.struct(
[F.lit(col).alias(internal_index_column(i)) for i, col in enumerate(idx)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_index_names is None
else (self._internal.column_index_names[i],))
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(internal_index_column(i), index_column_name(i))
for i in range(self._internal.column_index_level)],
column_index=None,
column_index_names=None)
return DataFrame(internal)[value_column].rename("any")
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method='average', ascending=True):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
        .. note:: the current implementation of rank uses Spark's Window without
            specifying a partition specification. This moves all data into a
            single partition on a single machine and could cause serious
            performance degradation. Avoid this method with very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
        If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
        If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
        If method is set to 'dense', it leaves no gaps between the ranks of groups.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].rank(method=method, ascending=ascending))
sdf = self._sdf.select(self._internal.index_columns + [column._scol for column in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
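The note above warns that rank relies on a Spark Window with no partition specification. A small, standalone sketch of what such an unpartitioned window ranking looks like (illustrative only; Spark normally logs a warning that all data is moved into a single partition for this kind of window):

from pyspark.sql import SparkSession, functions as F
from pyspark.sql.window import Window

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(1,), (2,), (2,), (3,)], ["A"])

w = Window.orderBy("A")  # no partitionBy(): every row ends up in one partition
sdf.select(
    "A",
    F.rank().over(w).alias("min_rank"),         # ties share the lowest rank
    F.dense_rank().over(w).alias("dense_rank")  # no gaps between tied groups
).show()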
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive")
if axis not in ('index', 0, 'columns', 1, None):
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
index_scols = self._internal.index_scols
sdf = self._sdf
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
col = None
for item in items:
if col is None:
col = index_scols[0] == F.lit(item)
else:
col = col | (index_scols[0] == F.lit(item))
sdf = sdf.filter(col)
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
return self[items]
elif like is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].contains(like))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
data_columns = self._internal.data_columns
output_columns = [c for c in data_columns if like in c]
return self[output_columns]
elif regex is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].rlike(regex))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
data_columns = self._internal.data_columns
matcher = re.compile(regex)
output_columns = [c for c in data_columns if matcher.search(c) is not None]
return self[output_columns]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def _get_from_multiindex_column(self, key):
""" Select columns from multi-index columns.
:param key: the multi-index column keys represented by tuple
:return: DataFrame or Series
"""
from databricks.koalas.series import Series
assert isinstance(key, tuple)
indexes = [(idx, idx) for idx in self._internal.column_index]
for k in key:
indexes = [(index, idx[1:]) for index, idx in indexes if idx[0] == k]
if len(indexes) == 0:
raise KeyError(k)
recursive = False
if all(len(idx) > 0 and idx[0] == '' for _, idx in indexes):
# If the head is '', drill down recursively.
recursive = True
for i, (col, idx) in enumerate(indexes):
indexes[i] = (col, tuple([str(key), *idx[1:]]))
column_index_names = None
if self._internal.column_index_names is not None:
# Manage column index names
level = column_index_level([idx for _, idx in indexes])
column_index_names = self._internal.column_index_names[-level:]
if all(len(idx) == 0 for _, idx in indexes):
try:
idxes = set(idx for idx, _ in indexes)
assert len(idxes) == 1
index = list(idxes)[0]
kdf_or_ser = \
Series(self._internal.copy(scol=self._internal.scol_for(index),
column_index=[index]),
anchor=self)
except AnalysisException:
raise KeyError(key)
else:
kdf_or_ser = DataFrame(self._internal.copy(
data_columns=[self._internal.column_name_for(idx) for idx, _ in indexes],
column_index=[idx for _, idx in indexes],
column_index_names=column_index_names))
if recursive:
kdf_or_ser = kdf_or_ser._get_from_multiindex_column((str(key),))
return kdf_or_ser
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
return self._get_from_multiindex_column((key,))
if isinstance(key, tuple):
return self._get_from_multiindex_column(key)
elif np.isscalar(key):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._internal.copy(scol=self._internal.scol_for(key)), anchor=self)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._internal.copy(sdf=self._sdf.filter(bcol)))
raise NotImplementedError(key)
def _to_internal_pandas(self):
"""
Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._internal.pandas_df
def __repr__(self):
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string()
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_string = pdf.to_string(show_dimensions=True)
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return pdf.to_string()
def _repr_html_(self):
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_html(notebook=True)
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf[:max_display_count]
if pdf_length > max_display_count:
repr_html = pdf.to_html(show_dimensions=True, notebook=True)
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return pdf.to_html(notebook=True)
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
if (isinstance(value, Series) and value._kdf is not self) or \
(isinstance(value, DataFrame) and value is not self):
# Different Series or DataFrames
level = self._internal.column_index_level
if isinstance(value, Series):
value = value.to_frame()
value.columns = pd.MultiIndex.from_tuples(
[tuple(list(value._internal.column_index[0]) + ([''] * (level - 1)))])
else:
assert isinstance(value, DataFrame)
value_level = value._internal.column_index_level
if value_level > level:
value.columns = pd.MultiIndex.from_tuples(
[idx[level:] for idx in value._internal.column_index])
elif value_level < level:
value.columns = pd.MultiIndex.from_tuples(
[tuple(list(idx) + ([''] * (level - value_level)))
for idx in value._internal.column_index])
if isinstance(key, str):
key = [(key,)]
elif isinstance(key, tuple):
key = [key]
else:
key = [k if isinstance(k, tuple) else (k,) for k in key]
def assign_columns(kdf, this_column_index, that_column_index):
assert len(key) == len(that_column_index)
                # Note that `zip_longest` is intentionally used here so that every
                # entry of `that_column_index` is combined, even when the lengths differ.
for k, this_idx, that_idx \
in zip_longest(key, this_column_index, that_column_index):
yield (kdf[that_idx], tuple(['that', *k]))
if this_idx is not None and this_idx[1:] != k:
yield (kdf[this_idx], this_idx)
kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
elif isinstance(key, (tuple, list)):
assert isinstance(value, DataFrame)
# Same DataFrames.
field_names = value.columns
kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)})
else:
# Same Series.
kdf = self.assign(**{key: value})
self._internal = kdf._internal
def __getattr__(self, key: str) -> Any:
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
try:
return self._get_from_multiindex_column((key,))
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key))
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
if sys.version_info >= (3, 7):
def __class_getitem__(cls, params):
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
            # we always wrap the given type hints in a tuple to mimic the variadic generic.
return super(cls, DataFrame).__class_getitem__(Tuple[params])
elif (3, 5) <= sys.version_info < (3, 7):
# This is a workaround to support variadic generic in DataFrame in Python 3.5+
# The implementation is in its metaclass so this flag is needed to distinguish
# Koalas DataFrame.
is_dataframe = None
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known sql aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
class _CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal):
self._cached = internal._sdf.cache()
super(_CachedDataFrame, self).__init__(internal)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unpersist()
def unpersist(self):
"""
        The `unpersist` function is used to uncache the Koalas DataFrame when it
        is not used with a `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.cache()
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
if self._cached.is_cached:
self._cached.unpersist()
| idx: 1 | id: 11716 | review comment: "We can just use `.stack()` here? Then I guess we can reuse when supporting multi-index columns." | project: databricks-koalas | language: py |
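For context on the review comment: the `all()`/`any()` code in the file above transposes one aggregated value per column into rows via explode(array(struct(...))), and Spark SQL's stack() is the more direct alternative the comment alludes to. A standalone sketch of both (illustrative only, not how koalas ended up implementing it):

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(True, 0), (True, 1)], ["col1", "col2"])

# One global boolean aggregate per column; nulls count as True, as in all().
agg_row = sdf.select(*[
    F.min(F.coalesce(F.col(c).cast("boolean"), F.lit(True))).alias(c)
    for c in sdf.columns
])

# Variant 1: pack the single row into an array of structs and explode it.
exploded = (agg_row
            .select(F.array(*[F.struct(F.lit(c).alias("name"), F.col(c).alias("value"))
                              for c in sdf.columns]).alias("arrays"))
            .select(F.explode("arrays").alias("col"))
            .selectExpr("col.*"))

# Variant 2: SQL stack() yields the same (name, value) rows in one expression.
stacked = agg_row.selectExpr("stack(2, 'col1', col1, 'col2', col2) as (name, value)")

exploded.show()
stacked.show()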
@@ -14,8 +14,13 @@ namespace Datadog.Trace.ClrProfiler
private static readonly Lazy<bool> _enabled = new Lazy<bool>(
() =>
{
+#if NETSTANDARD2_0
+ // TODO: figure out configuration in .NET Standard 2.0
+ return true;
+#else
string setting = ConfigurationManager.AppSettings["Datadog.Tracing:Enabled"];
return !string.Equals(setting, bool.FalseString, StringComparison.InvariantCultureIgnoreCase);
+#endif
},
LazyThreadSafetyMode.PublicationOnly);
| y: 1 |
using System;
using System.Configuration;
using System.Threading;
// [assembly: System.Security.SecurityCritical]
// [assembly: System.Security.AllowPartiallyTrustedCallers]
namespace Datadog.Trace.ClrProfiler
{
/// <summary>
/// Provides instrumentation probes that can be injected into profiled code.
/// </summary>
public static class Instrumentation
{
private static readonly Lazy<bool> _enabled = new Lazy<bool>(
() =>
{
string setting = ConfigurationManager.AppSettings["Datadog.Tracing:Enabled"];
return !string.Equals(setting, bool.FalseString, StringComparison.InvariantCultureIgnoreCase);
},
LazyThreadSafetyMode.PublicationOnly);
private static readonly Lazy<bool> _profilerAttached = new Lazy<bool>(
() =>
{
try
{
return NativeMethods.IsProfilerAttached();
}
catch
{
return false;
}
},
LazyThreadSafetyMode.PublicationOnly);
/// <summary>
/// Gets a value indicating whether tracing with Datadog's profiler is enabled.
/// </summary>
/// <value>
/// <c>true</c> if profiling is enabled; <c>false</c> otherwise.
/// </value>
public static bool Enabled => _enabled.Value;
/// <summary>
/// Gets a value indicating whether Datadog's profiler is currently attached.
/// </summary>
/// <value>
        /// <c>true</c> if the profiler is currently attached; <c>false</c> otherwise.
/// </value>
public static bool ProfilerAttached => _profilerAttached.Value;
}
}
| idx: 1 | id: 14342 | review comment: "What are the possibilities here? Since we plan on supporting .net standard 2.0 from day 1, we might as well tackle that now." | project: DataDog-dd-trace-dotnet | language: .cs |
@@ -28,9 +28,9 @@ namespace OpenTelemetry.Metrics
/// <summary>
/// Adds the given value to the bound counter metric.
/// </summary>
- /// <param name="context">the associated <see cref="SpanContext"/>.</param>
+ /// <param name="context">the associated <see cref="SpanReference"/>.</param>
/// <param name="value">value by which the bound counter metric should be added.</param>
- public abstract void Add(in SpanContext context, T value);
+ public abstract void Add(in SpanReference context, T value);
/// <summary>
        /// Adds the given value to the bound counter metric.
| y: 1 |
// <copyright file="BoundCounterMetric.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using OpenTelemetry.Trace;
namespace OpenTelemetry.Metrics
{
/// <summary>
/// Bound counter metric with the defined <see cref="LabelSet"/>.
/// </summary>
/// <typeparam name="T">The type of counter. Only long and double are supported now.</typeparam>
public abstract class BoundCounterMetric<T>
where T : struct
{
/// <summary>
/// Adds the given value to the bound counter metric.
/// </summary>
/// <param name="context">the associated <see cref="SpanContext"/>.</param>
/// <param name="value">value by which the bound counter metric should be added.</param>
public abstract void Add(in SpanContext context, T value);
/// <summary>
/// Adds the given value to the bound counter metric.
/// </summary>
/// <param name="context">the associated <see cref="Baggage"/>.</param>
/// <param name="value">value by which the bound counter metric should be added.</param>
public abstract void Add(in Baggage context, T value);
}
}
| idx: 1 | id: 17556 | review comment: "should the parameters be called reference or context?" | project: open-telemetry-opentelemetry-dotnet | language: .cs |
@@ -184,7 +184,7 @@ module Bolt
begin
if writable && index < in_buffer.length
- to_print = in_buffer[index..-1]
+ to_print = in_buffer.byteslice(index..-1)
written = inp.write_nonblock to_print
index += written
| y: 1 |
# frozen_string_literal: true
require 'open3'
require 'fileutils'
require 'bolt/node/output'
require 'bolt/util'
module Bolt
module Transport
class Local < Sudoable
class Shell < Sudoable::Connection
attr_accessor :user, :logger, :target
attr_writer :run_as
CHUNK_SIZE = 4096
def initialize(target)
@target = target
# The familiar problem: Etc.getlogin is broken on osx
@user = ENV['USER'] || Etc.getlogin
@run_as = target.options['run-as']
@logger = Logging.logger[self]
@sudo_id = SecureRandom.uuid
end
# If prompted for sudo password, send password to stdin and return an
# empty string. Otherwise, check for sudo errors and raise Bolt error.
# If sudo_id is detected, that means the task needs to have stdin written.
# If error is not sudo-related, return the stderr string to be added to
# node output
def handle_sudo(stdin, err, pid, sudo_stdin)
if err.include?(Sudoable.sudo_prompt)
# A wild sudo prompt has appeared!
if @target.options['sudo-password']
stdin.write("#{@target.options['sudo-password']}\n")
''
else
raise Bolt::Node::EscalateError.new(
"Sudo password for user #{@user} was not provided for localhost",
'NO_PASSWORD'
)
end
elsif err =~ /^#{@sudo_id}/
if sudo_stdin
stdin.write("#{sudo_stdin}\n")
stdin.close
end
''
else
handle_sudo_errors(err, pid)
end
end
def handle_sudo_errors(err, pid)
if err =~ /^#{@user} is not in the sudoers file\./
@logger.debug { err }
raise Bolt::Node::EscalateError.new(
"User #{@user} does not have sudo permission on localhost",
'SUDO_DENIED'
)
elsif err =~ /^Sorry, try again\./
@logger.debug { err }
# CODEREVIEW can we kill a sudo process without sudo password?
Process.kill('TERM', pid)
raise Bolt::Node::EscalateError.new(
"Sudo password for user #{@user} not recognized on localhost",
'BAD_PASSWORD'
)
else
# No need to raise an error - just return the string
err
end
end
def copy_file(source, dest)
@logger.debug { "Uploading #{source}, to #{dest}" }
if source.is_a?(StringIO)
File.open("tempfile", "w") { |f| f.write(source.read) }
execute(['mv', 'tempfile', dest])
else
# Mimic the behavior of `cp --remove-destination`
# since the flag isn't supported on MacOS
result = execute(['rm', '-rf', dest])
if result.exit_code != 0
message = "Could not remove existing file #{dest}: #{result.stderr.string}"
raise Bolt::Node::FileError.new(message, 'REMOVE_ERROR')
end
result = execute(['cp', '-r', source, dest])
if result.exit_code != 0
message = "Could not copy file to #{dest}: #{result.stderr.string}"
raise Bolt::Node::FileError.new(message, 'COPY_ERROR')
end
end
end
def with_tmpscript(script)
with_tempdir do |dir|
dest = File.join(dir.to_s, File.basename(script))
copy_file(script, dest)
yield dest, dir
end
end
# See if there's a sudo prompt in the output
# If not, return the output
def check_sudo(out, inp, pid, stdin)
buffer = out.readpartial(CHUNK_SIZE)
# Split on newlines, including the newline
lines = buffer.split(/(?<=[\n])/)
# handle_sudo will return the line if it is not a sudo prompt or error
lines.map! { |line| handle_sudo(inp, line, pid, stdin) }
lines.join("")
# If stream has reached EOF, no password prompt is expected
# return an empty string
rescue EOFError
''
end
def execute(command, sudoable: true, **options)
run_as = options[:run_as] || self.run_as
escalate = sudoable && run_as && @user != run_as
use_sudo = escalate && @target.options['run-as-command'].nil?
command_str = inject_interpreter(options[:interpreter], command)
if escalate
if use_sudo
sudo_exec = target.options['sudo-executable'] || "sudo"
sudo_flags = [sudo_exec, "-k", "-S", "-u", run_as, "-p", Sudoable.sudo_prompt]
sudo_flags += ["-E"] if options[:environment]
sudo_str = Shellwords.shelljoin(sudo_flags)
else
sudo_str = Shellwords.shelljoin(@target.options['run-as-command'] + [run_as])
end
command_str = build_sudoable_command_str(command_str, sudo_str, @sudo_id, options)
end
command_arr = options[:environment].nil? ? [command_str] : [options[:environment], command_str]
# Prepare the variables!
result_output = Bolt::Node::Output.new
# Sudo handler will pass stdin if needed.
in_buffer = !use_sudo && options[:stdin] ? options[:stdin] : ''
# Chunks of this size will be read in one iteration
index = 0
timeout = 0.1
inp, out, err, t = Open3.popen3(*command_arr)
read_streams = { out => String.new,
err => String.new }
write_stream = in_buffer.empty? ? [] : [inp]
# See if there's a sudo prompt
if use_sudo
ready_read = select([err], nil, nil, timeout * 5)
read_streams[err] << check_sudo(err, inp, t.pid, options[:stdin]) if ready_read
end
# True while the process is running or waiting for IO input
while t.alive?
# See if we can read from out or err, or write to in
ready_read, ready_write, = select(read_streams.keys, write_stream, nil, timeout)
# Read from out and err
ready_read&.each do |stream|
# Check for sudo prompt
read_streams[stream] << if use_sudo
check_sudo(stream, inp, t.pid, options[:stdin])
else
stream.readpartial(CHUNK_SIZE)
end
rescue EOFError
end
# select will either return an empty array if there are no
# writable streams or nil if no IO object is available before the
# timeout is reached.
writable = if ready_write.respond_to?(:empty?)
!ready_write.empty?
else
!ready_write.nil?
end
begin
if writable && index < in_buffer.length
to_print = in_buffer[index..-1]
written = inp.write_nonblock to_print
index += written
if index >= in_buffer.length && !write_stream.empty?
inp.close
write_stream = []
end
end
# If a task has stdin as an input_method but doesn't actually
# read from stdin, the task may return and close the input stream
rescue Errno::EPIPE
write_stream = []
end
end
# Read any remaining data in the pipe. Do not wait for
# EOF in case the pipe is inherited by a child process.
read_streams.each do |stream, _|
loop { read_streams[stream] << stream.read_nonblock(CHUNK_SIZE) }
rescue Errno::EAGAIN, EOFError
end
result_output.stdout << read_streams[out]
result_output.stderr << read_streams[err]
result_output.exit_code = t.value.exitstatus
result_output
end
end
end
end
end
| idx: 1 | id: 14393 | review comment: "Do we need to also change `length` here (and below) to `bytesize`? Perhaps we ought to make a copy of `in_buffer` encoded as binary and then the existing algorithm should work." | project: puppetlabs-bolt | language: rb |
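For context on the review comment: `write_nonblock` reports how many bytes it wrote, so indexing the remaining buffer (and comparing against `length`) in characters can lose or resend data once multibyte characters are involved. A small Python sketch of the same pitfall (Python chosen for consistency with the other examples in this dump; the Bolt code itself is Ruby):

buffer_text = "héllo"                      # 5 characters, 6 bytes in UTF-8
buffer_bytes = buffer_text.encode("utf-8")

written = 3  # pretend a nonblocking write accepted 3 *bytes* ("h" plus the 2-byte "é")

# Character indexing with a byte count: one 'l' silently disappears.
print(buffer_text[written:])      # 'lo'
# Byte slicing, the byteslice/bytesize approach: exactly the unwritten bytes remain.
print(buffer_bytes[written:])     # b'llo'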
@@ -166,6 +166,7 @@ def get_filename_question(*, suggested_filename, url, parent=None):
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
html.escape(url.toDisplayString()))
+ q.yank_text = url.toString()
q.mode = usertypes.PromptMode.download
q.completed.connect(q.deleteLater)
    q.default = _path_suggestion(suggested_filename)
| y: 1 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Shared QtWebKit/QtWebEngine code for downloads."""
import re
import sys
import html
import os.path
import collections
import functools
import pathlib
import tempfile
import enum
import sip
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, Qt, QObject, QModelIndex,
QTimer, QAbstractListModel)
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.config import config
from qutebrowser.utils import (usertypes, standarddir, utils, message, log,
qtutils)
ModelRole = enum.IntEnum('ModelRole', ['item'], start=Qt.UserRole)
# Remember the last used directory
last_used_directory = None
# All REFRESH_INTERVAL milliseconds, speeds will be recalculated and downloads
# redrawn.
_REFRESH_INTERVAL = 500
class UnsupportedAttribute:
"""Class which is used to create attributes which are not supported.
This is used for attributes like "fileobj" for downloads which are not
supported with QtWebengine.
"""
pass
class UnsupportedOperationError(Exception):
"""Raised when an operation is not supported with the given backend."""
def download_dir():
"""Get the download directory to use."""
directory = config.val.downloads.location.directory
remember_dir = config.val.downloads.location.remember
if remember_dir and last_used_directory is not None:
ddir = last_used_directory
elif directory is None:
ddir = standarddir.download()
else:
ddir = directory
try:
os.makedirs(ddir)
except FileExistsError:
pass
return ddir
def immediate_download_path(prompt_download_directory=None):
"""Try to get an immediate download path without asking the user.
If that's possible, we return a path immediately. If not, None is returned.
Args:
        prompt_download_directory: If this is something other than None, it
will overwrite the
downloads.location.prompt setting.
"""
if prompt_download_directory is None:
prompt_download_directory = config.val.downloads.location.prompt
if not prompt_download_directory:
return download_dir()
return None
def _path_suggestion(filename):
"""Get the suggested file path.
Args:
filename: The filename to use if included in the suggestion.
"""
suggestion = config.val.downloads.location.suggestion
if suggestion == 'path':
# add trailing '/' if not present
return os.path.join(download_dir(), '')
elif suggestion == 'filename':
return filename
elif suggestion == 'both':
return os.path.join(download_dir(), filename)
else: # pragma: no cover
raise ValueError("Invalid suggestion value {}!".format(suggestion))
def create_full_filename(basename, filename):
"""Create a full filename based on the given basename and filename.
Args:
basename: The basename to use if filename is a directory.
filename: The path to a folder or file where you want to save.
Return:
The full absolute path, or None if filename creation was not possible.
"""
# Remove chars which can't be encoded in the filename encoding.
# See https://github.com/qutebrowser/qutebrowser/issues/427
encoding = sys.getfilesystemencoding()
filename = utils.force_encoding(filename, encoding)
basename = utils.force_encoding(basename, encoding)
if os.path.isabs(filename) and (os.path.isdir(filename) or
filename.endswith(os.sep)):
# We got an absolute directory from the user, so we save it under
# the default filename in that directory.
return os.path.join(filename, basename)
elif os.path.isabs(filename):
# We got an absolute filename from the user, so we save it under
# that filename.
return filename
return None
def get_filename_question(*, suggested_filename, url, parent=None):
"""Get a Question object for a download-path.
Args:
suggested_filename: The "default"-name that is pre-entered as path.
url: The URL the download originated from.
parent: The parent of the question (a QObject).
"""
encoding = sys.getfilesystemencoding()
suggested_filename = utils.force_encoding(suggested_filename, encoding)
q = usertypes.Question(parent)
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
html.escape(url.toDisplayString()))
q.mode = usertypes.PromptMode.download
q.completed.connect(q.deleteLater)
q.default = _path_suggestion(suggested_filename)
return q
def transform_path(path):
r"""Do platform-specific transformations, like changing E: to E:\.
Returns None if the path is invalid on the current platform.
"""
if not utils.is_windows:
return path
path = utils.expand_windows_drive(path)
# Drive dependent working directories are not supported, e.g.
# E:filename is invalid
if re.search(r'^[A-Z]:[^\\]', path, re.IGNORECASE):
return None
# Paths like COM1, ...
# See https://github.com/qutebrowser/qutebrowser/issues/82
if pathlib.Path(path).is_reserved():
return None
return path
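A standalone sketch of the Windows-specific rules `transform_path` enforces, using only the standard library (`utils.expand_windows_drive` is qutebrowser-internal, so a simple stand-in regex is used here, and `PureWindowsPath` makes the reserved-name check platform-independent):

import pathlib
import re

def sketch_transform(path):
    if re.fullmatch(r"[A-Za-z]:", path):
        path += "\\"                                    # "E:" -> "E:\"
    if re.search(r"^[A-Z]:[^\\]", path, re.IGNORECASE):
        return None                                     # drive-relative paths rejected
    if pathlib.PureWindowsPath(path).is_reserved():
        return None                                     # COM1, NUL, ...
    return path

print(sketch_transform("E:"), sketch_transform("E:foo"), sketch_transform("COM1"))
# E:\ None None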
def suggested_fn_from_title(url_path, title=None):
"""Suggest a filename depending on the URL extension and page title.
Args:
url_path: a string with the URL path
title: the page title string
Return:
The download filename based on the title, or None if the extension is
not found in the whitelist (or if there is no page title).
"""
ext_whitelist = [".html", ".htm", ".php", ""]
_, ext = os.path.splitext(url_path)
if ext.lower() in ext_whitelist and title:
suggested_fn = utils.sanitize_filename(title)
if not suggested_fn.lower().endswith((".html", ".htm")):
suggested_fn += ".html"
else:
suggested_fn = None
return suggested_fn
class NoFilenameError(Exception):
"""Raised when we can't find out a filename in DownloadTarget."""
# Where a download should be saved
class _DownloadTarget:
"""Abstract base class for different download targets."""
def __init__(self):
raise NotImplementedError
def suggested_filename(self):
"""Get the suggested filename for this download target."""
raise NotImplementedError
class FileDownloadTarget(_DownloadTarget):
"""Save the download to the given file.
Attributes:
filename: Filename where the download should be saved.
"""
def __init__(self, filename):
# pylint: disable=super-init-not-called
self.filename = filename
def suggested_filename(self):
return os.path.basename(self.filename)
def __str__(self):
return self.filename
class FileObjDownloadTarget(_DownloadTarget):
"""Save the download to the given file-like object.
Attributes:
fileobj: File-like object where the download should be written to.
"""
def __init__(self, fileobj):
# pylint: disable=super-init-not-called
self.fileobj = fileobj
def suggested_filename(self):
try:
return self.fileobj.name
except AttributeError:
raise NoFilenameError
def __str__(self):
try:
return 'file object at {}'.format(self.fileobj.name)
except AttributeError:
return 'anonymous file object'
class OpenFileDownloadTarget(_DownloadTarget):
"""Save the download in a temp dir and directly open it.
Attributes:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default application.
If no `{}` is found, the filename is appended to the cmdline.
"""
def __init__(self, cmdline=None):
# pylint: disable=super-init-not-called
self.cmdline = cmdline
def suggested_filename(self):
raise NoFilenameError
def __str__(self):
return 'temporary file'
class DownloadItemStats(QObject):
"""Statistics (bytes done, total bytes, time, etc.) about a download.
Class attributes:
SPEED_AVG_WINDOW: How many seconds of speed data to average to
estimate the remaining time.
Attributes:
done: How many bytes there are already downloaded.
total: The total count of bytes. None if the total is unknown.
speed: The current download speed, in bytes per second.
_speed_avg: A rolling average of speeds.
        _last_done: The count of bytes which were downloaded when calculating
the speed the last time.
"""
SPEED_AVG_WINDOW = 30
def __init__(self, parent=None):
super().__init__(parent)
self.total = None
self.done = 0
self.speed = 0
self._last_done = 0
samples = int(self.SPEED_AVG_WINDOW * (1000 / _REFRESH_INTERVAL))
self._speed_avg = collections.deque(maxlen=samples)
def update_speed(self):
"""Recalculate the current download speed.
        The caller needs to guarantee this is called every _REFRESH_INTERVAL ms.
"""
if self.done is None:
# this can happen for very fast downloads, e.g. when actually
# opening a file
return
delta = self.done - self._last_done
self.speed = delta * 1000 / _REFRESH_INTERVAL
self._speed_avg.append(self.speed)
self._last_done = self.done
def finish(self):
"""Set the download stats as finished."""
self.done = self.total
def percentage(self):
"""The current download percentage, or None if unknown."""
if self.done == self.total:
return 100
elif self.total == 0 or self.total is None:
return None
else:
return 100 * self.done / self.total
def remaining_time(self):
"""The remaining download time in seconds, or None."""
if self.total is None or not self._speed_avg:
# No average yet or we don't know the total size.
return None
remaining_bytes = self.total - self.done
avg = sum(self._speed_avg) / len(self._speed_avg)
if avg == 0:
# Download stalled
return None
else:
return remaining_bytes / avg
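The two methods above implement the speed estimate: each tick appends `delta * 1000 / _REFRESH_INTERVAL` to a bounded deque, and the remaining time is the unfinished byte count divided by the mean of that window. A standalone sketch with made-up numbers (illustrative only, not the qutebrowser classes themselves):

import collections

REFRESH_INTERVAL_MS = 500
SPEED_AVG_WINDOW_S = 30
speed_avg = collections.deque(maxlen=int(SPEED_AVG_WINDOW_S * 1000 / REFRESH_INTERVAL_MS))

last_done = 0
for done in [0, 40_000, 95_000, 160_000]:                  # bytes seen at each refresh tick
    delta = done - last_done
    speed_avg.append(delta * 1000 / REFRESH_INTERVAL_MS)   # bytes per second
    last_done = done

total = 1_000_000
avg = sum(speed_avg) / len(speed_avg)
remaining = (total - last_done) / avg if avg else None     # seconds, or None if stalled
print(f"avg {avg:.0f} B/s, about {remaining:.1f}s left")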
@pyqtSlot('qint64', 'qint64')
def on_download_progress(self, bytes_done, bytes_total):
"""Update local variables when the download progress changed.
Args:
bytes_done: How many bytes are downloaded.
bytes_total: How many bytes there are to download in total.
"""
if bytes_total in [0, -1]: # QtWebEngine, QtWebKit
bytes_total = None
self.done = bytes_done
self.total = bytes_total
class AbstractDownloadItem(QObject):
"""Shared QtNetwork/QtWebEngine part of a download item.
Attributes:
done: Whether the download is finished.
stats: A DownloadItemStats object.
index: The index of the download in the view.
successful: Whether the download has completed successfully.
error_msg: The current error message, or None
fileobj: The file object to download the file to.
raw_headers: The headers sent by the server.
_filename: The filename of the download.
_dead: Whether the Download has _die()'d.
Signals:
data_changed: The downloads metadata changed.
finished: The download was finished.
cancelled: The download was cancelled.
error: An error with the download occurred.
arg: The error message as string.
remove_requested: Emitted when the removal of this download was
requested.
"""
data_changed = pyqtSignal()
finished = pyqtSignal()
error = pyqtSignal(str)
cancelled = pyqtSignal()
remove_requested = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self.done = False
self.stats = DownloadItemStats(self)
self.index = 0
self.error_msg = None
self.basename = '???'
self.successful = False
self.fileobj = UnsupportedAttribute()
self.raw_headers = UnsupportedAttribute()
self._filename = None
self._dead = False
def __repr__(self):
return utils.get_repr(self, basename=self.basename)
def __str__(self):
"""Get the download as a string.
Example: foo.pdf [699.2kB/s|0.34|16%|4.253/25.124]
"""
speed = utils.format_size(self.stats.speed, suffix='B/s')
down = utils.format_size(self.stats.done, suffix='B')
perc = self.stats.percentage()
remaining = self.stats.remaining_time()
if self.error_msg is None:
errmsg = ""
else:
errmsg = " - {}".format(self.error_msg)
if all(e is None for e in [perc, remaining, self.stats.total]):
return ('{index}: {name} [{speed:>10}|{down}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
down=down, errmsg=errmsg))
perc = round(perc)
if remaining is None:
remaining = '?'
else:
remaining = utils.format_seconds(remaining)
total = utils.format_size(self.stats.total, suffix='B')
if self.done:
return ('{index}: {name} [{perc:>2}%|{total}]{errmsg}'.format(
index=self.index, name=self.basename, perc=perc,
total=total, errmsg=errmsg))
else:
return ('{index}: {name} [{speed:>10}|{remaining:>5}|{perc:>2}%|'
'{down}/{total}]{errmsg}'.format(
index=self.index, name=self.basename, speed=speed,
remaining=remaining, perc=perc, down=down,
total=total, errmsg=errmsg))
def _do_die(self):
"""Do cleanup steps after a download has died."""
raise NotImplementedError
def _die(self, msg):
"""Abort the download and emit an error."""
assert not self.successful
# Prevent actions if calling _die() twice.
#
# For QtWebKit, this might happen if the error handler correctly
# connects, and the error occurs in _init_reply between
# reply.error.connect and the reply.error() check. In this case, the
# connected error handlers will be called twice, once via the direct
# error.emit() and once here in _die(). The stacks look like this then:
#
# <networkmanager error.emit> -> on_reply_error -> _die ->
# self.error.emit()
#
# and
#
# [_init_reply -> <single shot timer> ->] <lambda in _init_reply> ->
# self.error.emit()
#
# which may lead to duplicate error messages (and failing tests)
if self._dead:
return
self._dead = True
self._do_die()
self.error_msg = msg
self.stats.finish()
self.error.emit(msg)
self.done = True
self.data_changed.emit()
def get_status_color(self, position):
"""Choose an appropriate color for presenting the download's status.
Args:
position: The color type requested, can be 'fg' or 'bg'.
"""
assert position in ["fg", "bg"]
# pylint: disable=bad-config-option
start = getattr(config.val.colors.downloads.start, position)
stop = getattr(config.val.colors.downloads.stop, position)
system = getattr(config.val.colors.downloads.system, position)
error = getattr(config.val.colors.downloads.error, position)
# pylint: enable=bad-config-option
if self.error_msg is not None:
assert not self.successful
return error
elif self.stats.percentage() is None:
return start
else:
return utils.interpolate_color(start, stop,
self.stats.percentage(), system)
def _do_cancel(self):
"""Actual cancel implementation."""
raise NotImplementedError
@pyqtSlot()
def cancel(self, *, remove_data=True):
"""Cancel the download.
Args:
remove_data: Whether to remove the downloaded data.
"""
self._do_cancel()
log.downloads.debug("cancelled")
if remove_data:
self.delete()
self.done = True
self.finished.emit()
self.data_changed.emit()
@pyqtSlot()
def remove(self):
"""Remove the download from the model."""
self.remove_requested.emit()
def delete(self):
"""Delete the downloaded file."""
try:
if self._filename is not None and os.path.exists(self._filename):
os.remove(self._filename)
log.downloads.debug("Deleted {}".format(self._filename))
else:
log.downloads.debug("Not deleting {}".format(self._filename))
except OSError:
log.downloads.exception("Failed to remove partial file")
@pyqtSlot()
def retry(self):
"""Retry a failed download."""
raise NotImplementedError
@pyqtSlot()
def try_retry(self):
"""Try to retry a download and show an error if it's unsupported."""
try:
self.retry()
except UnsupportedOperationError as e:
message.error(str(e))
def _get_open_filename(self):
"""Get the filename to open a download.
Returns None if no suitable filename was found.
"""
raise NotImplementedError
@pyqtSlot()
def open_file(self, cmdline=None):
"""Open the downloaded file.
Args:
cmdline: The command to use as string. A `{}` is expanded to the
filename. None means to use the system's default
application or `downloads.open_dispatcher` if set. If no
`{}` is found, the filename is appended to the cmdline.
"""
assert self.successful
filename = self._get_open_filename()
if filename is None: # pragma: no cover
log.downloads.error("No filename to open the download!")
return
# By using a singleshot timer, we ensure that we return fast. This
# is important on systems where process creation takes long, as
# otherwise the prompt might hang around and cause bugs
# (see issue #2296)
QTimer.singleShot(0, lambda: utils.open_file(filename, cmdline))
def _ensure_can_set_filename(self, filename):
"""Make sure we can still set a filename."""
raise NotImplementedError
def _after_set_filename(self):
"""Finish initialization based on self._filename."""
raise NotImplementedError
def _ask_confirm_question(self, title, msg):
"""Ask a confirmation question for the download."""
raise NotImplementedError
def _ask_create_parent_question(self, title, msg,
force_overwrite, remember_directory):
"""Ask a confirmation question for the parent directory."""
raise NotImplementedError
def _set_fileobj(self, fileobj, *, autoclose=True):
"""Set a file object to save the download to.
Not supported by QtWebEngine.
Args:
fileobj: The file object to download to.
autoclose: Close the file object automatically when it's done.
"""
raise NotImplementedError
def _set_tempfile(self, fileobj):
"""Set a temporary file when opening the download."""
raise NotImplementedError
def _set_filename(self, filename, *, force_overwrite=False,
remember_directory=True):
"""Set the filename to save the download to.
Args:
filename: The full filename to save the download to.
None: special value to stop the download.
force_overwrite: Force overwriting existing files.
remember_directory: If True, remember the directory for future
downloads.
"""
filename = os.path.expanduser(filename)
self._ensure_can_set_filename(filename)
self._filename = create_full_filename(self.basename, filename)
if self._filename is None:
# We only got a filename (without directory) or a relative path
# from the user, so we append that to the default directory and
# try again.
self._filename = create_full_filename(
self.basename, os.path.join(download_dir(), filename))
# At this point, we have a misconfigured XDG_DOWNLOAD_DIR, as
        # download_dir() + filename is still not an absolute path.
# The config value is checked for "absoluteness", but
# ~/.config/user-dirs.dirs may be misconfigured and a non-absolute path
# may be set for XDG_DOWNLOAD_DIR
if self._filename is None:
message.error(
"XDG_DOWNLOAD_DIR points to a relative path - please check"
" your ~/.config/user-dirs.dirs. The download is saved in"
" your home directory.",
)
# fall back to $HOME as download_dir
self._filename = create_full_filename(self.basename,
os.path.expanduser('~'))
dirname = os.path.dirname(self._filename)
if not os.path.exists(dirname):
txt = ("<b>{}</b> does not exist. Create it?".
format(html.escape(
os.path.join(dirname, ""))))
self._ask_create_parent_question("Create directory?", txt,
force_overwrite,
remember_directory)
else:
self._after_create_parent_question(force_overwrite,
remember_directory)
def _after_create_parent_question(self,
force_overwrite, remember_directory):
"""After asking about parent directory.
Args:
force_overwrite: Force overwriting existing files.
remember_directory: If True, remember the directory for future
downloads.
"""
global last_used_directory
try:
os.makedirs(os.path.dirname(self._filename))
except FileExistsError:
pass
except OSError as e:
self._die(e.strerror)
self.basename = os.path.basename(self._filename)
if remember_directory:
last_used_directory = os.path.dirname(self._filename)
log.downloads.debug("Setting filename to {}".format(self._filename))
if force_overwrite:
self._after_set_filename()
elif os.path.isfile(self._filename):
# The file already exists, so ask the user if it should be
# overwritten.
txt = "<b>{}</b> already exists. Overwrite?".format(
html.escape(self._filename))
self._ask_confirm_question("Overwrite existing file?", txt)
# FIFO, device node, etc. Make sure we want to do this
elif (os.path.exists(self._filename) and
not os.path.isdir(self._filename)):
txt = ("<b>{}</b> already exists and is a special file. Write to "
"it anyways?".format(html.escape(self._filename)))
self._ask_confirm_question("Overwrite special file?", txt)
else:
self._after_set_filename()
def _open_if_successful(self, cmdline):
"""Open the downloaded file, but only if it was successful.
Args:
cmdline: Passed to DownloadItem.open_file().
"""
if not self.successful:
log.downloads.debug("{} finished but not successful, not opening!"
.format(self))
return
self.open_file(cmdline)
def set_target(self, target):
"""Set the target for a given download.
Args:
target: The DownloadTarget for this download.
"""
if isinstance(target, FileObjDownloadTarget):
self._set_fileobj(target.fileobj, autoclose=False)
elif isinstance(target, FileDownloadTarget):
self._set_filename(target.filename)
elif isinstance(target, OpenFileDownloadTarget):
try:
fobj = temp_download_manager.get_tmpfile(self.basename)
except OSError as exc:
msg = "Download error: {}".format(exc)
message.error(msg)
self.cancel()
return
self.finished.connect(
functools.partial(self._open_if_successful, target.cmdline))
self._set_tempfile(fobj)
else: # pragma: no cover
raise ValueError("Unsupported download target: {}".format(target))
class AbstractDownloadManager(QObject):
"""Backend-independent download manager code.
Attributes:
downloads: A list of active DownloadItems.
_networkmanager: A NetworkManager for generic downloads.
Signals:
begin_remove_row: Emitted before downloads are removed.
end_remove_row: Emitted after downloads are removed.
begin_insert_row: Emitted before downloads are inserted.
end_insert_row: Emitted after downloads are inserted.
data_changed: Emitted when the data of the model changed.
The argument is the index of the changed download
"""
begin_remove_row = pyqtSignal(int)
end_remove_row = pyqtSignal()
begin_insert_row = pyqtSignal(int)
end_insert_row = pyqtSignal()
data_changed = pyqtSignal(int)
def __init__(self, parent=None):
super().__init__(parent)
self.downloads = []
self._update_timer = usertypes.Timer(self, 'download-update')
self._update_timer.timeout.connect(self._update_gui)
self._update_timer.setInterval(_REFRESH_INTERVAL)
def __repr__(self):
return utils.get_repr(self, downloads=len(self.downloads))
@pyqtSlot()
def _update_gui(self):
"""Periodical GUI update of all items."""
assert self.downloads
for dl in self.downloads:
dl.stats.update_speed()
self.data_changed.emit(-1)
def _init_item(self, download, auto_remove, suggested_filename):
"""Initialize a newly created DownloadItem."""
download.cancelled.connect(download.remove)
download.remove_requested.connect(functools.partial(
self._remove_item, download))
delay = config.val.downloads.remove_finished
if delay > -1:
download.finished.connect(
lambda: QTimer.singleShot(delay, download.remove))
elif auto_remove:
download.finished.connect(download.remove)
download.data_changed.connect(
functools.partial(self._on_data_changed, download))
download.error.connect(self._on_error)
download.basename = suggested_filename
idx = len(self.downloads)
download.index = idx + 1 # "Human readable" index
self.begin_insert_row.emit(idx)
self.downloads.append(download)
self.end_insert_row.emit()
if not self._update_timer.isActive():
self._update_timer.start()
@pyqtSlot(AbstractDownloadItem)
def _on_data_changed(self, download):
"""Emit data_changed signal when download data changed."""
try:
idx = self.downloads.index(download)
except ValueError:
# download has been deleted in the meantime
return
self.data_changed.emit(idx)
@pyqtSlot(str)
def _on_error(self, msg):
"""Display error message on download errors."""
message.error("Download error: {}".format(msg))
@pyqtSlot(AbstractDownloadItem)
def _remove_item(self, download):
"""Remove a given download."""
if sip.isdeleted(self):
# https://github.com/qutebrowser/qutebrowser/issues/1242
return
try:
idx = self.downloads.index(download)
except ValueError:
# already removed
return
self.begin_remove_row.emit(idx)
del self.downloads[idx]
self.end_remove_row.emit()
download.deleteLater()
self._update_indexes()
if not self.downloads:
self._update_timer.stop()
log.downloads.debug("Removed download {}".format(download))
def _update_indexes(self):
"""Update indexes of all DownloadItems."""
for i, d in enumerate(self.downloads, 1):
d.index = i
self.data_changed.emit(-1)
def _init_filename_question(self, question, download):
"""Set up an existing filename question with a download."""
question.answered.connect(download.set_target)
question.cancelled.connect(download.cancel)
download.cancelled.connect(question.abort)
download.error.connect(question.abort)
class DownloadModel(QAbstractListModel):
"""A list model showing downloads."""
def __init__(self, qtnetwork_manager, webengine_manager=None, parent=None):
super().__init__(parent)
self._qtnetwork_manager = qtnetwork_manager
self._webengine_manager = webengine_manager
qtnetwork_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=False))
qtnetwork_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=False))
qtnetwork_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=False))
qtnetwork_manager.end_insert_row.connect(self.endInsertRows)
qtnetwork_manager.end_remove_row.connect(self.endRemoveRows)
if webengine_manager is not None:
webengine_manager.data_changed.connect(
functools.partial(self._on_data_changed, webengine=True))
webengine_manager.begin_insert_row.connect(
functools.partial(self._on_begin_insert_row, webengine=True))
webengine_manager.begin_remove_row.connect(
functools.partial(self._on_begin_remove_row, webengine=True))
webengine_manager.end_insert_row.connect(self.endInsertRows)
webengine_manager.end_remove_row.connect(self.endRemoveRows)
def _all_downloads(self):
"""Combine downloads from both downloaders."""
if self._webengine_manager is None:
return self._qtnetwork_manager.downloads[:]
else:
return (self._qtnetwork_manager.downloads +
self._webengine_manager.downloads)
def __len__(self):
return len(self._all_downloads())
def __iter__(self):
return iter(self._all_downloads())
def __getitem__(self, idx):
return self._all_downloads()[idx]
def _on_begin_insert_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_insert_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginInsertRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginInsertRows(QModelIndex(), idx, idx)
def _on_begin_remove_row(self, idx, webengine=False):
log.downloads.debug("_on_begin_remove_row with idx {}, "
"webengine {}".format(idx, webengine))
if idx == -1:
self.beginRemoveRows(QModelIndex(), 0, -1)
return
assert idx >= 0, idx
if webengine:
idx += len(self._qtnetwork_manager.downloads)
self.beginRemoveRows(QModelIndex(), idx, idx)
def _on_data_changed(self, idx, *, webengine):
"""Called when a downloader's data changed.
Args:
            idx: The changed index as int, or -1 for all indices.
webengine: If given, the QtNetwork download length is added to the
index.
"""
if idx == -1:
start_index = self.index(0, 0)
end_index = self.last_index()
else:
if webengine:
idx += len(self._qtnetwork_manager.downloads)
start_index = self.index(idx, 0)
end_index = self.index(idx, 0)
qtutils.ensure_valid(start_index)
qtutils.ensure_valid(end_index)
self.dataChanged.emit(start_index, end_index)
def _raise_no_download(self, count):
"""Raise an exception that the download doesn't exist.
Args:
count: The index of the download
"""
if not count:
raise cmdexc.CommandError("There's no download!")
raise cmdexc.CommandError("There's no download {}!".format(count))
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_cancel(self, all_=False, count=0):
"""Cancel the last/[count]th download.
Args:
all_: Cancel all running downloads
count: The index of the download to cancel.
"""
downloads = self._all_downloads()
if all_:
for download in downloads:
if not download.done:
download.cancel()
else:
try:
download = downloads[count - 1]
except IndexError:
self._raise_no_download(count)
if download.done:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is already done!"
.format(count))
download.cancel()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_delete(self, count=0):
"""Delete the last/[count]th download from disk.
Args:
count: The index of the download to delete.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.delete()
download.remove()
log.downloads.debug("deleted download {}".format(download))
@cmdutils.register(instance='download-model', scope='window', maxsplit=0)
@cmdutils.argument('count', count=True)
def download_open(self, cmdline: str = None, count=0):
"""Open the last/[count]th download.
If no specific command is given, this will use the system's default
application to open the file.
Args:
cmdline: The command which should be used to open the file. A `{}`
is expanded to the temporary file name. If no `{}` is
present, the filename is automatically appended to the
cmdline.
count: The index of the download to open.
"""
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.successful:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!".format(count))
download.open_file(cmdline)
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_retry(self, count=0):
"""Retry the first failed/[count]th download.
Args:
count: The index of the download to retry.
"""
if count:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if download.successful or not download.done:
raise cmdexc.CommandError("Download {} did not fail!".format(
count))
else:
to_retry = [d for d in self if d.done and not d.successful]
if not to_retry:
raise cmdexc.CommandError("No failed downloads!")
else:
download = to_retry[0]
download.try_retry()
def can_clear(self):
"""Check if there are finished downloads to clear."""
return any(download.done for download in self)
@cmdutils.register(instance='download-model', scope='window')
def download_clear(self):
"""Remove all finished downloads from the list."""
for download in self:
if download.done:
download.remove()
@cmdutils.register(instance='download-model', scope='window')
@cmdutils.argument('count', count=True)
def download_remove(self, all_=False, count=0):
"""Remove the last/[count]th download from the list.
Args:
all_: Remove all finished downloads.
count: The index of the download to remove.
"""
if all_:
self.download_clear()
else:
try:
download = self[count - 1]
except IndexError:
self._raise_no_download(count)
if not download.done:
if not count:
count = len(self)
raise cmdexc.CommandError("Download {} is not done!"
.format(count))
download.remove()
def running_downloads(self):
"""Return the amount of still running downloads.
Return:
The number of unfinished downloads.
"""
return sum(1 for download in self if not download.done)
def last_index(self):
"""Get the last index in the model.
Return:
A (possibly invalid) QModelIndex.
"""
idx = self.index(self.rowCount() - 1)
return idx
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Simple constant header."""
if (section == 0 and orientation == Qt.Horizontal and
role == Qt.DisplayRole):
return "Downloads"
else:
return ""
def data(self, index, role):
"""Download data from DownloadManager."""
if not index.isValid():
return None
if index.parent().isValid() or index.column() != 0:
return None
item = self[index.row()]
if role == Qt.DisplayRole:
data = str(item)
elif role == Qt.ForegroundRole:
data = item.get_status_color('fg')
elif role == Qt.BackgroundRole:
data = item.get_status_color('bg')
elif role == ModelRole.item:
data = item
elif role == Qt.ToolTipRole:
if item.error_msg is None:
data = None
else:
return item.error_msg
else:
data = None
return data
def flags(self, index):
"""Override flags so items aren't selectable.
The default would be Qt.ItemIsEnabled | Qt.ItemIsSelectable.
"""
if not index.isValid():
return Qt.ItemFlags()
return Qt.ItemIsEnabled | Qt.ItemNeverHasChildren
def rowCount(self, parent=QModelIndex()):
"""Get count of active downloads."""
if parent.isValid():
# We don't have children
return 0
return len(self)
class TempDownloadManager:
"""Manager to handle temporary download files.
    The downloads are downloaded to a temporary location and then opened with
    the system standard application. The temporary files are deleted when
    qutebrowser is shut down.
Attributes:
files: A list of NamedTemporaryFiles of downloaded items.
"""
def __init__(self):
self.files = []
self._tmpdir = None
def cleanup(self):
"""Clean up any temporary files."""
if self._tmpdir is not None:
try:
self._tmpdir.cleanup()
except OSError:
log.misc.exception("Failed to clean up temporary download "
"directory")
self._tmpdir = None
def _get_tmpdir(self):
"""Return the temporary directory that is used for downloads.
The directory is created lazily on first access.
Return:
The tempfile.TemporaryDirectory that is used.
"""
if self._tmpdir is None:
self._tmpdir = tempfile.TemporaryDirectory(
prefix='qutebrowser-downloads-')
return self._tmpdir
def get_tmpfile(self, suggested_name):
"""Return a temporary file in the temporary downloads directory.
The files are kept as long as qutebrowser is running and automatically
cleaned up at program exit.
Args:
suggested_name: str of the "suggested"/original filename. Used as a
                            suffix, so any file extensions are preserved.
Return:
A tempfile.NamedTemporaryFile that should be used to save the file.
"""
tmpdir = self._get_tmpdir()
encoding = sys.getfilesystemencoding()
suggested_name = utils.force_encoding(suggested_name, encoding)
# Make sure that the filename is not too long
suggested_name = utils.elide_filename(suggested_name, 50)
fobj = tempfile.NamedTemporaryFile(dir=tmpdir.name, delete=False,
suffix=suggested_name)
self.files.append(fobj)
return fobj
temp_download_manager = TempDownloadManager()
| 1 | 20,585 | This should be `toDisplayString()` to not contain e.g. passwords. | qutebrowser-qutebrowser | py |
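The review note above refers to Qt's QUrl API: toDisplayString() omits the password component by default, while a plain string conversion keeps it, so credentials could end up in logs or the UI. As a rough analogue in Go (chosen only because most of the surrounding records are Go; it is not the API the qutebrowser record uses), the standard library's net/url provides Redacted(), which masks the password before the URL is rendered:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A URL carrying credentials, e.g. a download target typed by the user.
	u, err := url.Parse("https://alice:[email protected]/files/report.pdf")
	if err != nil {
		panic(err)
	}

	// String() keeps the password and is unsafe to show or log.
	fmt.Println(u.String())

	// Redacted() replaces the password with "xxxxx", which is the behaviour
	// the reviewer is asking for: never surface credentials to the user.
	fmt.Println(u.Redacted())
}
```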
@@ -180,6 +180,10 @@ class <%= controller_name.classify %>Controller < ApplicationController
# If there are more than this many search results, no spelling ("did you
# mean") suggestion is offered.
config.spell_max = 5
+
+ # Configuration for autocomplete suggestor
+ config.autocomplete_enabled = true
+ config.autocomplete_path = 'suggest'
end
end | 1 | # -*- encoding : utf-8 -*-
class <%= controller_name.classify %>Controller < ApplicationController
include Blacklight::Catalog
configure_blacklight do |config|
## Default parameters to send to solr for all search-like requests. See also SearchBuilder#processed_parameters
config.default_solr_params = {
:rows => 10
}
# solr path which will be added to solr base url before the other solr params.
#config.solr_path = 'select'
    # items to show per page, each number in the array represents another option to choose from.
#config.per_page = [10,20,50,100]
    ## Default parameters to send on single-document requests to Solr. These settings are the Blacklight defaults (see SearchHelper#solr_doc_params) or
## parameters included in the Blacklight-jetty document requestHandler.
#
#config.default_document_solr_params = {
# :qt => 'document',
# ## These are hard-coded in the blacklight 'document' requestHandler
# # :fl => '*',
# # :rows => 1
# # :q => '{!term f=id v=$id}'
#}
# solr field configuration for search results/index views
config.index.title_field = 'title_display'
config.index.display_type_field = 'format'
# solr field configuration for document/show views
#config.show.title_field = 'title_display'
#config.show.display_type_field = 'format'
# solr fields that will be treated as facets by the blacklight application
# The ordering of the field names is the order of the display
#
# Setting a limit will trigger Blacklight's 'more' facet values link.
# * If left unset, then all facet values returned by solr will be displayed.
# * If set to an integer, then "f.somefield.facet.limit" will be added to
# solr request, with actual solr request being +1 your configured limit --
# you configure the number of items you actually want _displayed_ in a page.
# * If set to 'true', then no additional parameters will be sent to solr,
# but any 'sniffed' request limit parameters will be used for paging, with
# paging at requested limit -1. Can sniff from facet.limit or
# f.specific_field.facet.limit solr request params. This 'true' config
# can be used if you set limits in :default_solr_params, or as defaults
# on the solr side in the request handler itself. Request handler defaults
# sniffing requires solr requests to be made with "echoParams=all", for
# app code to actually have it echo'd back to see it.
#
# :show may be set to false if you don't want the facet to be drawn in the
# facet bar
#
# set :index_range to true if you want the facet pagination view to have facet prefix-based navigation
# (useful when user clicks "more" on a large facet and wants to navigate alphabetically across a large set of results)
# :index_range can be an array or range of prefixes that will be used to create the navigation (note: It is case sensitive when searching values)
config.add_facet_field 'format', :label => 'Format'
config.add_facet_field 'pub_date', :label => 'Publication Year', :single => true
config.add_facet_field 'subject_topic_facet', :label => 'Topic', :limit => 20, :index_range => 'A'..'Z'
config.add_facet_field 'language_facet', :label => 'Language', :limit => true
config.add_facet_field 'lc_1letter_facet', :label => 'Call Number'
config.add_facet_field 'subject_geo_facet', :label => 'Region'
config.add_facet_field 'subject_era_facet', :label => 'Era'
config.add_facet_field 'example_pivot_field', :label => 'Pivot Field', :pivot => ['format', 'language_facet']
config.add_facet_field 'example_query_facet_field', :label => 'Publish Date', :query => {
:years_5 => { :label => 'within 5 Years', :fq => "pub_date:[#{Time.now.year - 5 } TO *]" },
:years_10 => { :label => 'within 10 Years', :fq => "pub_date:[#{Time.now.year - 10 } TO *]" },
:years_25 => { :label => 'within 25 Years', :fq => "pub_date:[#{Time.now.year - 25 } TO *]" }
}
# Have BL send all facet field names to Solr, which has been the default
# previously. Simply remove these lines if you'd rather use Solr request
# handler defaults, or have no facets.
config.add_facet_fields_to_solr_request!
# solr fields to be displayed in the index (search results) view
# The ordering of the field names is the order of the display
config.add_index_field 'title_display', :label => 'Title'
config.add_index_field 'title_vern_display', :label => 'Title'
config.add_index_field 'author_display', :label => 'Author'
config.add_index_field 'author_vern_display', :label => 'Author'
config.add_index_field 'format', :label => 'Format'
config.add_index_field 'language_facet', :label => 'Language'
config.add_index_field 'published_display', :label => 'Published'
config.add_index_field 'published_vern_display', :label => 'Published'
config.add_index_field 'lc_callnum_display', :label => 'Call number'
# solr fields to be displayed in the show (single result) view
# The ordering of the field names is the order of the display
config.add_show_field 'title_display', :label => 'Title'
config.add_show_field 'title_vern_display', :label => 'Title'
config.add_show_field 'subtitle_display', :label => 'Subtitle'
config.add_show_field 'subtitle_vern_display', :label => 'Subtitle'
config.add_show_field 'author_display', :label => 'Author'
config.add_show_field 'author_vern_display', :label => 'Author'
config.add_show_field 'format', :label => 'Format'
config.add_show_field 'url_fulltext_display', :label => 'URL'
config.add_show_field 'url_suppl_display', :label => 'More Information'
config.add_show_field 'language_facet', :label => 'Language'
config.add_show_field 'published_display', :label => 'Published'
config.add_show_field 'published_vern_display', :label => 'Published'
config.add_show_field 'lc_callnum_display', :label => 'Call number'
config.add_show_field 'isbn_t', :label => 'ISBN'
# "fielded" search configuration. Used by pulldown among other places.
# For supported keys in hash, see rdoc for Blacklight::SearchFields
#
# Search fields will inherit the :qt solr request handler from
# config[:default_solr_parameters], OR can specify a different one
# with a :qt key/value. Below examples inherit, except for subject
# that specifies the same :qt as default for our own internal
# testing purposes.
#
# The :key is what will be used to identify this BL search field internally,
# as well as in URLs -- so changing it after deployment may break bookmarked
# urls. A display label will be automatically calculated from the :key,
# or can be specified manually to be different.
# This one uses all the defaults set by the solr request handler. Which
# solr request handler? The one set in config[:default_solr_parameters][:qt],
# since we aren't specifying it otherwise.
config.add_search_field 'all_fields', :label => 'All Fields'
# Now we see how to over-ride Solr request handler defaults, in this
# case for a BL "search field", which is really a dismax aggregate
# of Solr search fields.
config.add_search_field('title') do |field|
# solr_parameters hash are sent to Solr as ordinary url query params.
field.solr_parameters = { :'spellcheck.dictionary' => 'title' }
# :solr_local_parameters will be sent using Solr LocalParams
      # syntax, as eg {! qf=$title_qf }. This is necessary to use
# Solr parameter de-referencing like $title_qf.
# See: http://wiki.apache.org/solr/LocalParams
field.solr_local_parameters = {
:qf => '$title_qf',
:pf => '$title_pf'
}
end
config.add_search_field('author') do |field|
field.solr_parameters = { :'spellcheck.dictionary' => 'author' }
field.solr_local_parameters = {
:qf => '$author_qf',
:pf => '$author_pf'
}
end
# Specifying a :qt only to show it's possible, and so our internal automated
# tests can test it. In this case it's the same as
    # config[:default_solr_parameters][:qt], so isn't actually necessary.
config.add_search_field('subject') do |field|
field.solr_parameters = { :'spellcheck.dictionary' => 'subject' }
field.qt = 'search'
field.solr_local_parameters = {
:qf => '$subject_qf',
:pf => '$subject_pf'
}
end
# "sort results by" select (pulldown)
# label in pulldown is followed by the name of the SOLR field to sort by and
# whether the sort is ascending or descending (it must be asc or desc
# except in the relevancy case).
config.add_sort_field 'score desc, pub_date_sort desc, title_sort asc', :label => 'relevance'
config.add_sort_field 'pub_date_sort desc, title_sort asc', :label => 'year'
config.add_sort_field 'author_sort asc, title_sort asc', :label => 'author'
config.add_sort_field 'title_sort asc, pub_date_sort desc', :label => 'title'
# If there are more than this many search results, no spelling ("did you
# mean") suggestion is offered.
config.spell_max = 5
end
end
| 1 | 6,209 | Can we just say that having a non-nil `autocomplete_path` implies that autocomplete is enabled? | projectblacklight-blacklight | rb |
@@ -25,7 +25,7 @@ import (
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/gogo/protobuf/proto"
+ "github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker/decls" | 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"encoding/json"
"fmt"
"net/http"
"reflect"
"regexp"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/gogo/protobuf/proto"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/google/cel-go/cel"
"github.com/google/cel-go/checker/decls"
"github.com/google/cel-go/common/types"
"github.com/google/cel-go/common/types/ref"
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/ext"
"github.com/google/cel-go/interpreter/functions"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)
func init() {
caddy.RegisterModule(MatchExpression{})
}
// MatchExpression matches requests by evaluating a
// [CEL](https://github.com/google/cel-spec) expression.
// This enables complex logic to be expressed using a comfortable,
// familiar syntax. Please refer to
// [the standard definitions of CEL functions and operators](https://github.com/google/cel-spec/blob/master/doc/langdef.md#standard-definitions).
//
// This matcher's JSON interface is actually a string, not a struct.
// The generated docs are not correct because this type has custom
// marshaling logic.
//
// COMPATIBILITY NOTE: This module is still experimental and is not
// subject to Caddy's compatibility guarantee.
type MatchExpression struct {
// The CEL expression to evaluate. Any Caddy placeholders
// will be expanded and situated into proper CEL function
// calls before evaluating.
Expr string
expandedExpr string
prg cel.Program
ta ref.TypeAdapter
}
// CaddyModule returns the Caddy module information.
func (MatchExpression) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "http.matchers.expression",
New: func() caddy.Module { return new(MatchExpression) },
}
}
// MarshalJSON marshals m's expression.
func (m MatchExpression) MarshalJSON() ([]byte, error) {
return json.Marshal(m.Expr)
}
// UnmarshalJSON unmarshals m's expression.
func (m *MatchExpression) UnmarshalJSON(data []byte) error {
return json.Unmarshal(data, &m.Expr)
}
// Provision sets ups m.
func (m *MatchExpression) Provision(_ caddy.Context) error {
// replace placeholders with a function call - this is just some
// light (and possibly naïve) syntactic sugar
m.expandedExpr = placeholderRegexp.ReplaceAllString(m.Expr, placeholderExpansion)
// our type adapter expands CEL's standard type support
m.ta = celTypeAdapter{}
// create the CEL environment
env, err := cel.NewEnv(
cel.Declarations(
decls.NewVar("request", httpRequestObjectType),
decls.NewFunction(placeholderFuncName,
decls.NewOverload(placeholderFuncName+"_httpRequest_string",
[]*exprpb.Type{httpRequestObjectType, decls.String},
decls.Any)),
),
cel.CustomTypeAdapter(m.ta),
ext.Strings(),
)
if err != nil {
return fmt.Errorf("setting up CEL environment: %v", err)
}
// parse and type-check the expression
checked, issues := env.Compile(m.expandedExpr)
if issues != nil && issues.Err() != nil {
return fmt.Errorf("compiling CEL program: %s", issues.Err())
}
// request matching is a boolean operation, so we don't really know
// what to do if the expression returns a non-boolean type
if !proto.Equal(checked.ResultType(), decls.Bool) {
return fmt.Errorf("CEL request matcher expects return type of bool, not %s", checked.ResultType())
}
// compile the "program"
m.prg, err = env.Program(checked,
cel.Functions(
&functions.Overload{
Operator: placeholderFuncName,
Binary: m.caddyPlaceholderFunc,
},
),
)
if err != nil {
return fmt.Errorf("compiling CEL program: %s", err)
}
return nil
}
// Match returns true if r matches m.
func (m MatchExpression) Match(r *http.Request) bool {
out, _, _ := m.prg.Eval(map[string]interface{}{
"request": celHTTPRequest{r},
})
if outBool, ok := out.Value().(bool); ok {
return outBool
}
return false
}
// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (m *MatchExpression) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
m.Expr = strings.Join(d.RemainingArgs(), " ")
}
return nil
}
// caddyPlaceholderFunc implements the custom CEL function that accesses the
// Replacer on a request and gets values from it.
func (m MatchExpression) caddyPlaceholderFunc(lhs, rhs ref.Val) ref.Val {
celReq, ok := lhs.(celHTTPRequest)
if !ok {
return types.NewErr(
"invalid request of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
lhs.Type())
}
phStr, ok := rhs.(types.String)
if !ok {
return types.NewErr(
"invalid placeholder variable name of type '%v' to "+placeholderFuncName+"(request, placeholderVarName)",
rhs.Type())
}
repl := celReq.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
val, _ := repl.Get(string(phStr))
return m.ta.NativeToValue(val)
}
// httpRequestCELType is the type representation of a native HTTP request.
var httpRequestCELType = types.NewTypeValue("http.Request", traits.ReceiverType)
// celHTTPRequest wraps an http.Request with
// methods to satisfy the ref.Val interface.
type celHTTPRequest struct{ *http.Request }
func (cr celHTTPRequest) ConvertToNative(typeDesc reflect.Type) (interface{}, error) {
return cr.Request, nil
}
func (celHTTPRequest) ConvertToType(typeVal ref.Type) ref.Val {
panic("not implemented")
}
func (cr celHTTPRequest) Equal(other ref.Val) ref.Val {
if o, ok := other.Value().(celHTTPRequest); ok {
return types.Bool(o.Request == cr.Request)
}
return types.ValOrErr(other, "%v is not comparable type", other)
}
func (celHTTPRequest) Type() ref.Type { return httpRequestCELType }
func (cr celHTTPRequest) Value() interface{} { return cr }
// celTypeAdapter can adapt our custom types to a CEL value.
type celTypeAdapter struct{}
func (celTypeAdapter) NativeToValue(value interface{}) ref.Val {
switch v := value.(type) {
case celHTTPRequest:
return v
case time.Time:
// TODO: eliminate direct protobuf dependency, sigh -- just wrap stdlib time.Time instead...
		return types.Timestamp{Timestamp: &timestamp.Timestamp{Seconds: v.Unix(), Nanos: int32(v.Nanosecond())}}
case error:
		return types.NewErr(v.Error())
}
return types.DefaultTypeAdapter.NativeToValue(value)
}
// Variables used for replacing Caddy placeholders in CEL
// expressions with a proper CEL function call; this is
// just for syntactic sugar.
var (
placeholderRegexp = regexp.MustCompile(`{([\w.-]+)}`)
placeholderExpansion = `caddyPlaceholder(request, "${1}")`
)
var httpRequestObjectType = decls.NewObjectType("http.Request")
// The name of the CEL function which accesses Replacer values.
const placeholderFuncName = "caddyPlaceholder"
// Interface guards
var (
_ caddy.Provisioner = (*MatchExpression)(nil)
_ RequestMatcher = (*MatchExpression)(nil)
_ caddyfile.Unmarshaler = (*MatchExpression)(nil)
_ json.Marshaler = (*MatchExpression)(nil)
_ json.Unmarshaler = (*MatchExpression)(nil)
)
| 1 | 15,212 | Note that the package github.com/golang/protobuf/proto is deprecated. We're instructed to use the "google.golang.org/protobuf/proto" package instead. I didn't want to change it now to avoid intrusive changes whose consequences are, currently, unknown. | caddyserver-caddy | go |
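The comment above recommends moving from the deprecated github.com/golang/protobuf/proto package to google.golang.org/protobuf/proto. A minimal sketch of code written against the newer module, using only the well-known timestamppb helper and proto.Equal (neither is taken from the Caddy file itself, which the reviewer deliberately left untouched):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Build two timestamp messages from the same instant with the
	// non-deprecated protobuf module.
	now := time.Now()
	a := timestamppb.New(now)
	b := timestamppb.New(now)

	// proto.Equal has the same shape as in the old package, so call sites
	// such as the ResultType comparison above usually only need the import
	// path swapped.
	fmt.Println(proto.Equal(a, b)) // true
}
```

Whether the generated CEL/genproto types in the file are ready for the newer module depends on their own code-generation version, which is presumably why the migration was deferred to a separate change.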
@@ -4,14 +4,3 @@ get "/pages/*id" => "pages#show", format: false
get "/subscribe" => "promoted_catalogs#show", as: 'subscribe'
get "/privacy" => "pages#show", as: :privacy, id: 'privacy'
get "/terms" => "pages#show", as: :terms, id: 'terms'
-
-get(
- "/group-training" => "pages#show",
- as: :group_training,
- id: "group-training"
-)
-get(
- "/rubyist-booster-shot" => "pages#show",
- as: :rubyist_booster_shot,
- id: "rubyist-booster-shot"
-) | 1 | get "/purchases/:lookup" => "pages#show", id: "purchase-show"
get "/pages/*id" => "pages#show", format: false
get "/subscribe" => "promoted_catalogs#show", as: 'subscribe'
get "/privacy" => "pages#show", as: :privacy, id: 'privacy'
get "/terms" => "pages#show", as: :terms, id: 'terms'
get(
"/group-training" => "pages#show",
as: :group_training,
id: "group-training"
)
get(
"/rubyist-booster-shot" => "pages#show",
as: :rubyist_booster_shot,
id: "rubyist-booster-shot"
)
| 1 | 12,268 | Are these gone routes? Do we need to 301 them? | thoughtbot-upcase | rb |
@@ -18,10 +18,13 @@ import (
"bytes"
"encoding/json"
"fmt"
+ "github.com/vmware-tanzu/antrea/pkg/apis/networking/v1beta1"
+ "github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy"
"io"
"os"
"reflect"
"sort"
+ "strconv"
"strings"
"text/tabwriter"
| 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package antctl
import (
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"reflect"
"sort"
"strings"
"text/tabwriter"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog"
"github.com/vmware-tanzu/antrea/pkg/antctl/runtime"
"github.com/vmware-tanzu/antrea/pkg/antctl/transform/common"
)
type formatterType string
const (
jsonFormatter formatterType = "json"
yamlFormatter formatterType = "yaml"
tableFormatter formatterType = "table"
)
const (
maxTableOutputColumnLength int = 50
)
// commandGroup is used to group commands, it could be specified in commandDefinition.
// The default commandGroup of a commandDefinition is `flat` which means the command
// is a direct sub-command of the root command. For any other commandGroup, the
// antctl framework will generate a same name sub-command of the root command for
// each of them, any commands specified as one of these group will need to be invoked
// as:
// antctl <commandGroup> <command>
type commandGroup uint
type OutputType uint
// There are two output types: single item or list and the actual type is decided by
// OutputType value here and command's arguments.
const (
// defaultType represents the output type is single item if there is an argument
// and its value is provided. If not, the output type is list.
defaultType OutputType = iota
// single represents the output type is always single item.
single
// multiple represents the output type is always list.
multiple
)
const (
flat commandGroup = iota
get
)
var groupCommands = map[commandGroup]*cobra.Command{
get: {
Use: "get",
Short: "Get the status or resource of a topic",
Long: "Get the status or resource of a topic",
},
}
type endpointResponder interface {
OutputType() OutputType
flags() []flagInfo
}
type resourceEndpoint struct {
groupVersionResource *schema.GroupVersionResource
resourceName string
namespaced bool
}
func (e *resourceEndpoint) OutputType() OutputType {
if len(e.resourceName) != 0 {
return single
}
return defaultType
}
func (e *resourceEndpoint) flags() []flagInfo {
var flags []flagInfo
if len(e.resourceName) == 0 {
flags = append(flags, flagInfo{
name: "name",
defaultValue: "",
arg: true,
usage: "Retrieve the resource by name",
})
}
if e.namespaced {
flags = append(flags, flagInfo{
name: "namespace",
shorthand: "n",
defaultValue: metav1.NamespaceAll,
usage: "Filter the resource by namespace",
})
}
return flags
}
type nonResourceEndpoint struct {
path string
params []flagInfo
outputType OutputType
}
func (e *nonResourceEndpoint) flags() []flagInfo {
return e.params
}
func (e *nonResourceEndpoint) OutputType() OutputType {
return e.outputType
}
// endpoint is used to specify the API for an antctl running against antrea-controller.
type endpoint struct {
resourceEndpoint *resourceEndpoint
nonResourceEndpoint *nonResourceEndpoint
// addonTransform is used to transform or update the response data received
	// from the handler, it must return an interface which has the same type as
// TransformedResponse.
addonTransform func(reader io.Reader, single bool) (interface{}, error)
}
// flagInfo represents a command-line flag that can be provided when invoking an antctl command.
type flagInfo struct {
name string
shorthand string
defaultValue string
arg bool
usage string
}
// rawCommand defines a fully functional cobra.Command which lets developers
// write complex client-side tasks. Only the global flags of the antctl framework will
// be passed to the cobra.Command.
type rawCommand struct {
cobraCommand *cobra.Command
supportAgent bool
supportController bool
}
// commandDefinition defines options to create a cobra.Command for an antctl client.
type commandDefinition struct {
// Cobra related
use string
aliases []string
short string
long string
example string // It will be filled with generated examples if it is not provided.
// commandGroup represents the group of the command.
commandGroup commandGroup
controllerEndpoint *endpoint
agentEndpoint *endpoint
// transformedResponse is the final response struct of the command. If the
// AddonTransform is set, TransformedResponse is not needed to be used as the
// response struct of the handler, but it is still needed to guide the formatter.
// It should always be filled.
transformedResponse reflect.Type
}
func (cd *commandDefinition) namespaced() bool {
if runtime.Mode == runtime.ModeAgent {
return cd.agentEndpoint != nil && cd.agentEndpoint.resourceEndpoint != nil && cd.agentEndpoint.resourceEndpoint.namespaced
} else if runtime.Mode == runtime.ModeController {
return cd.controllerEndpoint != nil && cd.controllerEndpoint.resourceEndpoint != nil && cd.controllerEndpoint.resourceEndpoint.namespaced
}
return false
}
func (cd *commandDefinition) getAddonTransform() func(reader io.Reader, single bool) (interface{}, error) {
if runtime.Mode == runtime.ModeAgent && cd.agentEndpoint != nil {
return cd.agentEndpoint.addonTransform
} else if runtime.Mode == runtime.ModeController && cd.controllerEndpoint != nil {
return cd.controllerEndpoint.addonTransform
}
return nil
}
func (cd *commandDefinition) getEndpoint() endpointResponder {
if runtime.Mode == runtime.ModeAgent {
if cd.agentEndpoint != nil {
if cd.agentEndpoint.resourceEndpoint != nil {
return cd.agentEndpoint.resourceEndpoint
}
return cd.agentEndpoint.nonResourceEndpoint
}
} else if runtime.Mode == runtime.ModeController {
if cd.controllerEndpoint != nil {
if cd.controllerEndpoint.resourceEndpoint != nil {
return cd.controllerEndpoint.resourceEndpoint
}
return cd.controllerEndpoint.nonResourceEndpoint
}
}
return nil
}
// applySubCommandToRoot applies the commandDefinition to a cobra.Command with
// the client. It populates basic fields of a cobra.Command and creates the
// appropriate RunE function for it according to the commandDefinition.
func (cd *commandDefinition) applySubCommandToRoot(root *cobra.Command, client *client) {
cmd := &cobra.Command{
Use: cd.use,
Aliases: cd.aliases,
Short: cd.short,
Long: cd.long,
}
renderDescription(cmd)
cd.applyFlagsToCommand(cmd)
if groupCommand, ok := groupCommands[cd.commandGroup]; ok {
groupCommand.AddCommand(cmd)
} else {
root.AddCommand(cmd)
}
cd.applyExampleToCommand(cmd)
cmd.RunE = cd.newCommandRunE(client)
}
// validate checks if the commandDefinition is valid.
func (cd *commandDefinition) validate() []error {
var errs []error
if len(cd.use) == 0 {
errs = append(errs, fmt.Errorf("the command does not have name"))
}
existingAliases := make(map[string]bool)
for _, a := range cd.aliases {
if a == cd.use {
errs = append(errs, fmt.Errorf("%s: command alias is the same with use of the command", cd.use))
}
if _, ok := existingAliases[a]; ok {
errs = append(errs, fmt.Errorf("%s: command alias is provided twice: %s", cd.use, a))
}
existingAliases[a] = true
}
if cd.transformedResponse == nil {
errs = append(errs, fmt.Errorf("%s: command does not define output struct", cd.use))
}
if cd.agentEndpoint == nil && cd.controllerEndpoint == nil {
errs = append(errs, fmt.Errorf("%s: command does not define any supported component", cd.use))
}
if cd.agentEndpoint != nil && cd.agentEndpoint.nonResourceEndpoint != nil && cd.agentEndpoint.resourceEndpoint != nil {
errs = append(errs, fmt.Errorf("%s: command for agent can only define one endpoint", cd.use))
}
if cd.agentEndpoint != nil && cd.agentEndpoint.nonResourceEndpoint == nil && cd.agentEndpoint.resourceEndpoint == nil {
errs = append(errs, fmt.Errorf("%s: command for agent must define one endpoint", cd.use))
}
if cd.controllerEndpoint != nil && cd.controllerEndpoint.nonResourceEndpoint != nil && cd.controllerEndpoint.resourceEndpoint != nil {
errs = append(errs, fmt.Errorf("%s: command for controller can only define one endpoint", cd.use))
}
if cd.controllerEndpoint != nil && cd.controllerEndpoint.nonResourceEndpoint == nil && cd.controllerEndpoint.resourceEndpoint == nil {
errs = append(errs, fmt.Errorf("%s: command for controller must define one endpoint", cd.use))
}
empty := struct{}{}
existingFlags := map[string]struct{}{"output": empty, "help": empty, "kubeconfig": empty, "timeout": empty, "verbose": empty}
if endpoint := cd.getEndpoint(); endpoint != nil {
for _, f := range endpoint.flags() {
if len(f.name) == 0 {
errs = append(errs, fmt.Errorf("%s: flag name cannot be empty", cd.use))
} else {
if _, ok := existingFlags[f.name]; ok {
errs = append(errs, fmt.Errorf("%s: flag redefined: %s", cd.use, f.name))
}
existingFlags[f.name] = empty
}
if len(f.shorthand) > 1 {
errs = append(errs, fmt.Errorf("%s: length of a flag shorthand cannot be larger than 1: %s", cd.use, f.shorthand))
}
}
}
return errs
}
// decode parses the data in reader and converts it to one or more
// TransformedResponse objects. If single is false, the return type is
// []TransformedResponse. Otherwise, the return type is TransformedResponse.
func (cd *commandDefinition) decode(r io.Reader, single bool) (interface{}, error) {
var refType reflect.Type
if single {
refType = cd.transformedResponse
} else {
refType = reflect.SliceOf(cd.transformedResponse)
}
ref := reflect.New(refType)
err := json.NewDecoder(r).Decode(ref.Interface())
if err != nil {
return nil, err
}
if single {
return ref.Interface(), nil
}
return reflect.Indirect(ref).Interface(), nil
}
func jsonEncode(obj interface{}, output *bytes.Buffer) error {
if err := json.NewEncoder(output).Encode(obj); err != nil {
return fmt.Errorf("error when encoding data in json: %w", err)
}
return nil
}
func (cd *commandDefinition) jsonOutput(obj interface{}, writer io.Writer) error {
var output bytes.Buffer
if err := jsonEncode(obj, &output); err != nil {
return fmt.Errorf("error when encoding data in json: %w", err)
}
var prettifiedBuf bytes.Buffer
err := json.Indent(&prettifiedBuf, output.Bytes(), "", " ")
if err != nil {
return fmt.Errorf("error when formatting outputing in json: %w", err)
}
_, err = io.Copy(writer, &prettifiedBuf)
if err != nil {
return fmt.Errorf("error when outputing in json format: %w", err)
}
return nil
}
func (cd *commandDefinition) yamlOutput(obj interface{}, writer io.Writer) error {
var jsonObj interface{}
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(obj); err != nil {
return fmt.Errorf("error when outputing in yaml format: %w", err)
}
// Comment copied from: sigs.k8s.io/yaml
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshalling to interface{}, it just picks float64
// universally. go-yaml does go through the effort of picking the right
// number type, so we can preserve number type throughout this process.
if err := yaml.Unmarshal(buf.Bytes(), &jsonObj); err != nil {
return fmt.Errorf("error when outputing in yaml format: %w", err)
}
if err := yaml.NewEncoder(writer).Encode(jsonObj); err != nil {
return fmt.Errorf("error when outputing in yaml format: %w", err)
}
return nil
}
// respTransformer collects output fields in original transformedResponse
// and flattens them. respTransformer realizes this by turning obj into
// JSON and unmarshalling it.
// E.g. agent's transformedVersionResponse will only have two fields after
// transforming: agentVersion and antctlVersion.
func respTransformer(obj interface{}) (interface{}, error) {
var jsonObj bytes.Buffer
if err := json.NewEncoder(&jsonObj).Encode(obj); err != nil {
return nil, fmt.Errorf("error when encoding data in json: %w", err)
}
jsonStr := jsonObj.String()
var target interface{}
if err := json.Unmarshal([]byte(jsonStr), &target); err != nil {
return nil, fmt.Errorf("error when unmarshalling data in json: %w", err)
}
return target, nil
}
// tableOutputForGetCommands formats the table output for "get" commands.
func (cd *commandDefinition) tableOutputForGetCommands(obj interface{}, writer io.Writer) error {
var list []common.TableOutput
if reflect.TypeOf(obj).Kind() == reflect.Slice {
s := reflect.ValueOf(obj)
if s.Len() == 0 || s.Index(0).Interface() == nil {
var buffer bytes.Buffer
buffer.WriteString("\n")
if _, err := io.Copy(writer, &buffer); err != nil {
return fmt.Errorf("error when copy output into writer: %w", err)
}
return nil
}
if _, ok := s.Index(0).Interface().(common.TableOutput); !ok {
return cd.tableOutput(obj, writer)
}
for i := 0; i < s.Len(); i++ {
ele := s.Index(i)
list = append(list, ele.Interface().(common.TableOutput))
}
} else {
ele, ok := obj.(common.TableOutput)
if !ok {
return cd.tableOutput(obj, writer)
}
list = []common.TableOutput{ele}
}
// Get the elements and headers of table.
args := list[0].GetTableHeader()
rows := make([][]string, len(list)+1)
rows[0] = list[0].GetTableHeader()
for i, element := range list {
rows[i+1] = element.GetTableRow(maxTableOutputColumnLength)
}
if list[0].SortRows() {
// Sort the table rows according to columns in order.
body := rows[1:]
sort.Slice(body, func(i, j int) bool {
for k := range body[i] {
if body[i][k] != body[j][k] {
return body[i][k] < body[j][k]
}
}
return true
})
}
numColumns := len(args)
widths := make([]int, numColumns)
if numColumns == 1 {
// Do not limit the column length for a single column table.
// This is for the case a single column table can have long rows which cannot
// fit into a single line (one example is the ovsflows outputs).
widths[0] = 0
} else {
// Get the width of every column.
for j := 0; j < numColumns; j++ {
width := len(rows[0][j])
for i := 1; i < len(list)+1; i++ {
if len(rows[i][j]) == 0 {
rows[i][j] = "<NONE>"
}
if width < len(rows[i][j]) {
width = len(rows[i][j])
}
}
widths[j] = width
if j != 0 {
widths[j]++
}
}
}
// Construct the table.
var buffer bytes.Buffer
for i := 0; i < len(list)+1; i++ {
for j := 0; j < len(args); j++ {
val := ""
if j != 0 {
val = " " + val
}
val += rows[i][j]
if widths[j] > 0 {
val += strings.Repeat(" ", widths[j]-len(val))
}
buffer.WriteString(val)
}
buffer.WriteString("\n")
}
if _, err := io.Copy(writer, &buffer); err != nil {
return fmt.Errorf("error when copy output into writer: %w", err)
}
return nil
}
func (cd *commandDefinition) tableOutput(obj interface{}, writer io.Writer) error {
target, err := respTransformer(obj)
if err != nil {
return fmt.Errorf("error when transforming obj: %w", err)
}
list, multiple := target.([]interface{})
var args []string
if multiple {
for _, el := range list {
m := el.(map[string]interface{})
for k := range m {
args = append(args, k)
}
break
}
} else {
m, _ := target.(map[string]interface{})
for k := range m {
args = append(args, k)
}
}
var buffer bytes.Buffer
for _, arg := range args {
buffer.WriteString(arg)
buffer.WriteString("\t")
}
attrLine := buffer.String()
var valLines []string
if multiple {
for _, el := range list {
m := el.(map[string]interface{})
buffer.Reset()
for _, k := range args {
var output bytes.Buffer
if err = jsonEncode(m[k], &output); err != nil {
return fmt.Errorf("error when encoding data in json: %w", err)
}
buffer.WriteString(strings.Trim(output.String(), "\"\n"))
buffer.WriteString("\t")
}
valLines = append(valLines, buffer.String())
}
} else {
buffer.Reset()
m, _ := target.(map[string]interface{})
for _, k := range args {
var output bytes.Buffer
if err = jsonEncode(m[k], &output); err != nil {
return fmt.Errorf("error when encoding: %w", err)
}
buffer.WriteString(strings.Trim(output.String(), "\"\n"))
buffer.WriteString("\t")
}
valLines = append(valLines, buffer.String())
}
var b bytes.Buffer
w := tabwriter.NewWriter(&b, 15, 0, 1, ' ', 0)
fmt.Fprintln(w, attrLine)
for _, line := range valLines {
fmt.Fprintln(w, line)
}
w.Flush()
if _, err = io.Copy(writer, &b); err != nil {
return fmt.Errorf("error when copy output into writer: %w", err)
}
return nil
}
// output reads bytes from the resp and outputs the data to the writer in desired
// format. If the AddonTransform is set, it will use the function to transform
// the data first. It will try to output the resp in the format ft specified after
// doing transform.
func (cd *commandDefinition) output(resp io.Reader, writer io.Writer, ft formatterType, single bool) (err error) {
var obj interface{}
addonTransform := cd.getAddonTransform()
if addonTransform == nil { // Decode the data if there is no AddonTransform.
obj, err = cd.decode(resp, single)
if err != nil {
return fmt.Errorf("error when decoding response: %w", err)
}
} else {
obj, err = addonTransform(resp, single)
if err != nil {
return fmt.Errorf("error when doing local transform: %w", err)
}
klog.Infof("After transforming %v", obj)
}
// Output structure data in format
switch ft {
case jsonFormatter:
return cd.jsonOutput(obj, writer)
case yamlFormatter:
return cd.yamlOutput(obj, writer)
case tableFormatter:
if cd.commandGroup == get {
return cd.tableOutputForGetCommands(obj, writer)
} else {
return cd.tableOutput(obj, writer)
}
default:
return fmt.Errorf("unsupport format type: %v", ft)
}
}
func (cd *commandDefinition) collectFlags(cmd *cobra.Command, args []string) (map[string]string, error) {
argMap := make(map[string]string)
if len(args) > 0 {
argMap["name"] = args[0]
}
if endpoint := cd.getEndpoint(); endpoint != nil {
for _, f := range endpoint.flags() {
vs, err := cmd.Flags().GetString(f.name)
if err == nil && len(vs) != 0 {
argMap[f.name] = vs
continue
}
}
}
if cd.namespaced() {
argMap["namespace"], _ = cmd.Flags().GetString("namespace")
}
return argMap, nil
}
// newCommandRunE creates the RunE function for the command. The RunE function
// checks the args according to argOption and flags.
func (cd *commandDefinition) newCommandRunE(c *client) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
argMap, err := cd.collectFlags(cmd, args)
if err != nil {
return err
}
klog.Infof("Args: %v", argMap)
var argGet bool
for _, flag := range cd.getEndpoint().flags() {
if _, ok := argMap[flag.name]; ok && flag.arg == true {
argGet = true
break
}
}
kubeconfigPath, _ := cmd.Flags().GetString("kubeconfig")
timeout, _ := cmd.Flags().GetDuration("timeout")
server, _ := cmd.Flags().GetString("server")
resp, err := c.request(&requestOption{
commandDefinition: cd,
kubeconfig: kubeconfigPath,
args: argMap,
timeout: timeout,
server: server,
})
if err != nil {
return err
}
outputFormat, err := cmd.Flags().GetString("output")
if err != nil {
return err
}
isSingle := cd.getEndpoint().OutputType() != multiple && (cd.getEndpoint().OutputType() == single || argGet)
return cd.output(resp, os.Stdout, formatterType(outputFormat), isSingle)
}
}
// applyFlagsToCommand sets up args and flags for the command.
func (cd *commandDefinition) applyFlagsToCommand(cmd *cobra.Command) {
var hasFlag bool
for _, flag := range cd.getEndpoint().flags() {
if flag.arg {
cmd.Args = cobra.MaximumNArgs(1)
cmd.Use += fmt.Sprintf(" [%s]", flag.name)
cmd.Long += fmt.Sprintf("\n\nArgs:\n %s\t%s", flag.name, flag.usage)
hasFlag = true
} else {
cmd.Flags().StringP(flag.name, flag.shorthand, flag.defaultValue, flag.usage)
}
}
if !hasFlag {
cmd.Args = cobra.NoArgs
}
if cd.commandGroup == get {
cmd.Flags().StringP("output", "o", "table", "output format: json|table|yaml")
} else {
cmd.Flags().StringP("output", "o", "yaml", "output format: json|table|yaml")
}
}
// applyExampleToCommand generates examples according to the commandDefinition.
// It only creates examples for commands which specify TransformedResponse. If the singleObject
// is specified, it only creates one example to retrieve the single object. Otherwise,
// it will generate examples about retrieving a single object according to the key
// argOption and retrieving the object list.
func (cd *commandDefinition) applyExampleToCommand(cmd *cobra.Command) {
if len(cd.example) != 0 {
cmd.Example = cd.example
return
}
var commands []string
for iter := cmd; iter != nil; iter = iter.Parent() {
commands = append(commands, iter.Name())
}
for i := 0; i < len(commands)/2; i++ {
commands[i], commands[len(commands)-1-i] = commands[len(commands)-1-i], commands[i]
}
var buf bytes.Buffer
dataName := strings.ToLower(cd.use)
if cd.getEndpoint().OutputType() == single {
fmt.Fprintf(&buf, " Get the %s\n", dataName)
fmt.Fprintf(&buf, " $ %s\n", strings.Join(commands, " "))
} else {
fmt.Fprintf(&buf, " Get a %s\n", dataName)
fmt.Fprintf(&buf, " $ %s [name]\n", strings.Join(commands, " "))
fmt.Fprintf(&buf, " Get the list of %s\n", dataName)
fmt.Fprintf(&buf, " $ %s\n", strings.Join(commands, " "))
}
cmd.Example = buf.String()
}
| 1 | 22,037 | Move them to the internal group of imports. | antrea-io-antrea | go |
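The request is about Go import grouping: the two antrea packages added in the diff should sit in a separate, project-internal block rather than next to the standard library. Below is a layout sketch using only the paths that already appear in this record; it assumes the file lives inside the antrea module, and the blank identifiers exist solely so this standalone illustration compiles:

```go
package antctl

import (
	// Standard library.
	_ "strconv"
	_ "strings"

	// External dependencies.
	_ "github.com/spf13/cobra"

	// Module-internal packages, grouped last as the reviewer asks.
	_ "github.com/vmware-tanzu/antrea/pkg/apis/networking/v1beta1"
	_ "github.com/vmware-tanzu/antrea/pkg/controller/networkpolicy"
)
```

gofmt keeps blank-line-separated groups intact, so once the internal block exists, later additions stay sorted within it.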
@@ -124,6 +124,7 @@ func (c *CStorPoolController) cStorPoolEventHandler(operation common.QueueOperat
status, err := c.getPoolStatus(cStorPoolGot)
return status, err
}
+ glog.Errorf("Please handle cStorPool '%s' event on '%s'", string(operation), string(cStorPoolGot.ObjectMeta.Name))
return string(apis.CStorPoolStatusInvalid), nil
}
| 1 | /*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package poolcontroller
import (
"fmt"
"os"
"reflect"
"time"
"github.com/golang/glog"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/pool"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/lease/v1alpha1"
"github.com/openebs/maya/pkg/util"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
)
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the cStorPoolUpdated resource
// with the current status of the resource.
func (c *CStorPoolController) syncHandler(key string, operation common.QueueOperation) error {
cStorPoolGot, err := c.getPoolResource(key)
if err != nil {
return err
}
var newCspLease lease.Leaser
newCspLease = &lease.Lease{cStorPoolGot, lease.CspLeaseKey, c.clientset, c.kubeclientset}
csp, err := newCspLease.Hold()
cspObject, ok := csp.(*apis.CStorPool)
if !ok {
fmt.Errorf("expected csp object but got %#v", cspObject)
}
if err != nil {
glog.Errorf("Could not acquire lease on csp object:%v", err)
return err
}
glog.Infof("Lease acquired successfully on csp %s ", cspObject.Name)
status, err := c.cStorPoolEventHandler(operation, cspObject)
if status == "" {
glog.Warning("Empty status recieved for csp status in sync handler")
return nil
}
cspObject.Status.Phase = apis.CStorPoolPhase(status)
if err != nil {
glog.Errorf(err.Error())
_, err := c.clientset.OpenebsV1alpha1().CStorPools().Update(cspObject)
if err != nil {
return err
}
glog.Infof("cStorPool:%v, %v; Status: %v", cspObject.Name,
string(cspObject.GetUID()), cspObject.Status.Phase)
return err
}
// Synchronize cstor pool used and free capacity fields on CSP object.
// Any kind of sync activity should be done from here.
// ToDo: Move status sync (of csp) here from cStorPoolEventHandler function.
// ToDo: Instead of having statusSync, capacitySync we can make it generic resource sync which syncs all the
	// ToDo: required fields on CSP ( Some code re-organization will be required)
c.syncCsp(cspObject)
_, err = c.clientset.OpenebsV1alpha1().CStorPools().Update(cspObject)
if err != nil {
c.recorder.Event(cspObject, corev1.EventTypeWarning, string(common.FailedSynced), string(common.MessageResourceSyncFailure)+err.Error())
return err
} else {
c.recorder.Event(cspObject, corev1.EventTypeNormal, string(common.SuccessSynced), string(common.MessageResourceSyncSuccess))
}
glog.Infof("cStorPool:%v, %v; Status: %v", cspObject.Name,
string(cspObject.GetUID()), cspObject.Status.Phase)
return nil
}
// cStorPoolEventHandler is to handle cstor pool related events.
func (c *CStorPoolController) cStorPoolEventHandler(operation common.QueueOperation, cStorPoolGot *apis.CStorPool) (string, error) {
pool.RunnerVar = util.RealRunner{}
switch operation {
case common.QOpAdd:
glog.Infof("Processing cStorPool added event: %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID()))
// lock is to synchronize pool and volumereplica. Until certain pool related
// operations are over, the volumereplica threads will be held.
common.SyncResources.Mux.Lock()
status, err := c.cStorPoolAddEventHandler(cStorPoolGot)
common.SyncResources.Mux.Unlock()
pool.PoolAddEventHandled = true
return status, err
case common.QOpDestroy:
glog.Infof("Processing cStorPool Destroy event %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID()))
status, err := c.cStorPoolDestroyEventHandler(cStorPoolGot)
return status, err
case common.QOpSync:
		// Check if the pool was not imported/created earlier due to some failure or a failure in getting the lease;
		// try to import/create the pool here as part of resync.
if IsPendingStatus(cStorPoolGot) {
common.SyncResources.Mux.Lock()
status, err := c.cStorPoolAddEventHandler(cStorPoolGot)
common.SyncResources.Mux.Unlock()
pool.PoolAddEventHandled = true
return status, err
}
glog.Infof("Synchronizing cStor pool status for pool %s", cStorPoolGot.ObjectMeta.Name)
status, err := c.getPoolStatus(cStorPoolGot)
return status, err
}
return string(apis.CStorPoolStatusInvalid), nil
}
func (c *CStorPoolController) cStorPoolAddEventHandler(cStorPoolGot *apis.CStorPool) (string, error) {
// CheckValidPool is to check if pool attributes are correct.
err := pool.CheckValidPool(cStorPoolGot)
if err != nil {
c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureValidate), string(common.MessageResourceFailValidate))
return string(apis.CStorPoolStatusOffline), err
}
/* If pool is already present.
Pool CR status is online. This means pool (main car) is running successfully,
but watcher container got restarted.
Pool CR status is init/online. If entire pod got restarted, both zrepl and watcher
are started.
a) Zrepl could have come up first, in this case, watcher will update after
the specified interval of 120s.
b) Watcher could have come up first, in this case, there is a possibility
that zrepl goes down and comes up and the watcher sees that no pool is there,
so it will break the loop and attempt to import the pool. */
// cnt is no of attempts to wait and handle in case of already present pool.
cnt := common.NoOfPoolWaitAttempts
existingPool, _ := pool.GetPoolName()
isPoolExists := len(existingPool) != 0
for i := 0; isPoolExists && i < cnt; i++ {
// GetVolumes is called because, while importing a pool, volumes corresponding
// to the pool are also imported. This needs to be handled and made visible
// to cvr controller.
common.InitialImportedPoolVol, _ = volumereplica.GetVolumes()
// GetPoolName is to get pool name for particular no. of attempts.
existingPool, _ := pool.GetPoolName()
if common.CheckIfPresent(existingPool, string(pool.PoolPrefix)+string(cStorPoolGot.GetUID())) {
// In the last attempt, ignore and update the status.
if i == cnt-1 {
isPoolExists = false
if IsPendingStatus(cStorPoolGot) || IsEmptyStatus(cStorPoolGot) {
// Pool CR status is init. This means pool deployment was done
// successfully, but before updating the CR to Online status,
// the watcher container got restarted.
glog.Infof("Pool %v is online", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent))
common.SyncResources.IsImported = true
return string(apis.CStorPoolStatusOnline), nil
}
glog.Infof("Pool %v already present", string(pool.PoolPrefix)+string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.AlreadyPresent), string(common.MessageResourceAlreadyPresent))
common.SyncResources.IsImported = true
return string(apis.CStorPoolStatusErrorDuplicate), fmt.Errorf("Duplicate resource request")
}
glog.Infof("Attempt %v: Waiting...", i+1)
time.Sleep(common.PoolWaitInterval)
} else {
// If no pool is present while trying for getpoolname, set isPoolExists to false and
// break the loop, to import the pool later.
isPoolExists = false
}
}
var importPoolErr error
var status string
cachfileFlags := []bool{true, false}
for _, cachefileFlag := range cachfileFlags {
status, importPoolErr = c.importPool(cStorPoolGot, cachefileFlag)
if status == string(apis.CStorPoolStatusOnline) {
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.SuccessImported), string(common.MessageResourceImported))
common.SyncResources.IsImported = true
return status, nil
}
}
	// If InitialImportedPoolVol is not empty, notify the cvr controller
	// through the channel.
	common.SyncResources.IsImported = len(common.InitialImportedPoolVol) != 0
	// Check whether the initial status of the cstorpool object is empty or pending.
if IsEmptyStatus(cStorPoolGot) || IsPendingStatus(cStorPoolGot) {
// LabelClear is to clear pool label
err = pool.LabelClear(cStorPoolGot.Spec.Disks.DiskList)
if err != nil {
			glog.Errorf("Label clear failed: %v, %v", string(cStorPoolGot.GetUID()), err)
} else {
glog.Infof("Label clear successful: %v", string(cStorPoolGot.GetUID()))
}
// CreatePool is to create cstor pool.
err = pool.CreatePool(cStorPoolGot)
if err != nil {
glog.Errorf("Pool creation failure: %v", string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureCreate), string(common.MessageResourceFailCreate))
return string(apis.CStorPoolStatusOffline), err
}
glog.Infof("Pool creation successful: %v", string(cStorPoolGot.GetUID()))
c.recorder.Event(cStorPoolGot, corev1.EventTypeNormal, string(common.SuccessCreated), string(common.MessageResourceCreated))
return string(apis.CStorPoolStatusOnline), nil
}
glog.Infof("Not init status: %v, %v", cStorPoolGot.ObjectMeta.Name, string(cStorPoolGot.GetUID()))
return string(apis.CStorPoolStatusOffline), importPoolErr
}
func (c *CStorPoolController) cStorPoolDestroyEventHandler(cStorPoolGot *apis.CStorPool) (string, error) {
// DeletePool is to delete cstor pool.
err := pool.DeletePool(string(pool.PoolPrefix) + string(cStorPoolGot.ObjectMeta.UID))
if err != nil {
c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureDestroy), string(common.MessageResourceFailDestroy))
return string(apis.CStorPoolStatusDeletionFailed), err
}
// LabelClear is to clear pool label
err = pool.LabelClear(cStorPoolGot.Spec.Disks.DiskList)
if err != nil {
		glog.Errorf("Label clear failed: %v, %v", string(cStorPoolGot.GetUID()), err)
} else {
glog.Infof("Label clear successful: %v", string(cStorPoolGot.GetUID()))
}
// removeFinalizer is to remove finalizer of cStorPool resource.
err = c.removeFinalizer(cStorPoolGot)
if err != nil {
return string(apis.CStorPoolStatusOffline), err
}
return "", nil
}
// getPoolStatus is a wrapper that fetches the status of cstor pool.
func (c *CStorPoolController) getPoolStatus(cStorPoolGot *apis.CStorPool) (string, error) {
poolStatus, err := pool.Status(string(pool.PoolPrefix) + string(cStorPoolGot.ObjectMeta.UID))
if err != nil {
		// TODO: put the error in the event recorder
c.recorder.Event(cStorPoolGot, corev1.EventTypeWarning, string(common.FailureStatusSync), string(common.MessageResourceFailStatusSync))
return "", err
}
return poolStatus, nil
}
// getPoolResource returns object corresponding to the resource key
func (c *CStorPoolController) getPoolResource(key string) (*apis.CStorPool, error) {
// Convert the key(namespace/name) string into a distinct name
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil, nil
}
cStorPoolGot, err := c.clientset.OpenebsV1alpha1().CStorPools().Get(name, metav1.GetOptions{})
if err != nil {
// The cStorPool resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("cStorPoolGot '%s' in work queue no longer exists", key))
return nil, nil
}
return nil, err
}
return cStorPoolGot, nil
}
// removeFinalizer is to remove finalizer of cstorpool resource.
func (c *CStorPoolController) removeFinalizer(cStorPoolGot *apis.CStorPool) error {
if len(cStorPoolGot.Finalizers) > 0 {
cStorPoolGot.Finalizers = []string{}
}
_, err := c.clientset.OpenebsV1alpha1().CStorPools().Update(cStorPoolGot)
if err != nil {
return err
}
glog.Infof("Removed Finalizer: %v, %v", cStorPoolGot.Name, string(cStorPoolGot.GetUID()))
return nil
}
func (c *CStorPoolController) importPool(cStorPoolGot *apis.CStorPool, cachefileFlag bool) (string, error) {
err := pool.ImportPool(cStorPoolGot, cachefileFlag)
if err == nil {
err = pool.SetCachefile(cStorPoolGot)
if err != nil {
common.SyncResources.IsImported = false
return string(apis.CStorPoolStatusOffline), err
}
glog.Infof("Set cachefile successful: %v", string(cStorPoolGot.GetUID()))
// GetVolumes is called because, while importing a pool, volumes corresponding
// to the pool are also imported. This needs to be handled and made visible
// to cvr controller.
common.InitialImportedPoolVol, err = volumereplica.GetVolumes()
if err != nil {
common.SyncResources.IsImported = false
return string(apis.CStorPoolStatusOffline), err
}
glog.Infof("Import Pool with cachefile successful: %v", string(cStorPoolGot.GetUID()))
return string(apis.CStorPoolStatusOnline), nil
}
return "", nil
}
// IsRightCStorPoolMgmt is to check if the pool request is for particular pod/application.
func IsRightCStorPoolMgmt(cStorPool *apis.CStorPool) bool {
if os.Getenv(string(common.OpenEBSIOCStorID)) == string(cStorPool.ObjectMeta.UID) {
return true
}
return false
}
// IsDestroyEvent is to check if the call is for cStorPool destroy.
func IsDestroyEvent(cStorPool *apis.CStorPool) bool {
if cStorPool.ObjectMeta.DeletionTimestamp != nil {
return true
}
return false
}
// IsOnlyStatusChange is to check only status change of cStorPool object.
func IsOnlyStatusChange(oldCStorPool, newCStorPool *apis.CStorPool) bool {
if reflect.DeepEqual(oldCStorPool.Spec, newCStorPool.Spec) &&
!reflect.DeepEqual(oldCStorPool.Status, newCStorPool.Status) {
return true
}
return false
}
// IsEmptyStatus is to check if the status of cStorPool object is empty.
func IsEmptyStatus(cStorPool *apis.CStorPool) bool {
if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusEmpty) {
glog.Infof("cStorPool empty status: %v", string(cStorPool.ObjectMeta.UID))
return true
}
glog.Infof("Not empty status: %v", string(cStorPool.ObjectMeta.UID))
return false
}
// IsPendingStatus is to check if the status of cStorPool object is pending.
func IsPendingStatus(cStorPool *apis.CStorPool) bool {
if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusPending) {
glog.Infof("cStorPool pending: %v", string(cStorPool.ObjectMeta.UID))
return true
}
glog.V(4).Infof("Not pending status: %v", string(cStorPool.ObjectMeta.UID))
return false
}
// IsErrorDuplicate is to check if the status of cStorPool object is error-duplicate.
func IsErrorDuplicate(cStorPool *apis.CStorPool) bool {
if string(cStorPool.Status.Phase) == string(apis.CStorPoolStatusErrorDuplicate) {
glog.Infof("cStorPool duplication error: %v", string(cStorPool.ObjectMeta.UID))
return true
}
glog.V(4).Infof("Not error duplicate status: %v", string(cStorPool.ObjectMeta.UID))
return false
}
// IsDeletionFailedBefore is to make sure no other operation should happen if the
// status of cStorPool is deletion-failed.
func IsDeletionFailedBefore(cStorPool *apis.CStorPool) bool {
if cStorPool.Status.Phase == apis.CStorPoolStatusDeletionFailed {
return true
}
return false
}
// syncCsp updates field on CSP object after fetching the values from zpool utility.
func (c *CStorPoolController) syncCsp(cStorPool *apis.CStorPool) {
// Get capacity of the pool.
capacity, err := pool.Capacity(string(pool.PoolPrefix) + string(cStorPool.ObjectMeta.UID))
if err != nil {
glog.Errorf("Unable to sync CSP capacity: %v", err)
c.recorder.Event(cStorPool, corev1.EventTypeWarning, string(common.FailureCapacitySync), string(common.MessageResourceFailCapacitySync))
} else {
cStorPool.Status.Capacity = *capacity
}
}
| 1 | 12,190 | can we rewrite this as: `ignored event '%s' for cstor pool '%s'` | openebs-maya | go |
@@ -4,6 +4,8 @@ module Test
end
class ClientRequest < ActiveRecord::Base
+ belongs_to :approving_official, class_name: User
+
def self.purchase_amount_column_name
:amount
end | 1 | module Test
def self.table_name_prefix
"test_"
end
class ClientRequest < ActiveRecord::Base
def self.purchase_amount_column_name
:amount
end
include ClientDataMixin
include PurchaseCardMixin
def editable?
true
end
def name
project_title
end
def self.expense_type_options
[]
end
end
def self.setup_models
ClientRequest.connection.create_table :test_client_requests do |t|
t.decimal :amount
t.string :project_title
t.datetime :created_at
t.datetime :updated_at
end
end
def self.teardown_models
ClientRequest.connection.drop_table :test_client_requests
end
  # We must defer loading the factory until after we have defined our namespace,
  # so require it explicitly here to work around the Rails app load order.
require Rails.root + "spec/factories/test/client_request.rb"
end
| 1 | 16,677 | not all client data types have an approving official (eg: 18F does not). do we still want to include the relation here? | 18F-C2 | rb |
@@ -105,7 +105,7 @@ public interface WebDriver extends SearchContext {
* @see org.openqa.selenium.WebDriver.Timeouts
*/
@Override
- List<WebElement> findElements(By by);
+ <T extends WebElement> List<T> findElements(By by);
/** | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium;
import org.openqa.selenium.logging.LoggingPreferences;
import org.openqa.selenium.logging.Logs;
import java.net.URL;
import java.time.Duration;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/**
* WebDriver is a remote control interface that enables introspection and control of user agents
* (browsers). The methods in this interface fall into three categories:
* <ul>
* <li>Control of the browser itself</li>
* <li>Selection of {@link WebElement}s</li>
* <li>Debugging aids</li>
* </ul>
* <p>
* Key methods are {@link WebDriver#get(String)}, which is used to load a new web page, and the
* various methods similar to {@link WebDriver#findElement(By)}, which is used to find
* {@link WebElement}s.
* <p>
* Currently, you will need to instantiate implementations of this interface directly. It is hoped
* that you write your tests against this interface so that you may "swap in" a more fully featured
* browser when there is a requirement for one.
* <p>
* Most implementations of this interface follow
* <a href="https://w3c.github.io/webdriver/">W3C WebDriver specification</a>
*/
public interface WebDriver extends SearchContext {
// Navigation
/**
* Load a new web page in the current browser window. This is done using an HTTP POST operation,
   * and the method will block until the load is complete (with the default 'page load strategy').
* This will follow redirects issued either by the server or as a meta-redirect from within the
* returned HTML. Should a meta-redirect "rest" for any duration of time, it is best to wait until
* this timeout is over, since should the underlying page change whilst your test is executing the
* results of future calls against this interface will be against the freshly loaded page. Synonym
* for {@link org.openqa.selenium.WebDriver.Navigation#to(String)}.
* <p>
* See <a href="https://w3c.github.io/webdriver/#navigate-to">W3C WebDriver specification</a>
* for more details.
*
* @param url The URL to load. Must be a fully qualified URL
* @see org.openqa.selenium.PageLoadStrategy
*/
void get(String url);
/**
* Get a string representing the current URL that the browser is looking at.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-current-url">W3C WebDriver specification</a>
* for more details.
*
* @return The URL of the page currently loaded in the browser
*/
String getCurrentUrl();
// General properties
/**
* Get the title of the current page.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-title">W3C WebDriver specification</a>
* for more details.
*
* @return The title of the current page, with leading and trailing whitespace stripped, or null
* if one is not already set
*/
String getTitle();
/**
* Find all elements within the current page using the given mechanism.
* This method is affected by the 'implicit wait' times in force at the time of execution. When
* implicitly waiting, this method will return as soon as there are more than 0 items in the
* found collection, or will return an empty list if the timeout is reached.
* <p>
* See <a href="https://w3c.github.io/webdriver/#find-elements">W3C WebDriver specification</a>
* for more details.
*
* @param by The locating mechanism to use
* @return A list of all matching {@link WebElement}s, or an empty list if nothing matches
* @see org.openqa.selenium.By
* @see org.openqa.selenium.WebDriver.Timeouts
*/
@Override
List<WebElement> findElements(By by);
/**
* Find the first {@link WebElement} using the given method.
* This method is affected by the 'implicit wait' times in force at the time of execution.
   * The findElement(..) invocation will return a matching element, or try again repeatedly until
* the configured timeout is reached.
* <p>
* findElement should not be used to look for non-present elements, use {@link #findElements(By)}
* and assert zero length response instead.
* <p>
* See <a href="https://w3c.github.io/webdriver/#find-element">W3C WebDriver specification</a>
* for more details.
*
* @param by The locating mechanism to use
* @return The first matching element on the current page
* @throws NoSuchElementException If no matching elements are found
* @see org.openqa.selenium.By
* @see org.openqa.selenium.WebDriver.Timeouts
*/
@Override
WebElement findElement(By by);
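  // Illustrative usage sketch (not part of this interface); assumes some
  // concrete WebDriver implementation is bound to a `driver` variable:
  //   WebElement heading = driver.findElement(By.cssSelector("h1"));
  //   List<WebElement> links = driver.findElements(By.tagName("a"));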
// Misc
/**
* Get the source of the last loaded page. If the page has been modified after loading (for
* example, by Javascript) there is no guarantee that the returned text is that of the modified
* page. Please consult the documentation of the particular driver being used to determine whether
* the returned text reflects the current state of the page or the text last sent by the web
* server. The page source returned is a representation of the underlying DOM: do not expect it to
* be formatted or escaped in the same way as the response sent from the web server. Think of it
* as an artist's impression.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-page-source">W3C WebDriver specification</a>
* for more details.
*
* @return The source of the current page
*/
String getPageSource();
/**
* Close the current window, quitting the browser if it's the last window currently open.
* <p>
* See <a href="https://w3c.github.io/webdriver/#close-window">W3C WebDriver specification</a>
* for more details.
*/
void close();
/**
* Quits this driver, closing every associated window.
*/
void quit();
/**
* Return a set of window handles which can be used to iterate over all open windows of this
* WebDriver instance by passing them to {@link #switchTo()}.{@link Options#window()}
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-handles">W3C WebDriver specification</a>
* for more details.
*
* @return A set of window handles which can be used to iterate over all open windows.
*/
Set<String> getWindowHandles();
/**
* Return an opaque handle to this window that uniquely identifies it within this driver instance.
* This can be used to switch to this window at a later date
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-handle">W3C WebDriver specification</a>
* for more details.
*
* @return the current window handle
*/
String getWindowHandle();
/**
* Send future commands to a different frame or window.
*
* @return A TargetLocator which can be used to select a frame or window
* @see org.openqa.selenium.WebDriver.TargetLocator
*/
TargetLocator switchTo();
/**
* An abstraction allowing the driver to access the browser's history and to navigate to a given
* URL.
*
* @return A {@link org.openqa.selenium.WebDriver.Navigation} that allows the selection of what to
* do next
*/
Navigation navigate();
/**
* Gets the Option interface
*
* @return An option interface
* @see org.openqa.selenium.WebDriver.Options
*/
Options manage();
/**
* An interface for managing stuff you would do in a browser menu
*/
interface Options {
/**
* Add a specific cookie. If the cookie's domain name is left blank, it is assumed that the
* cookie is meant for the domain of the current document.
* <p>
* See <a href="https://w3c.github.io/webdriver/#add-cookie">W3C WebDriver specification</a>
* for more details.
*
* @param cookie The cookie to add.
*/
void addCookie(Cookie cookie);
/**
* Delete the named cookie from the current domain. This is equivalent to setting the named
* cookie's expiry date to some time in the past.
* <p>
* See <a href="https://w3c.github.io/webdriver/#delete-cookie">W3C WebDriver specification</a>
* for more details.
*
* @param name The name of the cookie to delete
*/
void deleteCookieNamed(String name);
/**
* Delete a cookie from the browser's "cookie jar". The domain of the cookie will be ignored.
*
* @param cookie nom nom nom
*/
void deleteCookie(Cookie cookie);
/**
* Delete all the cookies for the current domain.
* <p>
* See <a href="https://w3c.github.io/webdriver/#delete-all-cookies">W3C WebDriver specification</a>
* for more details.
*/
void deleteAllCookies();
/**
* Get all the cookies for the current domain.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-all-cookies">W3C WebDriver specification</a>
* for more details.
*
* @return A Set of cookies for the current domain.
*/
Set<Cookie> getCookies();
/**
* Get a cookie with a given name.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-named-cookie">W3C WebDriver specification</a>
* for more details.
*
* @param name the name of the cookie
* @return the cookie, or null if no cookie with the given name is present
*/
Cookie getCookieNamed(String name);
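    // Illustrative usage sketch (not part of this interface); the cookie name
    // and value are hypothetical:
    //   driver.manage().addCookie(new Cookie("session", "abc123"));
    //   Cookie session = driver.manage().getCookieNamed("session");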
/**
* @return the interface for managing driver timeouts.
*/
Timeouts timeouts();
/**
* @return the interface for controlling IME engines to generate complex-script input.
*/
ImeHandler ime();
/**
* @return the interface for managing the current window.
*/
Window window();
/**
* Gets the {@link Logs} interface used to fetch different types of logs.
* <p>
* To set the logging preferences {@link LoggingPreferences}.
*
* @return A Logs interface.
*/
@Beta
Logs logs();
}
/**
* An interface for managing timeout behavior for WebDriver instances.
* <p>
* See <a href="https://w3c.github.io/webdriver/#set-timeouts">W3C WebDriver specification</a>
* for more details.
*/
interface Timeouts {
/**
* @deprecated Use {@link #implicitlyWait(Duration)}
*
* Specifies the amount of time the driver should wait when searching for an element if it is
* not immediately present.
* <p>
* When searching for a single element, the driver should poll the page until the element has
* been found, or this timeout expires before throwing a {@link NoSuchElementException}. When
* searching for multiple elements, the driver should poll the page until at least one element
* has been found or this timeout has expired.
* <p>
* Increasing the implicit wait timeout should be used judiciously as it will have an adverse
* effect on test run time, especially when used with slower location strategies like XPath.
* <p>
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with invalid
* argument will be returned.
*
* @param time The amount of time to wait.
* @param unit The unit of measure for {@code time}.
* @return A self reference.
*/
@Deprecated
Timeouts implicitlyWait(long time, TimeUnit unit);
/**
* Specifies the amount of time the driver should wait when searching for an element if it is
* not immediately present.
* <p>
* When searching for a single element, the driver should poll the page until the element has
* been found, or this timeout expires before throwing a {@link NoSuchElementException}. When
* searching for multiple elements, the driver should poll the page until at least one element
* has been found or this timeout has expired.
* <p>
* Increasing the implicit wait timeout should be used judiciously as it will have an adverse
* effect on test run time, especially when used with slower location strategies like XPath.
* <p>
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with invalid
* argument will be returned.
*
* @param duration The duration to wait.
* @return A self reference.
*/
default Timeouts implicitlyWait(Duration duration) {
return implicitlyWait(duration.toMillis(), TimeUnit.MILLISECONDS);
}
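    // Illustrative usage sketch (not part of this interface); the durations are
    // arbitrary examples:
    //   driver.manage().timeouts().implicitlyWait(Duration.ofSeconds(10));
    //   driver.manage().timeouts().pageLoadTimeout(Duration.ofSeconds(30));
    //   driver.manage().timeouts().scriptTimeout(Duration.ofSeconds(15));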
/**
* Gets the amount of time the driver should wait when searching for an element if it is
* not immediately present.
*
* @return The amount of time the driver should wait when searching for an element.
* @see <a href="https://www.w3.org/TR/webdriver/#get-timeouts">W3C WebDriver</a>
*/
default Duration getImplicitWaitTimeout() {
throw new UnsupportedCommandException();
}
/**
* @deprecated Use {@link #setScriptTimeout(Duration)}
*
* Sets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @param time The timeout value.
* @param unit The unit of time.
* @return A self reference.
* @see JavascriptExecutor#executeAsyncScript(String, Object...)
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
@Deprecated
Timeouts setScriptTimeout(long time, TimeUnit unit);
/**
* Sets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @param duration The timeout value.
* @deprecated Use {@link #scriptTimeout(Duration)}
* @return A self reference.
* @see JavascriptExecutor#executeAsyncScript(String, Object...)
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
@Deprecated
default Timeouts setScriptTimeout(Duration duration) {
return setScriptTimeout(duration.toMillis(), TimeUnit.MILLISECONDS);
}
/**
* Sets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @param duration The timeout value.
* @return A self reference.
* @see JavascriptExecutor#executeAsyncScript(String, Object...)
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Timeouts scriptTimeout(Duration duration) {
return setScriptTimeout(duration);
}
/**
* Gets the amount of time to wait for an asynchronous script to finish execution before
* throwing an error. If the timeout is negative, not null, or greater than 2e16 - 1, an
* error code with invalid argument will be returned.
*
* @return The amount of time to wait for an asynchronous script to finish execution.
* @see <a href="https://www.w3.org/TR/webdriver/#get-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Duration getScriptTimeout() {
throw new UnsupportedCommandException();
}
/**
* @param time The timeout value.
* @param unit The unit of time.
* @return A Timeouts interface.
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
* @deprecated Use {@link #pageLoadTimeout(Duration)}
*
* Sets the amount of time to wait for a page load to complete before throwing an error.
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with
* invalid argument will be returned.
*/
@Deprecated
Timeouts pageLoadTimeout(long time, TimeUnit unit);
/**
* Sets the amount of time to wait for a page load to complete before throwing an error.
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with
* invalid argument will be returned.
*
* @param duration The timeout value.
* @return A Timeouts interface.
* @see <a href="https://www.w3.org/TR/webdriver/#set-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Timeouts pageLoadTimeout(Duration duration) {
return pageLoadTimeout(duration.toMillis(), TimeUnit.MILLISECONDS);
}
/**
* Gets the amount of time to wait for a page load to complete before throwing an error.
* If the timeout is negative, not null, or greater than 2e16 - 1, an error code with
* invalid argument will be returned.
*
* @return The amount of time to wait for a page load to complete.
* @see <a href="https://www.w3.org/TR/webdriver/#get-timeouts">W3C WebDriver</a>
* @see <a href="https://www.w3.org/TR/webdriver/#dfn-timeouts-configuration">W3C WebDriver</a>
*/
default Duration getPageLoadTimeout() {
throw new UnsupportedCommandException();
}
}
/**
* Used to locate a given frame or window.
*/
interface TargetLocator {
/**
* Select a frame by its (zero-based) index. Selecting a frame by index is equivalent to the
* JS expression window.frames[index] where "window" is the DOM window represented by the
* current context. Once the frame has been selected, all subsequent calls on the WebDriver
* interface are made to that frame.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-frame">W3C WebDriver specification</a>
* for more details.
*
* @param index (zero-based) index
* @return This driver focused on the given frame
* @throws NoSuchFrameException If the frame cannot be found
*/
WebDriver frame(int index);
/**
* Select a frame by its name or ID. Frames located by matching name attributes are always given
* precedence over those matched by ID.
*
     * @param nameOrId the name of the frame window, the id of the &lt;frame&gt; or &lt;iframe&gt;
* element, or the (zero-based) index
* @return This driver focused on the given frame
* @throws NoSuchFrameException If the frame cannot be found
*/
WebDriver frame(String nameOrId);
/**
* Select a frame using its previously located {@link WebElement}.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-frame">W3C WebDriver specification</a>
* for more details.
*
* @param frameElement The frame element to switch to.
* @return This driver focused on the given frame.
* @throws NoSuchFrameException If the given element is neither an IFRAME nor a FRAME element.
* @throws StaleElementReferenceException If the WebElement has gone stale.
* @see WebDriver#findElement(By)
*/
WebDriver frame(WebElement frameElement);
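    // Illustrative usage sketch (not part of this interface); switches into an
    // iframe found on the page and back out to the main document:
    //   WebElement iframe = driver.findElement(By.tagName("iframe"));
    //   driver.switchTo().frame(iframe);
    //   // ... interact with elements inside the frame ...
    //   driver.switchTo().defaultContent();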
/**
* Change focus to the parent context. If the current context is the top level browsing context,
* the context remains unchanged.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-parent-frame">W3C WebDriver specification</a>
* for more details.
*
* @return This driver focused on the parent frame
*/
WebDriver parentFrame();
/**
* Switch the focus of future commands for this driver to the window with the given name/handle.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-window">W3C WebDriver specification</a>
* for more details.
*
* @param nameOrHandle The name of the window or the handle as returned by
* {@link WebDriver#getWindowHandle()}
* @return This driver focused on the given window
* @throws NoSuchWindowException If the window cannot be found
*/
WebDriver window(String nameOrHandle);
/**
* Creates a new browser window and switches the focus for future commands of this driver
* to the new window.
* <p>
* See <a href="https://w3c.github.io/webdriver/#new-window">W3C WebDriver specification</a>
* for more details.
*
* @param typeHint The type of new browser window to be created. The created window is not
* guaranteed to be of the requested type; if the driver does not support
* the requested type, a new browser window will be created of whatever type
* the driver does support.
* @return This driver focused on the given window
*/
WebDriver newWindow(WindowType typeHint);
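    // Illustrative usage sketch (not part of this interface); WindowType.TAB is
    // assumed to be available, as in recent Selenium releases:
    //   driver.switchTo().newWindow(WindowType.TAB);
    //   driver.get("https://example.com");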
/**
* Selects either the first frame on the page, or the main document when a page contains
* iframes.
* <p>
* See <a href="https://w3c.github.io/webdriver/#switch-to-frame">W3C WebDriver specification</a>
* for more details.
*
* @return This driver focused on the top window/first frame.
*/
WebDriver defaultContent();
/**
* Switches to the element that currently has focus within the document currently "switched to",
* or the body element if this cannot be detected. This matches the semantics of calling
* "document.activeElement" in Javascript.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-active-element">W3C WebDriver specification</a>
* for more details.
*
* @return The WebElement with focus, or the body element if no element with focus can be
* detected.
*/
WebElement activeElement();
/**
* Switches to the currently active modal dialog for this particular driver instance.
*
* @return A handle to the dialog.
* @throws NoAlertPresentException If the dialog cannot be found
*/
Alert alert();
}
interface Navigation {
/**
* Move back a single "item" in the browser's history.
* <p>
* See <a href="https://w3c.github.io/webdriver/#back">W3C WebDriver specification</a>
* for more details.
*/
void back();
/**
* Move a single "item" forward in the browser's history. Does nothing if we are on the latest
* page viewed.
* <p>
* See <a href="https://w3c.github.io/webdriver/#forward">W3C WebDriver specification</a>
* for more details.
*/
void forward();
/**
* Load a new web page in the current browser window. This is done using an HTTP POST operation,
* and the method will block until the load is complete. This will follow redirects issued
* either by the server or as a meta-redirect from within the returned HTML. Should a
* meta-redirect "rest" for any duration of time, it is best to wait until this timeout is over,
* since should the underlying page change whilst your test is executing the results of future
* calls against this interface will be against the freshly loaded page.
* <p>
* See <a href="https://w3c.github.io/webdriver/#navigate-to">W3C WebDriver specification</a>
* for more details.
*
* @param url The URL to load. Must be a fully qualified URL
*/
void to(String url);
/**
* Overloaded version of {@link #to(String)} that makes it easy to pass in a URL.
*
* @param url URL
*/
void to(URL url);
/**
* Refresh the current page
* <p>
* See <a href="https://w3c.github.io/webdriver/#refresh">W3C WebDriver specification</a>
* for more details.
*/
void refresh();
}
/**
* An interface for managing input methods.
*/
interface ImeHandler {
/**
* All available engines on the machine. To use an engine, it has to be activated.
*
* @return list of available IME engines.
* @throws ImeNotAvailableException if the host does not support IME.
*/
List<String> getAvailableEngines();
/**
* Get the name of the active IME engine. The name string is platform-specific.
*
* @return name of the active IME engine.
* @throws ImeNotAvailableException if the host does not support IME.
*/
String getActiveEngine();
/**
     * Indicates whether IME input is active at the moment (not whether it is available).
*
* @return true if IME input is available and currently active, false otherwise.
* @throws ImeNotAvailableException if the host does not support IME.
*/
boolean isActivated();
/**
* De-activate IME input (turns off the currently activated engine). Note that getActiveEngine
* may still return the name of the engine but isActivated will return false.
*
* @throws ImeNotAvailableException if the host does not support IME.
*/
void deactivate();
/**
     * Make an engine that is available (appears on the list returned by getAvailableEngines)
* active. After this call, the only loaded engine on the IME daemon will be this one and the
* input sent using sendKeys will be converted by the engine. Note that this is a
* platform-independent method of activating IME (the platform-specific way being using keyboard
* shortcuts).
*
*
* @param engine name of engine to activate.
* @throws ImeNotAvailableException if the host does not support IME.
* @throws ImeActivationFailedException if the engine is not available or if activation failed
* for other reasons.
*/
void activateEngine(String engine);
}
@Beta
interface Window {
/**
* Get the size of the current window. This will return the outer window dimension, not just
* the view port.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @return The current window size.
*/
Dimension getSize();
/**
* Set the size of the current window. This will change the outer window dimension,
* not just the view port, synonymous to window.resizeTo() in JS.
* <p>
* See <a href="https://w3c.github.io/webdriver/#set-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @param targetSize The target size.
*/
void setSize(Dimension targetSize);
/**
* Get the position of the current window, relative to the upper left corner of the screen.
* <p>
* See <a href="https://w3c.github.io/webdriver/#get-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @return The current window position.
*/
Point getPosition();
/**
* Set the position of the current window. This is relative to the upper left corner of the
* screen, synonymous to window.moveTo() in JS.
* <p>
* See <a href="https://w3c.github.io/webdriver/#set-window-rect">W3C WebDriver specification</a>
* for more details.
*
* @param targetPosition The target position of the window.
*/
void setPosition(Point targetPosition);
/**
* Maximizes the current window if it is not already maximized
* <p>
* See <a href="https://w3c.github.io/webdriver/#maximize-window">W3C WebDriver specification</a>
* for more details.
*/
void maximize();
/**
* Minimizes the current window if it is not already minimized
* <p>
* See <a href="https://w3c.github.io/webdriver/#minimize-window">W3C WebDriver specification</a>
* for more details.
*/
void minimize();
/**
* Fullscreen the current window if it is not already fullscreen
* <p>
* See <a href="https://w3c.github.io/webdriver/#fullscreen-window">W3C WebDriver specification</a>
* for more details.
*/
void fullscreen();
}
}
| 1 | 19,274 | This change should also probably go into the corresponding method of the abstract By class? | SeleniumHQ-selenium | rb |
@@ -127,9 +127,6 @@ module.exports = function(config, auth, storage) {
} else {
next(HTTPError[err.message ? 401 : 500](err.message));
}
-
- let base = Utils.combineBaseUrl(Utils.getWebProtocol(req), req.get('host'), config.url_prefix);
- res.redirect(base);
});
});
| 1 | 'use strict';
const bodyParser = require('body-parser');
const express = require('express');
const marked = require('marked');
const Search = require('../../lib/search');
const Middleware = require('./middleware');
const match = Middleware.match;
const validateName = Middleware.validate_name;
const validatePkg = Middleware.validate_package;
const securityIframe = Middleware.securityIframe;
const route = express.Router(); // eslint-disable-line
const async = require('async');
const HTTPError = require('http-errors');
const Utils = require('../../lib/utils');
/*
 This file includes all Verdaccio-only API endpoints (Web UI); for the npm API please see ../endpoint/
*/
module.exports = function(config, auth, storage) {
Search.configureStorage(storage);
const can = Middleware.allow(auth);
// validate all of these params as a package name
// this might be too harsh, so ask if it causes trouble
route.param('package', validatePkg);
route.param('filename', validateName);
route.param('version', validateName);
route.param('anything', match(/.*/));
route.use(bodyParser.urlencoded({extended: false}));
route.use(auth.jwtMiddleware());
route.use(securityIframe);
// Get list of all visible package
route.get('/packages', function(req, res, next) {
storage.get_local(function(err, packages) {
if (err) {
// that function shouldn't produce any
throw err;
}
async.filterSeries(
packages,
function(pkg, cb) {
auth.allow_access(pkg.name, req.remote_user, function(err, allowed) {
if (err) {
cb(null, false);
} else {
cb(err, allowed);
}
});
},
function(err, packages) {
if (err) throw err;
packages.sort(function(a, b) {
if (a.name < b.name) {
return -1;
} else {
return 1;
}
});
next(packages);
}
);
});
});
// Get package readme
route.get('/package/readme(/@:scope?)?/:package/:version?', can('access'), function(req, res, next) {
let packageName = req.params.package;
if (req.params.scope) {
packageName = `@${req.params.scope}/${packageName}`;
}
storage.get_package(packageName, {req: req}, function(err, info) {
if (err) {
return next(err);
}
res.set('Content-Type', 'text/plain');
next(marked(info.readme || 'ERROR: No README data found!'));
});
});
// Search package
route.get('/search/:anything', function(req, res, next) {
const results = Search.query(req.params.anything);
const packages = [];
const getPackageInfo = function(i) {
storage.get_package(results[i].ref, (err, entry) => {
if (!err && entry) {
auth.allow_access(entry.name, req.remote_user, function(err, allowed) {
if (err || !allowed) {
return;
}
packages.push(entry.versions[entry['dist-tags'].latest]);
});
}
if (i >= results.length - 1) {
next(packages);
} else {
getPackageInfo(i + 1);
}
});
};
if (results.length) {
getPackageInfo(0);
} else {
next([]);
}
});
route.post('/login', function(req, res, next) {
auth.authenticate(req.body.username, req.body.password, (err, user) => {
if (!err) {
req.remote_user = user;
next({
token: auth.issue_token(user, '24h'),
username: req.remote_user.name,
});
} else {
next(HTTPError[err.message ? 401 : 500](err.message));
}
let base = Utils.combineBaseUrl(Utils.getWebProtocol(req), req.get('host'), config.url_prefix);
res.redirect(base);
});
});
route.post('/-/logout', function(req, res, next) {
let base = Utils.combineBaseUrl(Utils.getWebProtocol(req), req.get('host'), config.url_prefix);
res.cookies.set('token', '');
res.redirect(base);
});
  // Looking for logout? The client side removes the token when the user clicks logout,
  // or it auto-expires after 24 hours.
  // This token is different from the token sent to the npm client.
  // We will/may replace the current token with JWT in the next major release, and it will not expire at all (configurable).
return route;
};
| 1 | 17,143 | why this remove? | verdaccio-verdaccio | js |
@@ -54,7 +54,7 @@ type Reader interface {
Stat(ctx context.Context, path string) (os.FileInfo, error)
// Open opens an existing file for reading, as os.Open.
- Open(ctx context.Context, path string) (io.ReadCloser, error)
+ Open(ctx context.Context, path string) (FileReader, error)
// Glob returns all the paths matching the specified glob pattern, as
// filepath.Glob. | 1 | /*
* Copyright 2015 The Kythe Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package vfs defines a generic file system interface commonly used by Kythe
// libraries.
package vfs // import "kythe.io/kythe/go/platform/vfs"
import (
"context"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
)
// ErrNotSupported is returned for all unsupported VFS operations.
var ErrNotSupported = errors.New("operation not supported")
// Interface is a virtual file system interface for reading and writing files.
// It is used to wrap the normal os package functions so that other file storage
// implementations be used in lieu. For instance, there could be
// implementations for cloud storage back-ends or databases. Depending on the
// implementation, the Writer methods can be unsupported and always return
// ErrNotSupported.
type Interface interface {
Reader
Writer
}
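// A minimal usage sketch of the package-level helpers defined below, as seen
// from a client package (the path and pattern are hypothetical examples):
//
//	data, err := vfs.ReadFile(ctx, "/tmp/example.txt")
//	matches, globErr := vfs.Glob(ctx, "/tmp/*.txt")
//	tmp, tmpErr := vfs.CreateTempFile(ctx, "", "example-*.txt")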
// TempFile composes io.WriteCloser and access to its "name". For
// file-based implementations, this should be the full path to the file.
type TempFile interface {
io.WriteCloser
Name() string
}
// Reader is a virtual file system interface for reading files.
type Reader interface {
// Stat returns file status information for path, as os.Stat.
Stat(ctx context.Context, path string) (os.FileInfo, error)
// Open opens an existing file for reading, as os.Open.
Open(ctx context.Context, path string) (io.ReadCloser, error)
// Glob returns all the paths matching the specified glob pattern, as
// filepath.Glob.
Glob(ctx context.Context, glob string) ([]string, error)
}
// Writer is a virtual file system interface for writing files.
type Writer interface {
// MkdirAll recursively creates the specified directory path with the given
// permissions, as os.MkdirAll.
MkdirAll(ctx context.Context, path string, mode os.FileMode) error
// Create creates a new file for writing, as os.Create.
Create(ctx context.Context, path string) (io.WriteCloser, error)
// CreateTempFile creates a new temp file returning a TempFile. The
	// name of the file is constructed from dir and pattern, per
// ioutil.TempFile:
// The filename is generated by taking pattern and adding a random
// string to the end. If pattern includes a "*", the random string
// replaces the last "*". If dir is the empty string, CreateTempFile
// uses an unspecified default directory.
CreateTempFile(ctx context.Context, dir, pattern string) (TempFile, error)
// Rename renames oldPath to newPath, as os.Rename, overwriting newPath if
// it exists.
Rename(ctx context.Context, oldPath, newPath string) error
// Remove deletes the file specified by path, as os.Remove.
Remove(ctx context.Context, path string) error
}
// Default is the global default VFS used by Kythe libraries that wish to access
// the file system. This is usually the LocalFS and should only be changed in
// very specialized cases (i.e. don't change it).
var Default Interface = LocalFS{}
// ReadFile is the equivalent of ioutil.ReadFile using the Default VFS.
func ReadFile(ctx context.Context, filename string) ([]byte, error) {
f, err := Open(ctx, filename)
if err != nil {
return nil, err
}
defer f.Close() // ignore errors
return ioutil.ReadAll(f)
}
// Stat returns file status information for path, using the Default VFS.
func Stat(ctx context.Context, path string) (os.FileInfo, error) { return Default.Stat(ctx, path) }
// MkdirAll recursively creates the specified directory path with the given
// permissions, using the Default VFS.
func MkdirAll(ctx context.Context, path string, mode os.FileMode) error {
return Default.MkdirAll(ctx, path, mode)
}
// Open opens an existing file for reading, using the Default VFS.
func Open(ctx context.Context, path string) (io.ReadCloser, error) { return Default.Open(ctx, path) }
// Create creates a new file for writing, using the Default VFS.
func Create(ctx context.Context, path string) (io.WriteCloser, error) {
return Default.Create(ctx, path)
}
// CreateTempFile creates a new TempFile, using the Default VFS.
func CreateTempFile(ctx context.Context, dir, pattern string) (TempFile, error) {
return Default.CreateTempFile(ctx, dir, pattern)
}
// Rename renames oldPath to newPath, using the Default VFS, overwriting newPath
// if it exists.
func Rename(ctx context.Context, oldPath, newPath string) error {
return Default.Rename(ctx, oldPath, newPath)
}
// Remove deletes the file specified by path, using the Default VFS.
func Remove(ctx context.Context, path string) error { return Default.Remove(ctx, path) }
// Glob returns all the paths matching the specified glob pattern, using the
// Default VFS.
func Glob(ctx context.Context, glob string) ([]string, error) { return Default.Glob(ctx, glob) }
// LocalFS implements the VFS interface using the standard Go library.
type LocalFS struct{}
// Stat implements part of the VFS interface.
func (LocalFS) Stat(_ context.Context, path string) (os.FileInfo, error) {
return os.Stat(path)
}
// MkdirAll implements part of the VFS interface.
func (LocalFS) MkdirAll(_ context.Context, path string, mode os.FileMode) error {
return os.MkdirAll(path, mode)
}
// Open implements part of the VFS interface.
func (LocalFS) Open(_ context.Context, path string) (io.ReadCloser, error) {
if path == "-" {
return ioutil.NopCloser(os.Stdin), nil
}
return os.Open(path)
}
// Create implements part of the VFS interface.
func (LocalFS) Create(_ context.Context, path string) (io.WriteCloser, error) {
return os.Create(path)
}
// CreateTempFile implements part of the VFS interface.
func (LocalFS) CreateTempFile(_ context.Context, dir, pattern string) (TempFile, error) {
return ioutil.TempFile(dir, pattern)
}
// Rename implements part of the VFS interface.
func (LocalFS) Rename(_ context.Context, oldPath, newPath string) error {
return os.Rename(oldPath, newPath)
}
// Remove implements part of the VFS interface.
func (LocalFS) Remove(_ context.Context, path string) error {
return os.Remove(path)
}
// Glob implements part of the VFS interface.
func (LocalFS) Glob(_ context.Context, glob string) ([]string, error) {
return filepath.Glob(glob)
}
// UnsupportedWriter implements the Writer interface methods with stubs that
// always return ErrNotSupported.
type UnsupportedWriter struct{ Reader }
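// For example, wrapping a read-only implementation as
// UnsupportedWriter{Reader: myReadOnlyFS} yields a full Interface whose write
// methods all fail with ErrNotSupported.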
// Create implements part of Writer interface. It is not supported.
func (UnsupportedWriter) Create(_ context.Context, _ string) (io.WriteCloser, error) {
return nil, ErrNotSupported
}
// CreateTempFile implements part of the VFS interface. It is not supported.
func (UnsupportedWriter) CreateTempFile(_ context.Context, dir, pattern string) (TempFile, error) {
return nil, ErrNotSupported
}
// MkdirAll implements part of Writer interface. It is not supported.
func (UnsupportedWriter) MkdirAll(_ context.Context, _ string, _ os.FileMode) error {
return ErrNotSupported
}
// Rename implements part of Writer interface. It is not supported.
func (UnsupportedWriter) Rename(_ context.Context, _, _ string) error { return ErrNotSupported }
// Remove implements part of Writer interface. It is not supported.
func (UnsupportedWriter) Remove(_ context.Context, _ string) error { return ErrNotSupported }
| 1 | 11,316 | As the test failures indicate, making this change is going to mean changing everything which currently implements this interface. | kythe-kythe | go |
@@ -13,7 +13,7 @@ get(
get "/humans-present/oss" => redirect( "https://www.youtube.com/watch?v=VMBhumlUP-A")
get "/ios-on-rails" => redirect("https://gumroad.com/l/ios-on-rails")
get "/ios-on-rails-beta" => redirect("https://gumroad.com/l/ios-on-rails")
-get "/live" => redirect(OfficeHours.url)
+get "/live" => redirect("http://forum.upcase.com")
get "/pages/tmux" => redirect("https://www.youtube.com/watch?v=CKC8Ph-s2F4")
get "/prime" => redirect("/subscribe")
get "/products/:id/purchases/:lookup" => redirect("/purchases/%{lookup}") | 1 | get "/5by5" => redirect("/design-for-developers?utm_source=5by5")
get "/:id/articles" => redirect("http://robots.thoughtbot.com/tags/%{id}")
get "/backbone-js-on-rails" => redirect("https://gumroad.com/l/backbone-js-on-rails")
get "/courses.json" => redirect("/video_tutorials.json")
get "/courses/:id" => redirect("/video_tutorials/%{id}")
get "/d4d-resources" => redirect("/design-for-developers-resources")
get "/geocoding-on-rails" => redirect("https://gumroad.com/l/geocoding-on-rails")
get(
"/gettingstartedwithios" => redirect(
"/video_tutorials/24-getting-started-with-ios-development?utm_source=podcast"
)
)
get "/humans-present/oss" => redirect( "https://www.youtube.com/watch?v=VMBhumlUP-A")
get "/ios-on-rails" => redirect("https://gumroad.com/l/ios-on-rails")
get "/ios-on-rails-beta" => redirect("https://gumroad.com/l/ios-on-rails")
get "/live" => redirect(OfficeHours.url)
get "/pages/tmux" => redirect("https://www.youtube.com/watch?v=CKC8Ph-s2F4")
get "/prime" => redirect("/subscribe")
get "/products/:id/purchases/:lookup" => redirect("/purchases/%{lookup}")
get "/ruby-science" => redirect("https://gumroad.com/l/ruby-science")
get "/workshops/:id" => redirect("/video_tutorials/%{id}")
if Rails.env.staging? || Rails.env.production?
get(
"/products/:id" => redirect("/video_tutorials/18-test-driven-rails"),
constraints: { id: /(10|12).*/ }
)
get(
"/products/:id" => redirect("/video_tutorials/19-design-for-developers"),
constraints: { id: /(9|11).*/ }
)
get(
"/products/:id" => redirect("https://www.youtube.com/watch?v=CKC8Ph-s2F4"),
constraints: { id: /(4).*/ }
)
get "/products/14" => redirect("/prime")
get "/products/14-prime" => redirect("/prime")
end
| 1 | 11,934 | Do we want to redirect this to the forum or something in case people have it linked/bookmarked? | thoughtbot-upcase | rb |
@@ -21,7 +21,7 @@ export default Component.extend({
}),
backgroundStyle: computed('member.{name,email}', function () {
- let name = this.member.name || this.member.email;
+ let name = this.member.name || this.member.email || 'NM';
if (name) {
let color = stringToHslColor(name, 55, 55);
return htmlSafe(`background-color: ${color}`); | 1 | import Component from '@ember/component';
import {computed} from '@ember/object';
import {htmlSafe} from '@ember/string';
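// Deterministically maps a string to an HSL color: build a simple character
// hash, use `hash % 360` as the hue, and apply the given saturation/lightness.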
const stringToHslColor = function (str, saturation, lightness) {
var hash = 0;
for (var i = 0; i < str.length; i++) {
hash = str.charCodeAt(i) + ((hash << 5) - hash);
}
var h = hash % 360;
return 'hsl(' + h + ', ' + saturation + '%, ' + lightness + '%)';
};
export default Component.extend({
tagName: '',
member: null,
initialsClass: computed('sizeClass', function () {
return this.sizeClass || 'gh-member-list-avatar';
}),
backgroundStyle: computed('member.{name,email}', function () {
let name = this.member.name || this.member.email;
if (name) {
let color = stringToHslColor(name, 55, 55);
return htmlSafe(`background-color: ${color}`);
}
return htmlSafe('');
}),
initials: computed('member.{name,email}', function () {
let name = this.member.name || this.member.email;
if (name) {
let names = name.split(' ');
            let initials = names.length > 1 ? [names[0][0], names[names.length - 1][0]] : [names[0][0]];
            return initials.join('').toUpperCase();
}
return '';
})
});
| 1 | 9,334 | @peterzimon came up with this 'NM' piece as a placeholder for New Member initials, without this the screen looks broken. It changes to normal initials calculation once email or name is entered. Lmk if you'd like to put something different here ;) | TryGhost-Admin | js |
@@ -4,5 +4,6 @@ import attr from 'ember-data/attr';
export default DS.Model.extend({
name: attr('string'),
email: attr('string'),
- createdAt: attr('moment-utc')
+ createdAt: attr('moment-utc'),
+ subscriptions: attr('member-subscription')
}); | 1 | import DS from 'ember-data';
import attr from 'ember-data/attr';
export default DS.Model.extend({
name: attr('string'),
email: attr('string'),
createdAt: attr('moment-utc')
});
| 1 | 9,224 | @kevinansfield Would be cool if you can take a if this is the best way to add `subscriptions` info on member model. This uses the transform + separate model definition way which seemed to be the right way from other references in Admin | TryGhost-Admin | js |
@@ -31,10 +31,12 @@ try:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
- # Suppress; we log it at debug level to avoid polluting the logs of apps
- # and services that don't care about plotting
- logging.debug("Cannot import matplotlib. Plot class will not work.",
- exc_info=True)
+ # Suppress this optional dependency on matplotlib. NOTE we don't log this,
+ # because python logging implicitly adds the StreamHandler to root logger when
+ # calling `logging.debug`, etc., which may undermine an application's logging
+ # configuration.
+ plt = None
+ cm = None
| 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot class used in monitor mixin framework.
"""
import logging
import os
try:
# We import in here to avoid creating a matplotlib dependency in nupic.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
except ImportError:
# Suppress; we log it at debug level to avoid polluting the logs of apps
# and services that don't care about plotting
logging.debug("Cannot import matplotlib. Plot class will not work.",
exc_info=True)
class Plot(object):
def __init__(self, monitor, title, show=True):
"""
@param monitor (MonitorMixinBase) Monitor Mixin instance that generated
this plot
    @param title (string) Plot title
    @param show (bool) if True, the figure is displayed interactively
    """
self._monitor = monitor
self._title = title
self._fig = self._initFigure()
self._show = show
if self._show:
plt.ion()
plt.show()
def _initFigure(self):
fig = plt.figure()
fig.suptitle(self._prettyPrintTitle())
return fig
def _prettyPrintTitle(self):
if self._monitor.mmName is not None:
return "[{0}] {1}".format(self._monitor.mmName, self._title)
return self._title
def addGraph(self, data, position=111, xlabel=None, ylabel=None):
""" Adds a graph to the plot's figure.
@param data See matplotlib.Axes.plot documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.plot(data)
plt.draw()
def addHistogram(self, data, position=111, xlabel=None, ylabel=None,
bins=None):
""" Adds a histogram to the plot's figure.
@param data See matplotlib.Axes.hist documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
"""
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.hist(data, bins=bins, color="green", alpha=0.8)
plt.draw()
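  # Illustrative usage sketch (hypothetical names; assumes matplotlib imported
  # successfully and `tm` is a monitor mixin instance):
  #   plot = Plot(tm, "Example plot")
  #   plot.addGraph(range(10), position=211, xlabel="step", ylabel="value")
  #   plot.addHistogram([1, 2, 2, 3], position=212, bins=5)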
def add2DArray(self, data, position=111, xlabel=None, ylabel=None, cmap=None,
aspect="auto", interpolation="nearest", name=None):
""" Adds an image to the plot's figure.
@param data a 2D array. See matplotlib.Axes.imshow documentation.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@param cmap color map used in the rendering
@param aspect how aspect ratio is handled during resize
    @param interpolation interpolation method
    @param name if not None, the rendered figure is also saved to log/{name}.png
    """
if cmap is None:
      # The default colormap is an ugly blue-red model.
cmap = cm.Greys
ax = self._addBase(position, xlabel=xlabel, ylabel=ylabel)
ax.imshow(data, cmap=cmap, aspect=aspect, interpolation=interpolation)
if self._show:
plt.draw()
if name is not None:
if not os.path.exists("log"):
os.mkdir("log")
plt.savefig("log/{name}.png".format(name=name), bbox_inches="tight",
figsize=(8, 6), dpi=400)
def _addBase(self, position, xlabel=None, ylabel=None):
""" Adds a subplot to the plot's figure at specified position.
@param position A 3-digit number. The first two digits define a 2D grid
where subplots may be added. The final digit specifies the nth grid
location for the added subplot
@param xlabel text to be displayed on the x-axis
@param ylabel text to be displayed on the y-axis
@returns (matplotlib.Axes) Axes instance
"""
ax = self._fig.add_subplot(position)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ax
| 1 | 20,855 | This is the common way of dealing with optional dependencies | numenta-nupic | py |
@@ -67,8 +67,8 @@ func (t testHelper) AvailableEndpoints() *corev1.Endpoints {
}
}
-func (t testHelper) ReadyDependencyStatus() *duckv1.KResource {
- kr := &duckv1.KResource{}
+func (t testHelper) ReadyDependencyStatus() *duckv1.Source {
+ kr := &duckv1.Source{}
kr.Status.SetConditions(apis.Conditions{{
Type: "Ready",
Status: corev1.ConditionTrue, | 1 | /*
Copyright 2020 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
duckv1 "knative.dev/pkg/apis/duck/v1"
)
type testHelper struct{}
// TestHelper contains helpers for unit tests.
var TestHelper = testHelper{}
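// Illustrative usage sketch (not part of the original file): these helpers are
// typically used to build the statuses expected by reconciler table tests,
// e.g. wantStatus := TestHelper.ReadyBrokerStatus().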
func (t testHelper) ReadyBrokerStatus() *BrokerStatus {
bs := &BrokerStatus{}
bs.SetAddress(apis.HTTP("example.com"))
bs.MarkSubscriptionReady()
bs.MarkTopicReady()
bs.MarkBrokerCellReady()
return bs
}
func (t testHelper) UnconfiguredBrokerStatus() *BrokerStatus {
bs := &BrokerStatus{}
return bs
}
func (t testHelper) UnknownBrokerStatus() *BrokerStatus {
bs := &BrokerStatus{}
bs.InitializeConditions()
return bs
}
func (t testHelper) FalseBrokerStatus() *BrokerStatus {
bs := &BrokerStatus{}
bs.SetAddress(nil)
return bs
}
func (t testHelper) AvailableEndpoints() *corev1.Endpoints {
return &corev1.Endpoints{
ObjectMeta: metav1.ObjectMeta{
Name: "available",
},
Subsets: []corev1.EndpointSubset{{
Addresses: []corev1.EndpointAddress{{
IP: "127.0.0.1",
}},
}},
}
}
func (t testHelper) ReadyDependencyStatus() *duckv1.KResource {
kr := &duckv1.KResource{}
kr.Status.SetConditions(apis.Conditions{{
Type: "Ready",
Status: corev1.ConditionTrue,
}})
return kr
}
func (t testHelper) UnconfiguredDependencyStatus() *duckv1.KResource {
kr := &duckv1.KResource{}
return kr
}
func (t testHelper) UnknownDependencyStatus() *duckv1.KResource {
kr := &duckv1.KResource{}
kr.Status.SetConditions(apis.Conditions{{
Type: "Ready",
Status: corev1.ConditionUnknown,
}})
return kr
}
func (t testHelper) FalseDependencyStatus() *duckv1.KResource {
kr := &duckv1.KResource{}
kr.Status.SetConditions(apis.Conditions{{
Type: "Ready",
Status: corev1.ConditionFalse,
}})
return kr
}
| 1 | 18,741 | nit: we can replace all `kr` in this file with like `src`. | google-knative-gcp | go |
@@ -101,7 +101,7 @@ ReadPreference.fromOptions = function(options) {
if (typeof readPreference === 'string') {
return new ReadPreference(readPreference, readPreferenceTags);
} else if (!(readPreference instanceof ReadPreference) && typeof readPreference === 'object') {
- const mode = readPreference.mode || readPreference.preference;
+ const mode = readPreference.mode;
if (mode && typeof mode === 'string') {
return new ReadPreference(mode, readPreference.tags, {
maxStalenessSeconds: readPreference.maxStalenessSeconds | 1 | 'use strict';
/**
 * The **ReadPreference** class represents a MongoDB read preference and is
* used to construct connections.
* @class
* @param {string} mode A string describing the read preference mode (primary|primaryPreferred|secondary|secondaryPreferred|nearest)
* @param {array} tags The tags object
* @param {object} [options] Additional read preference options
* @param {number} [options.maxStalenessSeconds] Max secondary read staleness in seconds, Minimum value is 90 seconds.
* @see https://docs.mongodb.com/manual/core/read-preference/
* @return {ReadPreference}
*/
const ReadPreference = function(mode, tags, options) {
if (!ReadPreference.isValid(mode)) {
throw new TypeError(`Invalid read preference mode ${mode}`);
}
// TODO(major): tags MUST be an array of tagsets
if (tags && !Array.isArray(tags)) {
console.warn(
'ReadPreference tags must be an array, this will change in the next major version'
);
if (typeof tags.maxStalenessSeconds !== 'undefined') {
// this is likely an options object
options = tags;
tags = undefined;
} else {
tags = [tags];
}
}
this.mode = mode;
this.tags = tags;
options = options || {};
if (options.maxStalenessSeconds != null) {
if (options.maxStalenessSeconds <= 0) {
throw new TypeError('maxStalenessSeconds must be a positive integer');
}
this.maxStalenessSeconds = options.maxStalenessSeconds;
// NOTE: The minimum required wire version is 5 for this read preference. If the existing
// topology has a lower value then a MongoError will be thrown during server selection.
this.minWireVersion = 5;
}
if (this.mode === ReadPreference.PRIMARY) {
if (this.tags && Array.isArray(this.tags) && this.tags.length > 0) {
throw new TypeError('Primary read preference cannot be combined with tags');
}
if (this.maxStalenessSeconds) {
throw new TypeError('Primary read preference cannot be combined with maxStalenessSeconds');
}
}
};
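// Illustrative usage sketch (not part of the original file); the tag set below
// is hypothetical:
//
//   const rp = new ReadPreference(ReadPreference.SECONDARY, [{ dc: 'ny' }], {
//     maxStalenessSeconds: 120
//   });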
// Support the deprecated `preference` property introduced in the porcelain layer
Object.defineProperty(ReadPreference.prototype, 'preference', {
enumerable: true,
get: function() {
return this.mode;
}
});
/*
* Read preference mode constants
*/
ReadPreference.PRIMARY = 'primary';
ReadPreference.PRIMARY_PREFERRED = 'primaryPreferred';
ReadPreference.SECONDARY = 'secondary';
ReadPreference.SECONDARY_PREFERRED = 'secondaryPreferred';
ReadPreference.NEAREST = 'nearest';
const VALID_MODES = [
ReadPreference.PRIMARY,
ReadPreference.PRIMARY_PREFERRED,
ReadPreference.SECONDARY,
ReadPreference.SECONDARY_PREFERRED,
ReadPreference.NEAREST,
null
];
/**
* Construct a ReadPreference given an options object.
*
* @param {object} options The options object from which to extract the read preference.
* @return {ReadPreference}
*/
ReadPreference.fromOptions = function(options) {
const readPreference = options.readPreference;
const readPreferenceTags = options.readPreferenceTags;
if (readPreference == null) {
return null;
}
if (typeof readPreference === 'string') {
return new ReadPreference(readPreference, readPreferenceTags);
} else if (!(readPreference instanceof ReadPreference) && typeof readPreference === 'object') {
const mode = readPreference.mode || readPreference.preference;
if (mode && typeof mode === 'string') {
return new ReadPreference(mode, readPreference.tags, {
maxStalenessSeconds: readPreference.maxStalenessSeconds
});
}
}
return readPreference;
};
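// Illustrative sketch (not part of the original file): fromOptions accepts a
// string mode, a plain object, or an existing ReadPreference instance, e.g.
//
//   ReadPreference.fromOptions({ readPreference: 'secondaryPreferred' });
//   ReadPreference.fromOptions({
//     readPreference: { mode: 'nearest', maxStalenessSeconds: 120 }
//   });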
/**
* Validate if a mode is legal
*
* @method
* @param {string} mode The string representing the read preference mode.
* @return {boolean} True if a mode is valid
*/
ReadPreference.isValid = function(mode) {
return VALID_MODES.indexOf(mode) !== -1;
};
/**
* Validate if a mode is legal
*
* @method
* @param {string} mode The string representing the read preference mode.
* @return {boolean} True if a mode is valid
*/
ReadPreference.prototype.isValid = function(mode) {
return ReadPreference.isValid(typeof mode === 'string' ? mode : this.mode);
};
const needSlaveOk = ['primaryPreferred', 'secondary', 'secondaryPreferred', 'nearest'];
/**
* Indicates that this readPreference needs the "slaveOk" bit when sent over the wire
* @method
* @return {boolean}
* @see https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#op-query
*/
ReadPreference.prototype.slaveOk = function() {
return needSlaveOk.indexOf(this.mode) !== -1;
};
/**
 * Are the two read preferences equal
* @method
* @param {ReadPreference} readPreference The read preference with which to check equality
* @return {boolean} True if the two ReadPreferences are equivalent
*/
ReadPreference.prototype.equals = function(readPreference) {
return readPreference.mode === this.mode;
};
/**
* Return JSON representation
* @method
* @return {Object} A JSON representation of the ReadPreference
*/
ReadPreference.prototype.toJSON = function() {
const readPreference = { mode: this.mode };
if (Array.isArray(this.tags)) readPreference.tags = this.tags;
if (this.maxStalenessSeconds) readPreference.maxStalenessSeconds = this.maxStalenessSeconds;
return readPreference;
};
/**
* Primary read preference
* @member
* @type {ReadPreference}
*/
ReadPreference.primary = new ReadPreference('primary');
/**
* Primary Preferred read preference
* @member
* @type {ReadPreference}
*/
ReadPreference.primaryPreferred = new ReadPreference('primaryPreferred');
/**
* Secondary read preference
* @member
* @type {ReadPreference}
*/
ReadPreference.secondary = new ReadPreference('secondary');
/**
* Secondary Preferred read preference
* @member
* @type {ReadPreference}
*/
ReadPreference.secondaryPreferred = new ReadPreference('secondaryPreferred');
/**
* Nearest read preference
* @member
* @type {ReadPreference}
*/
ReadPreference.nearest = new ReadPreference('nearest');
module.exports = ReadPreference;
| 1 | 17,693 | same concern here as above | mongodb-node-mongodb-native | js |
@@ -147,7 +147,7 @@ module Mongoid
def raw
validate_out!
cmd = command
- opts = { read: cmd.delete(:read) } if cmd[:read]
+ opts = { read: criteria.options.fetch(:read) } if criteria.options[:read]
@map_reduce.database.command(cmd, (opts || {}).merge(session: _session)).first
end
alias :results :raw | 1 | # frozen_string_literal: true
module Mongoid
module Contextual
class MapReduce
extend Forwardable
include Enumerable
include Command
def_delegators :results, :[]
def_delegators :entries, :==, :empty?
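      # Illustrative usage sketch (not part of the original file). The model
      # and selector are hypothetical; a context like this is normally built
      # from a criteria, for example:
      #
      #   Band.where(:likes.gt => 100).map_reduce(map, reduce).out(inline: 1)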
# Get all the counts returned by the map/reduce.
#
# @example Get the counts.
# map_reduce.counts
#
# @return [ Hash ] The counts.
def counts
results["counts"]
end
# Iterates over each of the documents in the map/reduce, excluding the
# extra information that was passed back from the database.
#
# @example Iterate over the results.
# map_reduce.each do |doc|
# p doc
# end
#
# @return [ Enumerator ] The enumerator.
def each
validate_out!
if block_given?
@map_reduce.each do |doc|
yield doc
end
else
@map_reduce.to_enum
end
end
# Get the number of documents emitted by the map/reduce.
#
# @example Get the emitted document count.
# map_reduce.emitted
#
# @return [ Integer ] The number of emitted documents.
def emitted
counts["emit"]
end
# Provide a finalize js function for the map/reduce.
#
# @example Provide a finalize function.
# map_reduce.finalize(func)
#
# @param [ String ] function The finalize function.
#
# @return [ MapReduce ] The map reduce.
def finalize(function)
@map_reduce = @map_reduce.finalize(function)
self
end
# Initialize the new map/reduce directive.
#
# @example Initialize the new map/reduce.
# MapReduce.new(criteria, map, reduce)
#
# @param [ Criteria ] criteria The Mongoid criteria.
# @param [ String ] map The map js function.
# @param [ String ] reduce The reduce js function.
def initialize(collection, criteria, map, reduce)
@collection = collection
@criteria = criteria
@map_reduce = @criteria.view.map_reduce(map, reduce)
end
# Get the number of documents that were input into the map/reduce.
#
# @example Get the count of input documents.
# map_reduce.input
#
# @return [ Integer ] The number of input documents.
def input
counts["input"]
end
# Sets the map/reduce to use jsMode.
#
# @example Set the map/reduce to jsMode.
# map_reduce.js_mode
#
# @return [ MapReduce ] The map/reduce.
def js_mode
@map_reduce = @map_reduce.js_mode(true)
self
end
# Specifies where the map/reduce output is to be stored.
# Please see MongoDB documentation for supported map reduce options.
#
# @example Store output in memory.
# map_reduce.out(inline: 1)
#
# @example Store output in a collection, replacing existing documents.
# map_reduce.out(replace: "collection_name")
#
# @example Store output in a collection, merging existing documents.
# map_reduce.out(merge: "collection_name")
#
# @example Store output in a collection, reducing existing documents.
# map_reduce.out(reduce: "collection_name")
#
# @example Return results from map reduce.
# map_reduce.out(inline: 1)
#
# @param [ Hash ] location The place to store the results.
#
# @return [ MapReduce ] The map/reduce object.
def out(location)
normalized = location.dup
normalized.update_values do |value|
value.is_a?(::Symbol) ? value.to_s : value
end
@map_reduce = @map_reduce.out(normalized)
self
end
# Get the number of documents output by the map/reduce.
#
# @example Get the output document count.
# map_reduce.output
#
# @return [ Integer ] The number of output documents.
def output
counts["output"]
end
# Get the raw output from the map/reduce operation.
#
# @example Get the raw output.
# map_reduce.raw
#
# @return [ Hash ] The raw output.
def raw
validate_out!
cmd = command
opts = { read: cmd.delete(:read) } if cmd[:read]
@map_reduce.database.command(cmd, (opts || {}).merge(session: _session)).first
end
alias :results :raw
# Execute the map/reduce, returning the raw output.
# Useful when you don't care about map/reduce's output.
#
# @example Run the map reduce
# map_reduce.execute
#
# @return [ Hash ] The raw output
alias :execute :raw
# Get the number of documents reduced by the map/reduce.
#
# @example Get the reduced document count.
# map_reduce.reduced
#
# @return [ Integer ] The number of reduced documents.
def reduced
counts["reduce"]
end
# Adds a javascript object to the global scope of the map/reduce.
#
# @example Add an object to the global scope.
# map_reduce.scope(name: value)
#
# @param [ Hash ] object A hash of key/values for the global scope.
#
# @return [ MapReduce ]
def scope(object)
@map_reduce = @map_reduce.scope(object)
self
end
# Get the execution time of the map/reduce.
#
# @example Get the execution time.
# map_reduce.time
#
# @return [ Float ] The time in milliseconds.
def time
results["timeMillis"]
end
# Get a pretty string representation of the map/reduce, including the
# criteria, map, reduce, finalize, and out option.
#
# @example Inspect the map_reduce.
# map_reduce.inspect
#
# @return [ String ] The inspection string.
def inspect
%Q{#<Mongoid::Contextual::MapReduce
selector: #{criteria.selector.inspect}
class: #{criteria.klass}
map: #{command[:map]}
reduce: #{command[:reduce]}
finalize: #{command[:finalize]}
out: #{command[:out].inspect}>
}
end
def command
@map_reduce.send(:map_reduce_spec)[:selector]
end
private
def validate_out!
raise Errors::NoMapReduceOutput.new({}) unless @map_reduce.out
end
def _session
criteria.send(:_session)
end
end
end
end
| 1 | 13,471 | Thank you for this, I gather this repairs failures that I've seen in another PR. | mongodb-mongoid | rb |
@@ -159,7 +159,7 @@ abstract class BaseSparkAction<ThisT, R> implements Action<ThisT, R> {
.repartition(spark.sessionState().conf().numShufflePartitions()) // avoid adaptive execution combining tasks
.as(Encoders.bean(ManifestFileBean.class));
- return allManifests.flatMap(new ReadManifest(ioBroadcast), Encoders.STRING()).toDF("file_path");
+ return allManifests.flatMap(new ReadManifest(ioBroadcast), Encoders.STRING()).toDF("file_path").distinct();
}
protected Dataset<Row> buildManifestFileDF(Table table) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.actions;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import org.apache.iceberg.BaseTable;
import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.MetadataTableType;
import org.apache.iceberg.Snapshot;
import org.apache.iceberg.StaticTableOperations;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.actions.Action;
import org.apache.iceberg.actions.ManifestFileBean;
import org.apache.iceberg.common.DynMethods;
import org.apache.iceberg.io.ClosingIterator;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.spark.JobGroupInfo;
import org.apache.iceberg.spark.JobGroupUtils;
import org.apache.iceberg.spark.SparkUtil;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.broadcast.Broadcast;
import org.apache.spark.sql.DataFrameReader;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import static org.apache.iceberg.MetadataTableType.ALL_MANIFESTS;
abstract class BaseSparkAction<ThisT, R> implements Action<ThisT, R> {
private static final AtomicInteger JOB_COUNTER = new AtomicInteger();
private final SparkSession spark;
private final JavaSparkContext sparkContext;
private final Map<String, String> options = Maps.newHashMap();
protected BaseSparkAction(SparkSession spark) {
this.spark = spark;
this.sparkContext = new JavaSparkContext(spark.sparkContext());
}
protected SparkSession spark() {
return spark;
}
protected JavaSparkContext sparkContext() {
return sparkContext;
}
protected abstract ThisT self();
@Override
public ThisT option(String name, String value) {
options.put(name, value);
return self();
}
@Override
public ThisT options(Map<String, String> newOptions) {
options.putAll(newOptions);
return self();
}
protected Map<String, String> options() {
return options;
}
protected <T> T withJobGroupInfo(JobGroupInfo info, Supplier<T> supplier) {
SparkContext context = spark().sparkContext();
JobGroupInfo previousInfo = JobGroupUtils.getJobGroupInfo(context);
try {
JobGroupUtils.setJobGroupInfo(context, info);
return supplier.get();
} finally {
JobGroupUtils.setJobGroupInfo(context, previousInfo);
}
}
protected JobGroupInfo newJobGroupInfo(String groupId, String desc) {
return new JobGroupInfo(groupId + "-" + JOB_COUNTER.incrementAndGet(), desc, false);
}
/**
* Returns all the path locations of all Manifest Lists for a given list of snapshots
* @param snapshots snapshots
* @return the paths of the Manifest Lists
*/
private List<String> getManifestListPaths(Iterable<Snapshot> snapshots) {
List<String> manifestLists = Lists.newArrayList();
for (Snapshot snapshot : snapshots) {
String manifestListLocation = snapshot.manifestListLocation();
if (manifestListLocation != null) {
manifestLists.add(manifestListLocation);
}
}
return manifestLists;
}
/**
* Returns all Metadata file paths which may not be in the current metadata. Specifically
* this includes "version-hint" files as well as entries in metadata.previousFiles.
* @param ops TableOperations for the table we will be getting paths from
* @return a list of paths to metadata files
*/
private List<String> getOtherMetadataFilePaths(TableOperations ops) {
List<String> otherMetadataFiles = Lists.newArrayList();
otherMetadataFiles.add(ops.metadataFileLocation("version-hint.text"));
TableMetadata metadata = ops.current();
otherMetadataFiles.add(metadata.metadataFileLocation());
for (TableMetadata.MetadataLogEntry previousMetadataFile : metadata.previousFiles()) {
otherMetadataFiles.add(previousMetadataFile.file());
}
return otherMetadataFiles;
}
protected Table newStaticTable(TableMetadata metadata, FileIO io) {
String metadataFileLocation = metadata.metadataFileLocation();
StaticTableOperations ops = new StaticTableOperations(metadataFileLocation, io);
return new BaseTable(ops, metadataFileLocation);
}
protected Dataset<Row> buildValidDataFileDF(Table table) {
JavaSparkContext context = new JavaSparkContext(spark.sparkContext());
Broadcast<FileIO> ioBroadcast = context.broadcast(SparkUtil.serializableFileIO(table));
Dataset<ManifestFileBean> allManifests = loadMetadataTable(table, ALL_MANIFESTS)
.selectExpr("path", "length", "partition_spec_id as partitionSpecId", "added_snapshot_id as addedSnapshotId")
.dropDuplicates("path")
.repartition(spark.sessionState().conf().numShufflePartitions()) // avoid adaptive execution combining tasks
.as(Encoders.bean(ManifestFileBean.class));
return allManifests.flatMap(new ReadManifest(ioBroadcast), Encoders.STRING()).toDF("file_path");
}
protected Dataset<Row> buildManifestFileDF(Table table) {
return loadMetadataTable(table, ALL_MANIFESTS).selectExpr("path as file_path");
}
protected Dataset<Row> buildManifestListDF(Table table) {
List<String> manifestLists = getManifestListPaths(table.snapshots());
return spark.createDataset(manifestLists, Encoders.STRING()).toDF("file_path");
}
protected Dataset<Row> buildOtherMetadataFileDF(TableOperations ops) {
List<String> otherMetadataFiles = getOtherMetadataFilePaths(ops);
return spark.createDataset(otherMetadataFiles, Encoders.STRING()).toDF("file_path");
}
protected Dataset<Row> buildValidMetadataFileDF(Table table, TableOperations ops) {
Dataset<Row> manifestDF = buildManifestFileDF(table);
Dataset<Row> manifestListDF = buildManifestListDF(table);
Dataset<Row> otherMetadataFileDF = buildOtherMetadataFileDF(ops);
return manifestDF.union(otherMetadataFileDF).union(manifestListDF);
}
// Attempt to use Spark3 Catalog resolution if available on the path
private static final DynMethods.UnboundMethod LOAD_CATALOG = DynMethods.builder("loadCatalogMetadataTable")
.hiddenImpl("org.apache.iceberg.spark.Spark3Util", SparkSession.class, String.class, MetadataTableType.class)
.orNoop()
.build();
private Dataset<Row> loadCatalogMetadataTable(String tableName, MetadataTableType type) {
Preconditions.checkArgument(!LOAD_CATALOG.isNoop(), "Cannot find Spark3Util class but Spark3 is in use");
return LOAD_CATALOG.asStatic().invoke(spark, tableName, type);
}
protected Dataset<Row> loadMetadataTable(Table table, MetadataTableType type) {
String tableName = table.name();
String tableLocation = table.location();
DataFrameReader dataFrameReader = spark.read().format("iceberg");
if (tableName.contains("/")) {
// Hadoop Table or Metadata location passed, load without a catalog
return dataFrameReader.load(tableName + "#" + type);
}
// Try DSV2 catalog based name based resolution
if (spark.version().startsWith("3")) {
Dataset<Row> catalogMetadataTable = loadCatalogMetadataTable(tableName, type);
if (catalogMetadataTable != null) {
return catalogMetadataTable;
}
}
// Catalog based resolution failed, our catalog may be a non-DatasourceV2 Catalog
if (tableName.startsWith("hadoop.")) {
// Try loading by location as Hadoop table without Catalog
return dataFrameReader.load(tableLocation + "#" + type);
} else if (tableName.startsWith("hive")) {
// Try loading by name as a Hive table without Catalog
return dataFrameReader.load(tableName.replaceFirst("hive\\.", "") + "." + type);
} else {
throw new IllegalArgumentException(String.format(
"Cannot find the metadata table for %s of type %s", tableName, type));
}
}
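  // Illustrative note (not part of the original file): the branches above mean
  // the metadata table may be resolved from a path-style name containing "/",
  // a "hadoop."-prefixed catalog name, or a "hive."-prefixed name such as
  // "hive.db.table"; the example names here are hypothetical.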
private static class ReadManifest implements FlatMapFunction<ManifestFileBean, String> {
private final Broadcast<FileIO> io;
ReadManifest(Broadcast<FileIO> io) {
this.io = io;
}
@Override
public Iterator<String> call(ManifestFileBean manifest) {
return new ClosingIterator<>(ManifestFiles.readPaths(manifest, io.getValue()).iterator());
}
}
}
| 1 | 36,012 | This place probably makes sense to me. We can consider exposing an argument to make the dedup step optional (I am not sure it is a good idea but I want to think this through together). The dedup step we are adding is going to trigger a shuffle. Technically, we are fine in the existing expire snapshots action as it does the dedup in `except`. The question is what kind of performance impact deduplicating here will have. We only have duplicates if multiple manifests reference the same files. In `rewrite_manifests` procedure, we rewrite all manifests, meaning we produce a snapshot with new manifests where entries are old data files. Also, there are updates and deletes that may rewrite manifests. I think deduplicating here makes sense to me in all cases. Thoughts, @rdblue @flyrain @RussellSpitzer @karuppayya? | apache-iceberg | java |
@@ -1516,6 +1516,11 @@ func (c *ClusterManager) NodeRemoveDone(nodeID string, result error) {
nodeID, err)
logrus.Errorf(msg)
}
+
+ // Remove osdconfig data from etcd
+ if err := c.configManager.DeleteNodeConf(nodeID); err != nil {
+ logrus.Warn("error removing node from osdconfig:", err)
+ }
}
func (c *ClusterManager) replayNodeDecommission() { | 1 | // Package cluster implements a cluster state machine. It relies on a cluster
// wide keyvalue store for coordinating the state of the cluster.
// It also stores the state of the cluster in this keyvalue store.
package cluster
import (
"container/list"
"encoding/gob"
"errors"
"fmt"
"net"
"os"
"os/exec"
"strings"
"sync"
"time"
"github.com/libopenstorage/gossip"
"github.com/libopenstorage/gossip/types"
"github.com/libopenstorage/openstorage/api"
"github.com/libopenstorage/openstorage/config"
"github.com/libopenstorage/openstorage/osdconfig"
"github.com/libopenstorage/systemutils"
"github.com/portworx/kvdb"
"github.com/sirupsen/logrus"
)
const (
heartbeatKey = "heartbeat"
clusterLockKey = "/cluster/lock"
gossipVersionKey = "Gossip Version"
decommissionErrMsg = "Node %s must be offline or in maintenance " +
"mode to be decommissioned."
)
var (
// ErrNodeRemovePending is returned when Node remove does not succeed and is
// kept in pending state
ErrNodeRemovePending = errors.New("Node remove is pending")
ErrInitNodeNotFound = errors.New("This node is already initialized but " +
"could not be found in the cluster map.")
	ErrNodeDecommissioned = errors.New("Node is decommissioned.")
stopHeartbeat = make(chan bool)
ErrRemoveCausesDataLoss = errors.New("Cannot remove node without data loss")
)
// ClusterManager implements the cluster interface
type ClusterManager struct {
size int
listeners *list.List
config config.ClusterConfig
kv kvdb.Kvdb
status api.Status
nodeCache map[string]api.Node // Cached info on the nodes in the cluster.
nodeCacheLock sync.Mutex
nodeStatuses map[string]api.Status // Set of nodes currently marked down.
gossip gossip.Gossiper
gossipVersion string
gossipPort string
gEnabled bool
selfNode api.Node
selfNodeLock sync.Mutex // Lock that guards data and label of selfNode
system systemutils.System
configManager osdconfig.ConfigManager
}
type checkFunc func(ClusterInfo) error
func ifaceToIp(iface *net.Interface) (string, error) {
addrs, err := iface.Addrs()
if err != nil {
return "", err
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip == nil || ip.IsLoopback() {
continue
}
ip = ip.To4()
if ip == nil {
continue // not an ipv4 address
}
if ip.String() == "" {
continue // address is empty string
}
return ip.String(), nil
}
return "", errors.New("Node not connected to the network.")
}
func ifaceNameToIp(ifaceName string) (string, error) {
stdout, err := exec.Command("/usr/sbin/ip", "a", "show", ifaceName, "label", ifaceName).Output()
if err != nil {
return "", err
}
ipOp := string(stdout)
// Parse the output of command /usr/bin/ip a show eth0 label eth0:0
ipOpParts := strings.Fields(ipOp)
for i, tokens := range ipOpParts {
// Only check for ipv4 addresses
if tokens == "inet" {
ip := ipOpParts[i+1]
// Remove the mask
ipAddr := strings.Split(ip, "/")
if strings.Contains(ipAddr[0], "127") {
// Loopback address
continue
}
if ipAddr[0] == "" {
// Address is empty string
continue
}
return ipAddr[0], nil
}
}
return "", fmt.Errorf("Unable to find Ip address for given interface")
}
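// Illustrative note (not part of the original file): the parser above expects
// output in the format produced by "/usr/sbin/ip a show <iface> label <iface>",
// e.g. a line such as
//
//	inet 10.0.0.12/24 brd 10.0.0.255 scope global eth0
//
// from which "10.0.0.12" would be returned; the address shown is hypothetical.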
// ExternalIp returns the mgmt and data ip based on the config
func ExternalIp(config *config.ClusterConfig) (string, string, error) {
mgmtIp := ""
dataIp := ""
var err error
if config.MgmtIp == "" && config.MgtIface != "" {
mgmtIp, err = ifaceNameToIp(config.MgtIface)
if err != nil {
return "", "", errors.New("Invalid data network interface " +
"specified.")
}
} else if config.MgmtIp != "" {
mgmtIp = config.MgmtIp
}
if config.DataIp == "" && config.DataIface != "" {
dataIp, err = ifaceNameToIp(config.DataIface)
if err != nil {
return "", "", errors.New("Invalid data network interface " +
"specified.")
}
} else if config.DataIp != "" {
dataIp = config.DataIp
}
if mgmtIp != "" && dataIp != "" {
return mgmtIp, dataIp, nil
} else if mgmtIp != "" { // dataIp is empty
return mgmtIp, mgmtIp, nil
} else if dataIp != "" { // mgmtIp is empty
return dataIp, dataIp, nil
} // both are empty, try to pick first available interface for both
// No network interface specified, pick first default.
ifaces, err := net.Interfaces()
if err != nil {
return "", "", err
}
for _, iface := range ifaces {
if iface.Flags&net.FlagUp == 0 {
continue // interface down
}
if iface.Flags&net.FlagLoopback != 0 {
continue // loopback interface
}
mgmtIp, err = ifaceToIp(&iface)
if err != nil {
logrus.Printf("Skipping interface without IP: %v: %v",
iface, err)
continue
}
return mgmtIp, mgmtIp, err
}
return "", "", errors.New("Node not connected to the network.")
}
// getNodeEntry is internal helper method, shared between Inspect() and enumerateNodesFromCache()
// Parameter 'clustDBRef' may be a pointer to "empty" struct, in which case it'll be populated, but it must not be NULL.
// Also, it's caller's responsibility to lock the access to the NodeCache.
func (c *ClusterManager) getNodeEntry(nodeID string, clustDBRef *ClusterInfo) (api.Node, error) {
var n api.Node
var ok bool
if nodeID == c.selfNode.Id {
n = *c.getCurrentState()
} else if n, ok = c.nodeCache[nodeID]; !ok {
return api.Node{}, errors.New("Unable to locate node with provided UUID.")
} else if n.Status == api.Status_STATUS_OFFLINE &&
(n.DataIp == "" || n.MgmtIp == "") {
// cached info unstable, read from DB
if clustDBRef.Id == "" {
// We've been passed "empty" struct, lazy-init before use
clusterDB, _, _ := readClusterInfo()
*clustDBRef = clusterDB
}
// Gossip does not have essential information of
// an offline node. Provide the essential data
// that we have in the cluster db
if v, ok := clustDBRef.NodeEntries[n.Id]; ok {
n.MgmtIp = v.MgmtIp
n.DataIp = v.DataIp
n.Hostname = v.Hostname
n.NodeLabels = v.NodeLabels
} else {
logrus.Warnf("Could not query NodeID %v", nodeID)
			// Node entry won't be refreshed from DB, will use the "offline" original
}
}
return n, nil
}
// Inspect inspects given node and returns the state
func (c *ClusterManager) Inspect(nodeID string) (api.Node, error) {
c.nodeCacheLock.Lock()
defer c.nodeCacheLock.Unlock()
return c.getNodeEntry(nodeID, &ClusterInfo{})
}
// AddEventListener adds a new listener
func (c *ClusterManager) AddEventListener(listener ClusterListener) error {
logrus.Printf("Adding cluster event listener: %s", listener.String())
c.listeners.PushBack(listener)
return nil
}
// UpdateData updates self node data
func (c *ClusterManager) UpdateData(nodeData map[string]interface{}) error {
c.selfNodeLock.Lock()
defer c.selfNodeLock.Unlock()
for dataKey, dataValue := range nodeData {
c.selfNode.NodeData[dataKey] = dataValue
}
return nil
}
func (c *ClusterManager) UpdateLabels(nodeLabels map[string]string) error {
c.selfNodeLock.Lock()
defer c.selfNodeLock.Unlock()
if c.selfNode.NodeLabels == nil {
c.selfNode.NodeLabels = make(map[string]string)
}
for labelKey, labelValue := range nodeLabels {
c.selfNode.NodeLabels[labelKey] = labelValue
}
return nil
}
// GetData returns self node's data
func (c *ClusterManager) GetData() (map[string]*api.Node, error) {
nodes := make(map[string]*api.Node)
c.nodeCacheLock.Lock()
defer c.nodeCacheLock.Unlock()
for _, value := range c.nodeCache {
copyValue := value.Copy()
nodes[value.Id] = copyValue
}
return nodes, nil
}
// getCurrentState always returns the copy of selfNode that
// cluster manager maintains. It also updates the selfNode
// with latest data.
func (c *ClusterManager) getCurrentState() *api.Node {
c.selfNodeLock.Lock()
defer c.selfNodeLock.Unlock()
c.selfNode.Timestamp = time.Now()
c.selfNode.Cpu, _, _ = c.system.CpuUsage()
c.selfNode.MemTotal, c.selfNode.MemUsed, c.selfNode.MemFree = c.system.MemUsage()
c.selfNode.Timestamp = time.Now()
for e := c.listeners.Front(); e != nil; e = e.Next() {
listenerDataMap := e.Value.(ClusterListener).ListenerData()
if listenerDataMap == nil {
continue
}
for key, val := range listenerDataMap {
c.selfNode.NodeData[key] = val
}
}
nodeCopy := (&c.selfNode).Copy()
return nodeCopy
}
func (c *ClusterManager) getNonDecommisionedPeers(
db ClusterInfo,
) map[types.NodeId]types.NodeUpdate {
peers := make(map[types.NodeId]types.NodeUpdate)
for _, nodeEntry := range db.NodeEntries {
if nodeEntry.Status == api.Status_STATUS_DECOMMISSION {
continue
}
peers[types.NodeId(nodeEntry.Id)] = types.NodeUpdate{
Addr: nodeEntry.DataIp + ":" + c.gossipPort,
QuorumMember: !nodeEntry.NonQuorumMember,
}
}
return peers
}
// Get the latest config.
func (c *ClusterManager) watchDB(key string, opaque interface{},
kvp *kvdb.KVPair, watchErr error) error {
db, kvdbVersion, err := readClusterInfo()
if err != nil {
logrus.Warnln("Failed to read database after update ", err)
// Exit since an update may be missed here.
os.Exit(1)
}
// Update all the listeners with the new db
for e := c.listeners.Front(); e != nil; e = e.Next() {
err := e.Value.(ClusterListener).UpdateCluster(&c.selfNode, &db)
if err != nil {
logrus.Warnln("Failed to notify ", e.Value.(ClusterListener).String())
}
}
for _, nodeEntry := range db.NodeEntries {
if nodeEntry.Status == api.Status_STATUS_DECOMMISSION {
logrus.Infof("ClusterManager watchDB, node ID "+
"%s state is Decommission.",
nodeEntry.Id)
n, found := c.getNodeCacheEntry(nodeEntry.Id)
if !found {
logrus.Errorf("ClusterManager watchDB, "+
"node ID %s not in node cache",
nodeEntry.Id)
continue
}
if n.Status == api.Status_STATUS_DECOMMISSION {
logrus.Infof("ClusterManager watchDB, "+
"node ID %s is already decommission "+
"on this node",
nodeEntry.Id)
continue
}
logrus.Infof("ClusterManager watchDB, "+
"decommsission node ID %s on this node",
nodeEntry.Id)
n.Status = api.Status_STATUS_DECOMMISSION
c.putNodeCacheEntry(nodeEntry.Id, n)
// We are getting decommissioned!!
if nodeEntry.Id == c.selfNode.Id {
// We are getting decommissioned.
// Stop the heartbeat and stop the watch
stopHeartbeat <- true
c.gossip.Stop(time.Duration(10 * time.Second))
return fmt.Errorf("stop watch")
}
}
}
c.size = db.Size
peers := c.getNonDecommisionedPeers(db)
c.gossip.UpdateCluster(peers)
c.nodeCacheLock.Lock()
defer c.nodeCacheLock.Unlock()
for _, n := range c.nodeCache {
_, found := peers[types.NodeId(n.Id)]
if !found {
delete(c.nodeCache, n.Id)
}
}
if watchErr != nil && c.selfNode.Status != api.Status_STATUS_DECOMMISSION {
logrus.Errorf("ClusterManager watch stopped, restarting (err: %v)",
watchErr)
c.startClusterDBWatch(kvdbVersion, kvdb.Instance())
}
return watchErr
}
func (c *ClusterManager) getLatestNodeConfig(nodeId string) *NodeEntry {
db, _, err := readClusterInfo()
if err != nil {
logrus.Warnln("Failed to read the database for updating config")
return nil
}
ne, exists := db.NodeEntries[nodeId]
if !exists {
logrus.Warnln("Could not find info for node with id ", nodeId)
return nil
}
return &ne
}
func (c *ClusterManager) initNode(db *ClusterInfo) (*api.Node, bool) {
_, exists := db.NodeEntries[c.selfNode.Id]
// Add us into the database.
labels := make(map[string]string)
labels[gossipVersionKey] = c.gossipVersion
nodeEntry := NodeEntry{
Id: c.selfNode.Id,
MgmtIp: c.selfNode.MgmtIp,
DataIp: c.selfNode.DataIp,
GenNumber: c.selfNode.GenNumber,
StartTime: c.selfNode.StartTime,
MemTotal: c.selfNode.MemTotal,
Hostname: c.selfNode.Hostname,
NodeLabels: labels,
}
db.NodeEntries[c.config.NodeId] = nodeEntry
logrus.Infof("Node %s joining cluster...", c.config.NodeId)
logrus.Infof("Cluster ID: %s", c.config.ClusterId)
logrus.Infof("Node Mgmt IP: %s", c.selfNode.MgmtIp)
logrus.Infof("Node Data IP: %s", c.selfNode.DataIp)
return &c.selfNode, exists
}
func (c *ClusterManager) cleanupInit(db *ClusterInfo, self *api.Node) error {
var resErr error
var err error
logrus.Infof("Cleanup Init services")
for e := c.listeners.Front(); e != nil; e = e.Next() {
logrus.Warnf("Cleanup Init for service %s.",
e.Value.(ClusterListener).String())
err = e.Value.(ClusterListener).CleanupInit(self, db)
if err != nil {
logrus.Warnf("Failed to Cleanup Init %s: %v",
e.Value.(ClusterListener).String(), err)
resErr = err
}
}
return resErr
}
// Initialize node and alert listeners that we are initializing a node in the cluster.
func (c *ClusterManager) initNodeInCluster(
clusterInfo *ClusterInfo,
self *api.Node,
exist bool,
nodeInitialized bool,
) ([]FinalizeInitCb, error) {
// If I am already in the cluster map, don't add me again.
if exist {
return nil, nil
}
if nodeInitialized {
logrus.Errorf(ErrInitNodeNotFound.Error())
return nil, ErrInitNodeNotFound
}
// Alert all listeners that we are a new node and we are initializing.
finalizeCbs := make([]FinalizeInitCb, 0)
for e := c.listeners.Front(); e != nil; e = e.Next() {
finalizeCb, err := e.Value.(ClusterListener).Init(self, clusterInfo)
if err != nil {
if self.Status != api.Status_STATUS_MAINTENANCE {
self.Status = api.Status_STATUS_ERROR
}
logrus.Warnf("Failed to initialize Init %s: %v",
e.Value.(ClusterListener).String(), err)
c.cleanupInit(clusterInfo, self)
return nil, err
}
if finalizeCb != nil {
finalizeCbs = append(finalizeCbs, finalizeCb)
}
}
return finalizeCbs, nil
}
// Alert all listeners that we are joining the cluster
func (c *ClusterManager) joinCluster(
self *api.Node,
exist bool,
) error {
// Listeners may update initial state, so snap again.
// The cluster db may have diverged since we waited for quorum
// in between. Snapshot is created under cluster db lock to make
// sure cluster db updates do not happen during snapshot, otherwise
// there may be a mismatch between db updates from listeners and
// cluster db state.
kvdb := kvdb.Instance()
kvlock, err := kvdb.LockWithID(clusterLockKey, c.config.NodeId)
if err != nil {
logrus.Warnln("Unable to obtain cluster lock before creating snapshot: ",
err)
return err
}
initState, err := snapAndReadClusterInfo()
kvdb.Unlock(kvlock)
if err != nil {
logrus.Panicf("Fatal, Unable to create snapshot: %v", err)
return err
}
defer func() {
if initState.Collector != nil {
initState.Collector.Stop()
initState.Collector = nil
}
}()
// Alert all listeners that we are joining the cluster.
for e := c.listeners.Front(); e != nil; e = e.Next() {
err := e.Value.(ClusterListener).Join(self, initState, c.HandleNotifications)
if err != nil {
if self.Status != api.Status_STATUS_MAINTENANCE {
self.Status = api.Status_STATUS_ERROR
}
logrus.Warnf("Failed to initialize Join %s: %v",
e.Value.(ClusterListener).String(), err)
if exist == false {
c.cleanupInit(initState.ClusterInfo, self)
}
logrus.Errorln("Failed to join cluster.", err)
return err
}
}
selfNodeEntry, ok := initState.ClusterInfo.NodeEntries[c.config.NodeId]
if !ok {
logrus.Panicln("Fatal, Unable to find self node entry in local cache")
}
_, _, err = c.updateNodeEntryDB(selfNodeEntry, nil)
if err != nil {
return err
}
return nil
}
func (c *ClusterManager) initClusterForListeners(
self *api.Node,
) error {
err := error(nil)
// Alert all listeners that we are initializing a new cluster.
for e := c.listeners.Front(); e != nil; e = e.Next() {
err = e.Value.(ClusterListener).ClusterInit(self)
if err != nil {
if self.Status != api.Status_STATUS_MAINTENANCE {
self.Status = api.Status_STATUS_ERROR
}
logrus.Printf("Failed to initialize %s",
e.Value.(ClusterListener).String())
goto done
}
}
done:
return err
}
func (c *ClusterManager) startClusterDBWatch(lastIndex uint64,
kv kvdb.Kvdb) error {
logrus.Infof("Cluster manager starting watch at version %d", lastIndex)
go kv.WatchKey(ClusterDBKey, lastIndex, nil, c.watchDB)
return nil
}
func (c *ClusterManager) startHeartBeat(clusterInfo *ClusterInfo) {
gossipStoreKey := types.StoreKey(heartbeatKey + c.config.ClusterId)
node := c.getCurrentState()
c.putNodeCacheEntry(c.selfNode.Id, *node)
c.gossip.UpdateSelf(gossipStoreKey, *node)
var nodeIps []string
for nodeId, nodeEntry := range clusterInfo.NodeEntries {
if nodeId == node.Id {
continue
}
labels := nodeEntry.NodeLabels
version, ok := labels[gossipVersionKey]
if !ok || version != c.gossipVersion {
// Do not add nodes with mismatched version
continue
}
nodeIps = append(nodeIps, nodeEntry.DataIp+":"+c.gossipPort)
}
if len(nodeIps) > 0 {
logrus.Infof("Starting Gossip... Gossiping to these nodes : %v", nodeIps)
} else {
logrus.Infof("Starting Gossip...")
}
c.gossip.Start(nodeIps)
c.gossip.UpdateCluster(c.getNonDecommisionedPeers(*clusterInfo))
lastUpdateTs := time.Now()
for {
select {
case <-stopHeartbeat:
return
default:
node = c.getCurrentState()
currTime := time.Now()
diffTime := currTime.Sub(lastUpdateTs)
if diffTime > 10*time.Second {
logrus.Warnln("No gossip update for ", diffTime.Seconds(), "s")
}
c.gossip.UpdateSelf(gossipStoreKey, *node)
lastUpdateTs = currTime
}
time.Sleep(2 * time.Second)
}
}
func (c *ClusterManager) updateClusterStatus() {
gossipStoreKey := types.StoreKey(heartbeatKey + c.config.ClusterId)
for {
node := c.getCurrentState()
c.putNodeCacheEntry(node.Id, *node)
// Process heartbeats from other nodes...
gossipValues := c.gossip.GetStoreKeyValue(gossipStoreKey)
numNodes := 0
for id, gossipNodeInfo := range gossipValues {
numNodes = numNodes + 1
// Check to make sure we are not exceeding the size of the cluster.
if c.size > 0 && numNodes > c.size {
logrus.Fatalf("Fatal, number of nodes in the cluster has"+
"exceeded the cluster size: %d > %d", numNodes, c.size)
os.Exit(1)
}
// Special handling for self node
if id == types.NodeId(node.Id) {
// TODO: Implement State Machine for node statuses similar to the one in gossip
if c.selfNode.Status == api.Status_STATUS_OK &&
gossipNodeInfo.Status == types.NODE_STATUS_SUSPECT_NOT_IN_QUORUM {
// Current:
// Cluster Manager Status: UP.
// Gossip Status: Suspecting Not in Quorum (stays in this state for quorumTimeout)
// New:
// Cluster Manager: Not in Quorum
// Cluster Manager does not have a Suspect in Quorum status
logrus.Warnf("Can't reach quorum no. of nodes. Suspecting out of quorum...")
c.selfNode.Status = api.Status_STATUS_NOT_IN_QUORUM
c.status = api.Status_STATUS_NOT_IN_QUORUM
} else if (c.selfNode.Status == api.Status_STATUS_NOT_IN_QUORUM ||
c.selfNode.Status == api.Status_STATUS_OK) &&
(gossipNodeInfo.Status == types.NODE_STATUS_NOT_IN_QUORUM ||
gossipNodeInfo.Status == types.NODE_STATUS_DOWN) {
// Current:
// Cluster Manager Status: UP or Not in Quorum.
// Gossip Status: Not in Quorum or DOWN
// New:
// Cluster Manager: DOWN
// Gossip waited for quorumTimeout and indicates we are Not in Quorum and should go Down
logrus.Warnf("Not in quorum. Gracefully shutting down...")
c.gossip.UpdateSelfStatus(types.NODE_STATUS_DOWN)
c.selfNode.Status = api.Status_STATUS_OFFLINE
c.status = api.Status_STATUS_NOT_IN_QUORUM
c.Shutdown()
os.Exit(1)
} else if c.selfNode.Status == api.Status_STATUS_NOT_IN_QUORUM &&
gossipNodeInfo.Status == types.NODE_STATUS_UP {
// Current:
// Cluster Manager Status: Not in Quorum
// Gossip Status: Up
// New:
// Cluster Manager : UP
c.selfNode.Status = api.Status_STATUS_OK
c.status = api.Status_STATUS_OK
} else {
// Ignore the update
}
continue
}
// Notify node status change if required.
peerNodeInCache := api.Node{}
peerNodeInCache.Id = string(id)
peerNodeInCache.Status = api.Status_STATUS_OK
switch {
case gossipNodeInfo.Status == types.NODE_STATUS_DOWN:
// Replace the status of this node in cache to offline
peerNodeInCache.Status = api.Status_STATUS_OFFLINE
lastStatus, ok := c.nodeStatuses[string(id)]
if !ok {
// This node was probably added recently into gossip node
// map through cluster database and is yet to reach out to us.
// Mark this node down.
logrus.Warnln("Detected new node with ", id,
" to be offline due to inactivity.")
} else {
if lastStatus == peerNodeInCache.Status {
break
}
logrus.Warnln("Detected node ", id,
" to be offline due to inactivity.")
}
c.nodeStatuses[string(id)] = peerNodeInCache.Status
for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
err := e.Value.(ClusterListener).Update(&peerNodeInCache)
if err != nil {
logrus.Warnln("Failed to notify ",
e.Value.(ClusterListener).String())
}
}
case gossipNodeInfo.Status == types.NODE_STATUS_UP:
peerNodeInCache.Status = api.Status_STATUS_OK
lastStatus, ok := c.nodeStatuses[string(id)]
if ok && lastStatus == peerNodeInCache.Status {
break
}
c.nodeStatuses[string(id)] = peerNodeInCache.Status
// A node discovered in the cluster.
logrus.Infoln("Detected node", peerNodeInCache.Id,
" to be in the cluster.")
for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() {
err := e.Value.(ClusterListener).Add(&peerNodeInCache)
if err != nil {
logrus.Warnln("Failed to notify ",
e.Value.(ClusterListener).String())
}
}
}
// Update cache with gossip data
if gossipNodeInfo.Value != nil {
peerNodeInGossip, ok := gossipNodeInfo.Value.(api.Node)
if ok {
if peerNodeInCache.Status == api.Status_STATUS_OFFLINE {
// Overwrite the status of Node in Gossip data with Down
peerNodeInGossip.Status = peerNodeInCache.Status
} else {
if peerNodeInGossip.Status == api.Status_STATUS_MAINTENANCE {
// If the node sent its status as Maintenance
// do not overwrite it with online
} else {
peerNodeInGossip.Status = peerNodeInCache.Status
}
}
c.putNodeCacheEntry(peerNodeInGossip.Id, peerNodeInGossip)
} else {
logrus.Errorln("Unable to get node info from gossip")
c.putNodeCacheEntry(peerNodeInCache.Id, peerNodeInCache)
}
} else {
c.putNodeCacheEntry(peerNodeInCache.Id, peerNodeInCache)
}
}
time.Sleep(2 * time.Second)
}
}
// DisableUpdates disables gossip updates
func (c *ClusterManager) DisableUpdates() error {
logrus.Warnln("Disabling gossip updates")
c.gEnabled = false
return nil
}
// EnableUpdates enables gossip updates
func (c *ClusterManager) EnableUpdates() error {
logrus.Warnln("Enabling gossip updates")
c.gEnabled = true
return nil
}
// GetGossipState returns current gossip state
func (c *ClusterManager) GetGossipState() *ClusterState {
gossipStoreKey := types.StoreKey(heartbeatKey + c.config.ClusterId)
nodeValue := c.gossip.GetStoreKeyValue(gossipStoreKey)
nodes := make([]types.NodeValue, len(nodeValue), len(nodeValue))
i := 0
for _, value := range nodeValue {
nodes[i] = value
i++
}
return &ClusterState{NodeStatus: nodes}
}
func (c *ClusterManager) waitForQuorum(exist bool) error {
// Max quorum retries allowed = 600
// 600 * 2 seconds (gossip interval) = 20 minutes before it restarts
quorumRetries := 0
for {
gossipSelfStatus := c.gossip.GetSelfStatus()
if c.selfNode.Status == api.Status_STATUS_NOT_IN_QUORUM &&
gossipSelfStatus == types.NODE_STATUS_UP {
// Node not initialized yet
// Achieved quorum in the cluster.
// Lets start the node
c.selfNode.Status = api.Status_STATUS_INIT
err := c.joinCluster(&c.selfNode, exist)
if err != nil {
if c.selfNode.Status != api.Status_STATUS_MAINTENANCE {
c.selfNode.Status = api.Status_STATUS_ERROR
}
return err
}
c.status = api.Status_STATUS_OK
c.selfNode.Status = api.Status_STATUS_OK
break
} else {
c.status = api.Status_STATUS_NOT_IN_QUORUM
if quorumRetries == 600 {
err := fmt.Errorf("Unable to achieve Quorum." +
" Timeout 20 minutes exceeded.")
logrus.Warnln("Failed to join cluster: ", err)
c.status = api.Status_STATUS_NOT_IN_QUORUM
c.selfNode.Status = api.Status_STATUS_OFFLINE
c.gossip.UpdateSelfStatus(types.NODE_STATUS_DOWN)
return err
}
if quorumRetries == 0 {
logrus.Infof("Waiting for the cluster to reach quorum...")
}
time.Sleep(types.DEFAULT_GOSSIP_INTERVAL)
quorumRetries++
}
}
// Update the listeners that we have joined the cluster and
// and our quorum status
for e := c.listeners.Front(); e != nil; e = e.Next() {
err := e.Value.(ClusterListener).JoinComplete(&c.selfNode)
if err != nil {
logrus.Warnln("Failed to notify ", e.Value.(ClusterListener).String())
}
}
return nil
}
func (c *ClusterManager) initializeCluster(db kvdb.Kvdb) (
*ClusterInfo,
error,
) {
kvlock, err := db.LockWithID(clusterLockKey, c.config.NodeId)
if err != nil {
logrus.Panicln("Fatal, Unable to obtain cluster lock.", err)
}
defer db.Unlock(kvlock)
clusterInfo, _, err := readClusterInfo()
if err != nil {
logrus.Panicln(err)
}
selfNodeEntry, ok := clusterInfo.NodeEntries[c.config.NodeId]
if ok && selfNodeEntry.Status == api.Status_STATUS_DECOMMISSION {
msg := fmt.Sprintf("Node is in decommision state, Node ID %s.",
c.selfNode.Id)
logrus.Errorln(msg)
return nil, ErrNodeDecommissioned
}
// Set the clusterID in db
clusterInfo.Id = c.config.ClusterId
if clusterInfo.Status == api.Status_STATUS_INIT {
logrus.Infoln("Initializing a new cluster.")
// Initialize self node
clusterInfo.Status = api.Status_STATUS_OK
err = c.initClusterForListeners(&c.selfNode)
if err != nil {
logrus.Errorln("Failed to initialize the cluster.", err)
return nil, err
}
// While we hold the lock write the cluster info
// to kvdb.
_, err := writeClusterInfo(&clusterInfo)
if err != nil {
logrus.Errorln("Failed to initialize the cluster.", err)
return nil, err
}
} else if clusterInfo.Status&api.Status_STATUS_OK > 0 {
logrus.Infoln("Cluster state is OK... Joining the cluster.")
} else {
return nil, errors.New("Fatal, Cluster is in an unexpected state.")
}
// Cluster database max size... 0 if unlimited.
c.size = clusterInfo.Size
c.status = api.Status_STATUS_OK
return &clusterInfo, nil
}
func (c *ClusterManager) quorumMember() bool {
if c.listeners.Len() == 0 {
// If there are no listeners registered by the driver, assume
// this node is a quorum member, so this becomes the default behavior
// for drivers which do not implement the ClusterListener interface.
return true
}
quorumRequired := false
for e := c.listeners.Front(); e != nil; e = e.Next() {
quorumRequired = quorumRequired ||
e.Value.(ClusterListener).QuorumMember(&c.selfNode)
}
return quorumRequired
}
func (c *ClusterManager) initListeners(
db kvdb.Kvdb,
clusterMaxSize int,
nodeExists *bool,
nodeInitialized bool,
) (uint64, *ClusterInfo, error) {
// Initialize the cluster if required
clusterInfo, err := c.initializeCluster(db)
if err != nil {
return 0, nil, err
}
// Initialize the node in cluster
self, exist := c.initNode(clusterInfo)
*nodeExists = exist
finalizeCbs, err := c.initNodeInCluster(
clusterInfo,
self,
*nodeExists,
nodeInitialized,
)
if err != nil {
logrus.Errorln("Failed to initialize node in cluster.", err)
return 0, nil, err
}
selfNodeEntry, ok := clusterInfo.NodeEntries[c.config.NodeId]
if !ok {
logrus.Panicln("Fatal, Unable to find self node entry in local cache")
}
// the inverse value is to handle upgrades.
// This node does not participate in quorum decisions if it is
// decommissioned or if none of the listeners require it.
selfNodeEntry.NonQuorumMember =
selfNodeEntry.Status == api.Status_STATUS_DECOMMISSION ||
!c.quorumMember()
if !selfNodeEntry.NonQuorumMember {
logrus.Infof("This node participates in quorum decisions")
} else {
logrus.Infof("This node does not participates in quorum decisions")
}
initFunc := func(clusterInfo ClusterInfo) error {
numNodes := 0
for _, node := range clusterInfo.NodeEntries {
if node.Status != api.Status_STATUS_DECOMMISSION {
numNodes++
}
}
if clusterMaxSize > 0 && numNodes > clusterMaxSize {
return fmt.Errorf("Cluster is operating at maximum capacity "+
"(%v nodes). Please remove a node before attempting to "+
"add a new node.", clusterMaxSize)
}
// Finalize inits from subsystems under cluster db lock.
for _, finalizeCb := range finalizeCbs {
if err := finalizeCb(); err != nil {
logrus.Errorf("Failed finalizing init: %s", err.Error())
return err
}
}
return nil
}
kvp, kvClusterInfo, err := c.updateNodeEntryDB(selfNodeEntry,
initFunc)
if err != nil {
logrus.Errorln("Failed to save the database.", err)
return 0, nil, err
}
if kvClusterInfo.Status == api.Status_STATUS_INIT {
logrus.Panicln("Cluster in an unexpected state: ", kvClusterInfo.Status)
}
return kvp.ModifiedIndex, kvClusterInfo, nil
}
func (c *ClusterManager) initializeAndStartHeartbeat(
kvdb kvdb.Kvdb,
clusterMaxSize int,
exist *bool,
nodeInitialized bool,
) (uint64, error) {
lastIndex, clusterInfo, err := c.initListeners(
kvdb,
clusterMaxSize,
exist,
nodeInitialized,
)
if err != nil {
return 0, err
}
// Set the status to NOT_IN_QUORUM to start the node.
// Once we achieve quorum then we actually join the cluster
// and change the status to OK
c.selfNode.Status = api.Status_STATUS_NOT_IN_QUORUM
// Start heartbeating to other nodes.
go c.startHeartBeat(clusterInfo)
return lastIndex, nil
}
// Start initiates the cluster manager and the cluster state machine
func (c *ClusterManager) Start(
clusterMaxSize int,
nodeInitialized bool,
gossipPort string,
) error {
var err error
logrus.Infoln("Cluster manager starting...")
c.gEnabled = true
c.selfNode = api.Node{}
c.selfNode.GenNumber = uint64(time.Now().UnixNano())
c.selfNode.Id = c.config.NodeId
c.selfNode.Status = api.Status_STATUS_INIT
c.selfNode.MgmtIp, c.selfNode.DataIp, err = ExternalIp(&c.config)
c.selfNode.StartTime = time.Now()
c.selfNode.Hostname, _ = os.Hostname()
c.gossipPort = gossipPort
if err != nil {
logrus.Errorf("Failed to get external IP address for mgt/data interfaces: %s.",
err)
return err
}
c.selfNode.NodeData = make(map[string]interface{})
c.system = systemutils.New()
// Start the gossip protocol.
// XXX Make the port configurable.
gob.Register(api.Node{})
gossipIntervals := types.GossipIntervals{
GossipInterval: types.DEFAULT_GOSSIP_INTERVAL,
PushPullInterval: types.DEFAULT_PUSH_PULL_INTERVAL,
ProbeInterval: types.DEFAULT_PROBE_INTERVAL,
ProbeTimeout: types.DEFAULT_PROBE_TIMEOUT,
QuorumTimeout: types.DEFAULT_QUORUM_TIMEOUT,
}
c.gossip = gossip.New(
c.selfNode.DataIp+":"+c.gossipPort,
types.NodeId(c.config.NodeId),
c.selfNode.GenNumber,
gossipIntervals,
types.GOSSIP_VERSION_2,
c.config.ClusterId,
)
c.gossipVersion = types.GOSSIP_VERSION_2
var exist bool
kvdb := kvdb.Instance()
lastIndex, err := c.initializeAndStartHeartbeat(
kvdb,
clusterMaxSize,
&exist,
nodeInitialized,
)
if err != nil {
return err
}
c.startClusterDBWatch(lastIndex, kvdb)
err = c.waitForQuorum(exist)
if err != nil {
return err
}
c.configManager, err = osdconfig.NewManager(c.kv)
if err != nil {
return err
}
go c.updateClusterStatus()
go c.replayNodeDecommission()
return nil
}
// NodeStatus returns the status of a node. It compares the status maintained by the
// cluster manager and the provided listener and returns the appropriate one
func (c *ClusterManager) NodeStatus() (api.Status, error) {
clusterNodeStatus := c.selfNode.Status
if clusterNodeStatus != api.Status_STATUS_OK {
// Status of this node as seen by Cluster Manager is not OK
// This takes highest precedence over other listener statuses.
// Returning our status
return clusterNodeStatus, nil
}
for e := c.listeners.Front(); e != nil; e = e.Next() {
listenerStatus := e.Value.(ClusterListener).ListenerStatus()
if listenerStatus == api.Status_STATUS_NONE {
continue
}
if int(listenerStatus.StatusKind()) >= int(clusterNodeStatus.StatusKind()) {
clusterNodeStatus = listenerStatus
}
}
return clusterNodeStatus, nil
}
// PeerStatus returns the status of a peer node as seen by us
func (c *ClusterManager) PeerStatus(listenerName string) (map[string]api.Status, error) {
statusMap := make(map[string]api.Status)
var listenerStatusMap map[string]api.Status
for e := c.listeners.Front(); e != nil; e = e.Next() {
if e.Value.(ClusterListener).String() == listenerName {
listenerStatusMap = e.Value.(ClusterListener).ListenerPeerStatus()
break
}
}
c.nodeCacheLock.Lock()
defer c.nodeCacheLock.Unlock()
// Listener failed to provide peer status
if listenerStatusMap == nil || len(listenerStatusMap) == 0 {
for _, n := range c.nodeCache {
if n.Id == c.selfNode.Id {
// skip self
continue
}
statusMap[n.Id] = n.Status
}
return statusMap, nil
}
// Compare listener's peer statuses and cluster provider's peer statuses
for _, n := range c.nodeCache {
if n.Id == c.selfNode.Id {
// Skip self
continue
}
clusterNodeStatus := n.Status
listenerNodeStatus, ok := listenerStatusMap[n.Id]
if !ok {
// Could not find listener's peer status.
// Use the cluster provider's peer status and move on to the next node,
// so the zero-value listener status below cannot overwrite it.
statusMap[n.Id] = clusterNodeStatus
continue
}
if int(listenerNodeStatus.StatusKind()) >= int(clusterNodeStatus.StatusKind()) {
// Use listener's peer status
statusMap[n.Id] = listenerNodeStatus
} else {
// Use the cluster provider's peer status
statusMap[n.Id] = clusterNodeStatus
}
}
return statusMap, nil
}
func (c *ClusterManager) enumerateNodesFromClusterDB() []api.Node {
clusterDB, _, err := readClusterInfo()
if err != nil {
logrus.Errorf("enumerateNodesFromClusterDB failed with error: %v", err)
return make([]api.Node, 0)
}
nodes := []api.Node{}
for _, n := range clusterDB.NodeEntries {
node := api.Node{}
if n.Status == api.Status_STATUS_DECOMMISSION {
continue
}
if n.Id == c.selfNode.Id {
node = *c.getCurrentState()
} else {
node.Id = n.Id
node.Status = n.Status
node.MgmtIp = n.MgmtIp
node.DataIp = n.DataIp
node.Hostname = n.Hostname
node.NodeLabels = n.NodeLabels
}
nodes = append(nodes, node)
}
return nodes
}
func (c *ClusterManager) enumerateNodesFromCache() []api.Node {
var clusterDB ClusterInfo
c.nodeCacheLock.Lock()
defer c.nodeCacheLock.Unlock()
nodes := make([]api.Node, len(c.nodeCache))
i := 0
for _, n := range c.nodeCache {
n, _ := c.getNodeEntry(n.Id, &clusterDB)
nodes[i] = *n.Copy()
i++
}
return nodes
}
// Enumerate lists all the nodes in the cluster.
func (c *ClusterManager) Enumerate() (api.Cluster, error) {
cluster := api.Cluster{
Id: c.config.ClusterId,
Status: c.status,
NodeId: c.selfNode.Id,
}
if c.selfNode.Status == api.Status_STATUS_NOT_IN_QUORUM ||
c.selfNode.Status == api.Status_STATUS_MAINTENANCE {
// If the node is not yet ready, query the cluster db
// for node members since gossip is not ready yet.
cluster.Nodes = c.enumerateNodesFromClusterDB()
} else {
cluster.Nodes = c.enumerateNodesFromCache()
}
// Allow listeners to add/modify data
for e := c.listeners.Front(); e != nil; e = e.Next() {
if err := e.Value.(ClusterListener).Enumerate(cluster); err != nil {
logrus.Warnf("listener %s enumerate failed: %v",
e.Value.(ClusterListener).String(), err)
continue
}
}
return cluster, nil
}
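// Illustrative helper, not part of the original file: Enumerate already picks
// the appropriate source for the cluster view (cluster DB while this node is
// not in quorum or in maintenance, the gossip-backed cache otherwise), so a
// caller can consume it uniformly. The helper name is made up.
func listNodeIDs(c *ClusterManager) ([]string, error) {
	cluster, err := c.Enumerate()
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(cluster.Nodes))
	for _, n := range cluster.Nodes {
		ids = append(ids, n.Id)
	}
	return ids, nil
}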
func (c *ClusterManager) updateNodeEntryDB(
nodeEntry NodeEntry,
checkCbBeforeUpdate checkFunc,
) (*kvdb.KVPair, *ClusterInfo, error) {
kvdb := kvdb.Instance()
kvlock, err := kvdb.LockWithID(clusterLockKey, c.config.NodeId)
if err != nil {
logrus.Warnln("Unable to obtain cluster lock for updating cluster DB.",
err)
return nil, nil, err
}
defer kvdb.Unlock(kvlock)
currentState, _, err := readClusterInfo()
if err != nil {
return nil, nil, err
}
currentState.NodeEntries[nodeEntry.Id] = nodeEntry
if checkCbBeforeUpdate != nil {
err = checkCbBeforeUpdate(currentState)
if err != nil {
return nil, nil, err
}
}
kvp, err := writeClusterInfo(&currentState)
if err != nil {
logrus.Errorln("Failed to save the database.", err)
}
return kvp, &currentState, err
}
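// Hedged sketch, not part of the original file: the checkCbBeforeUpdate
// callback runs while the cluster lock is held and can veto the write. The
// size check below is invented for illustration and assumes checkFunc has the
// shape func(ClusterInfo) error, as the call above implies.
func addNodeEntryChecked(c *ClusterManager, entry NodeEntry) error {
	checkSize := func(db ClusterInfo) error {
		if db.Size > 0 && len(db.NodeEntries) > db.Size {
			return errors.New("cluster has reached its configured size")
		}
		return nil
	}
	_, _, err := c.updateNodeEntryDB(entry, checkSize)
	return err
}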
// SetSize sets the maximum number of nodes in a cluster.
func (c *ClusterManager) SetSize(size int) error {
kvdb := kvdb.Instance()
kvlock, err := kvdb.LockWithID(clusterLockKey, c.config.NodeId)
if err != nil {
logrus.Warnln("Unable to obtain cluster lock for updating config", err)
return nil
}
defer kvdb.Unlock(kvlock)
db, _, err := readClusterInfo()
if err != nil {
return err
}
db.Size = size
_, err = writeClusterInfo(&db)
return err
}
func (c *ClusterManager) getNodeInfoFromClusterDb(id string) (api.Node, error) {
node := api.Node{Id: id}
kvdb := kvdb.Instance()
kvlock, err := kvdb.LockWithID(clusterLockKey, c.config.NodeId)
if err != nil {
logrus.Warnln("Unable to obtain cluster lock for marking "+
"node decommission", err)
return node, err
}
defer kvdb.Unlock(kvlock)
db, _, err := readClusterInfo()
if err != nil {
return node, err
}
nodeEntry, ok := db.NodeEntries[id]
if !ok {
msg := fmt.Sprintf("Node entry does not exist, Node ID %s", id)
return node, errors.New(msg)
}
node.Status = nodeEntry.Status
return node, nil
}
func (c *ClusterManager) markNodeDecommission(node api.Node) error {
kvdb := kvdb.Instance()
kvlock, err := kvdb.LockWithID(clusterLockKey, c.config.NodeId)
if err != nil {
logrus.Warnln("Unable to obtain cluster lock for marking "+
"node decommission",
err)
return err
}
defer kvdb.Unlock(kvlock)
db, _, err := readClusterInfo()
if err != nil {
return err
}
nodeEntry, ok := db.NodeEntries[node.Id]
if !ok {
msg := fmt.Sprintf("Node entry does not exist, Node ID %s",
node.Id)
return errors.New(msg)
}
nodeEntry.Status = api.Status_STATUS_DECOMMISSION
db.NodeEntries[node.Id] = nodeEntry
if c.selfNode.Id == node.Id {
c.selfNode.Status = api.Status_STATUS_DECOMMISSION
}
_, err = writeClusterInfo(&db)
return err
}
func (c *ClusterManager) deleteNodeFromDB(nodeID string) error {
// Delete node from cluster DB
kvdb := kvdb.Instance()
kvlock, err := kvdb.LockWithID(clusterLockKey, c.config.NodeId)
if err != nil {
logrus.Panicln("fatal, unable to obtain cluster lock. ", err)
}
defer kvdb.Unlock(kvlock)
currentState, _, err := readClusterInfo()
if err != nil {
logrus.Errorln("Failed to read cluster info. ", err)
return err
}
delete(currentState.NodeEntries, nodeID)
_, err = writeClusterInfo(&currentState)
if err != nil {
logrus.Errorln("Failed to save the database.", err)
}
return err
}
// Remove node(s) from the cluster permanently.
func (c *ClusterManager) Remove(nodes []api.Node, forceRemove bool) error {
logrus.Infof("ClusterManager Remove node.")
var resultErr error
inQuorum := !(c.selfNode.Status == api.Status_STATUS_NOT_IN_QUORUM)
for _, n := range nodes {
node, exist := c.getNodeCacheEntry(n.Id)
if !exist {
node, resultErr = c.getNodeInfoFromClusterDb(n.Id)
if resultErr != nil {
logrus.Errorf("Error getting node info for id %s : %v", n.Id,
resultErr)
return fmt.Errorf("Node %s does not exist", n.Id)
}
}
// If removing node is self and node is not in maintenance mode,
// disallow node remove.
if n.Id == c.selfNode.Id &&
c.selfNode.Status != api.Status_STATUS_MAINTENANCE {
msg := fmt.Sprintf(decommissionErrMsg, n.Id)
logrus.Errorf(msg)
return errors.New(msg)
} else if n.Id != c.selfNode.Id && inQuorum {
nodeCacheStatus := node.Status
// If node is not down, do not remove it
if nodeCacheStatus != api.Status_STATUS_OFFLINE &&
nodeCacheStatus != api.Status_STATUS_MAINTENANCE &&
nodeCacheStatus != api.Status_STATUS_DECOMMISSION {
msg := fmt.Sprintf(decommissionErrMsg, n.Id)
logrus.Errorf(msg+", node status: %s", nodeCacheStatus)
return errors.New(msg)
}
}
if forceRemove {
// Mark the other node down so that it can be decommissioned.
for e := c.listeners.Front(); e != nil; e = e.Next() {
logrus.Infof("Remove node: ask cluster listener %s "+
"to mark node %s down ",
e.Value.(ClusterListener).String(), n.Id)
err := e.Value.(ClusterListener).MarkNodeDown(&n)
if err != nil {
logrus.Warnf("Node mark down error: %v", err)
return err
}
}
}
// Ask listeners, can we remove this node?
for e := c.listeners.Front(); e != nil; e = e.Next() {
logrus.Infof("Remove node: ask cluster listener: "+
"can we remove node ID %s, %s",
n.Id, e.Value.(ClusterListener).String())
additionalMsg, err := e.Value.(ClusterListener).CanNodeRemove(&n)
if err != nil && !(err == ErrRemoveCausesDataLoss && forceRemove) {
msg := fmt.Sprintf("Cannot remove node ID %s: %s.", n.Id, err)
if additionalMsg != "" {
msg = msg + " " + additionalMsg
}
logrus.Warnf(msg)
return errors.New(msg)
}
}
err := c.markNodeDecommission(n)
if err != nil {
msg := fmt.Sprintf("Failed to mark node as "+
"decommision, error %s",
err)
logrus.Errorf(msg)
return errors.New(msg)
}
if !inQuorum {
// If we are not in quorum, we only mark the node as decommissioned
// since this node is not functional yet.
continue
}
// Alert all listeners that we are removing this node.
for e := c.listeners.Front(); e != nil; e = e.Next() {
logrus.Infof("Remove node: notify cluster listener: %s",
e.Value.(ClusterListener).String())
err := e.Value.(ClusterListener).Remove(&n, forceRemove)
if err != nil {
if err != ErrNodeRemovePending {
logrus.Warnf("Cluster listener failed to "+
"remove node: %s: %s",
e.Value.(ClusterListener).String(),
err)
return err
} else {
resultErr = err
}
}
}
}
return resultErr
}
// NodeRemoveDone is called from the listeners when their job of Node removal is done.
func (c *ClusterManager) NodeRemoveDone(nodeID string, result error) {
// XXX: only storage will make callback right now
if result != nil {
msg := fmt.Sprintf("Storage failed to decommission node %s, "+
"error %s",
nodeID,
result)
logrus.Errorf(msg)
return
}
logrus.Infof("Cluster manager node remove done: node ID %s", nodeID)
err := c.deleteNodeFromDB(nodeID)
if err != nil {
msg := fmt.Sprintf("Failed to delete node %s "+
"from cluster database, error %s",
nodeID, err)
logrus.Errorf(msg)
}
}
func (c *ClusterManager) replayNodeDecommission() {
currentState, _, err := readClusterInfo()
if err != nil {
logrus.Infof("Failed to read cluster db for node decommissions: %v", err)
return
}
for _, nodeEntry := range currentState.NodeEntries {
if nodeEntry.Status == api.Status_STATUS_DECOMMISSION {
logrus.Infof("Replay Node Remove for node ID %s", nodeEntry.Id)
var n api.Node
n.Id = nodeEntry.Id
nodes := make([]api.Node, 0)
nodes = append(nodes, n)
err := c.Remove(nodes, false)
if err != nil {
logrus.Warnf("Failed to replay node remove: "+
"node ID %s, error %s",
nodeEntry.Id, err)
}
}
}
}
// Shutdown can be called when THIS node is gracefully shutting down.
func (c *ClusterManager) Shutdown() error {
db, _, err := readClusterInfo()
if err != nil {
logrus.Warnf("Could not read cluster database (%v).", err)
return err
}
// Alert all listeners that we are shutting this node down.
for e := c.listeners.Front(); e != nil; e = e.Next() {
logrus.Infof("Shutting down %s", e.Value.(ClusterListener).String())
if err := e.Value.(ClusterListener).Halt(&c.selfNode, &db); err != nil {
logrus.Warnf("Failed to shutdown %s",
e.Value.(ClusterListener).String())
}
}
return nil
}
// HandleNotifications is a callback function used by the listeners
func (c *ClusterManager) HandleNotifications(culpritNodeId string, notification api.ClusterNotify) (string, error) {
if notification == api.ClusterNotify_CLUSTER_NOTIFY_DOWN {
killNodeId := c.gossip.ExternalNodeLeave(types.NodeId(culpritNodeId))
return string(killNodeId), nil
} else {
return "", fmt.Errorf("Error in Handle Notifications. Unknown Notification : %v", notification)
}
}
func (c *ClusterManager) EnumerateAlerts(ts, te time.Time, resource api.ResourceType) (*api.Alerts, error) {
a := api.Alerts{}
for e := c.listeners.Front(); e != nil; e = e.Next() {
listenerAlerts, err := e.Value.(ClusterListener).EnumerateAlerts(ts, te, resource)
if err != nil {
logrus.Warnf("Failed to enumerate alerts from (%v): %v",
e.Value.(ClusterListener).String(), err)
continue
}
if listenerAlerts != nil {
a.Alert = append(a.Alert, listenerAlerts.Alert...)
}
}
return &a, nil
}
func (c *ClusterManager) ClearAlert(resource api.ResourceType, alertID int64) error {
cleared := false
for e := c.listeners.Front(); e != nil; e = e.Next() {
if err := e.Value.(ClusterListener).ClearAlert(resource, alertID); err != nil {
continue
}
cleared = true
}
if !cleared {
return fmt.Errorf("Unable to clear alert (%v)", alertID)
}
return nil
}
func (c *ClusterManager) EraseAlert(resource api.ResourceType, alertID int64) error {
erased := false
for e := c.listeners.Front(); e != nil; e = e.Next() {
if err := e.Value.(ClusterListener).EraseAlert(resource, alertID); err != nil {
continue
}
erased = true
}
if !erased {
return fmt.Errorf("Unable to erase alert (%v)", alertID)
}
return nil
}
func (c *ClusterManager) getNodeCacheEntry(nodeId string) (api.Node, bool) {
c.nodeCacheLock.Lock()
defer c.nodeCacheLock.Unlock()
n, ok := c.nodeCache[nodeId]
return n, ok
}
func (c *ClusterManager) putNodeCacheEntry(nodeId string, node api.Node) {
c.nodeCacheLock.Lock()
defer c.nodeCacheLock.Unlock()
c.nodeCache[nodeId] = node
}
// osdconfig.ConfigCaller compliance
func (c *ClusterManager) GetClusterConf() (*osdconfig.ClusterConfig, error) {
return c.configManager.GetClusterConf()
}
func (c *ClusterManager) GetNodeConf(nodeID string) (*osdconfig.NodeConfig, error) {
return c.configManager.GetNodeConf(nodeID)
}
func (c *ClusterManager) SetClusterConf(config *osdconfig.ClusterConfig) error {
return c.configManager.SetClusterConf(config)
}
func (c *ClusterManager) SetNodeConf(config *osdconfig.NodeConfig) error {
return c.configManager.SetNodeConf(config)
}
func (c *ClusterManager) DeleteNodeConf(nodeID string) error {
return c.configManager.DeleteNodeConf(nodeID)
}
func (c *ClusterManager) EnumerateNodeConf() (*osdconfig.NodesConfig, error) {
return c.configManager.EnumerateNodeConf()
}
| 1 | 6,660 | can we move the code before deleteNodeFromDB ? this way even if the node crashes after remove config we can still re-run decommission again ? | libopenstorage-openstorage | go |
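One possible reading of the reviewer's suggestion above, sketched against the file shown earlier; which call constitutes the "config removal" is an assumption (DeleteNodeConf from this file is used as a stand-in), so this is illustrative rather than the actual change:
func (c *ClusterManager) nodeRemoveDoneSketch(nodeID string) {
	// Remove the node's config entry first ...
	if err := c.DeleteNodeConf(nodeID); err != nil {
		logrus.Warnf("Failed to delete config for node %s: %v", nodeID, err)
	}
	// ... and only then drop the node from the cluster DB. If the node crashes
	// between these two steps, the DECOMMISSION entry is still present and
	// replayNodeDecommission can re-run the removal on the next start.
	if err := c.deleteNodeFromDB(nodeID); err != nil {
		logrus.Errorf("Failed to delete node %s from cluster database: %v", nodeID, err)
	}
}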
@@ -6684,7 +6684,7 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
return self._internal.copy(sdf=sdf, data_columns=columns, column_index=idx)
- def melt(self, id_vars=None, value_vars=None, var_name='variable',
+ def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value'):
"""
Unpivot a DataFrame from wide format to long format, optionally | 1 | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A wrapper class for Spark DataFrame to behave similar to pandas DataFrame.
"""
from collections import OrderedDict
from distutils.version import LooseVersion
import re
import warnings
import inspect
import json
from functools import partial, reduce
import sys
from itertools import zip_longest
from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar, Iterable, Dict
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like, is_dict_like
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
from pandas.core.dtypes.common import infer_dtype_from_object
else:
from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object
from pandas.core.accessor import CachedAccessor
from pandas.core.dtypes.inference import is_sequence
from pyspark import sql as spark
from pyspark.sql import functions as F, Column
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (BooleanType, ByteType, DecimalType, DoubleType, FloatType,
IntegerType, LongType, NumericType, ShortType)
from pyspark.sql.utils import AnalysisException
from pyspark.sql.window import Window
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas.utils import validate_arguments_and_invoke_function, align_diff_frames
from databricks.koalas.generic import _Frame
from databricks.koalas.internal import _InternalFrame, IndexMap, SPARK_INDEX_NAME_FORMAT
from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame
from databricks.koalas.ml import corr
from databricks.koalas.utils import column_index_level, scol_for
from databricks.koalas.typedef import _infer_return_type, as_spark_type, as_python_type
from databricks.koalas.plot import KoalasFramePlotMethods
from databricks.koalas.config import get_option
# These regular expression patterns are compiled and defined here to avoid compiling the same
# pattern every time it is used in _repr_ and _repr_html_ in DataFrame.
# The two patterns seek the footer string in Pandas' string representation.
REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$")
REPR_HTML_PATTERN = re.compile(
r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$")
_flex_doc_FRAME = """
Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`).
Equivalent to ``{equiv}``. With reverse version, `{reverse}`.
Among flexible wrappers (`add`, `sub`, `mul`, `div`) to
arithmetic operators: `+`, `-`, `*`, `/`, `//`.
Parameters
----------
other : scalar
Any single data
Returns
-------
DataFrame
Result of the arithmetic operation.
Examples
--------
>>> df = ks.DataFrame({{'angles': [0, 3, 4],
... 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle'],
... columns=['angles', 'degrees'])
>>> df
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Add a scalar with operator version which returns the same
results. Also reverse version.
>>> df + 1
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
Divide and true divide by constant with reverse version.
>>> df / 10
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.div(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rdiv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
>>> df.truediv(10)
angles degrees
circle 0.0 36.0
triangle 0.3 18.0
rectangle 0.4 36.0
>>> df.rtruediv(10)
angles degrees
circle NaN 0.027778
triangle 3.333333 0.055556
rectangle 2.500000 0.027778
Subtract by constant with reverse version.
>>> df - 1
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
Multiply by constant with reverse version.
>>> df * 1
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.mul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
Floor Divide by constant with reverse version.
>>> df // 10
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.floordiv(10)
angles degrees
circle 0 36
triangle 0 18
rectangle 0 36
>>> df.rfloordiv(10)
angles degrees
circle NaN 0
triangle 3.0 0
rectangle 2.0 0
Mod by constant with reverse version.
>>> df % 2
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.mod(2)
angles degrees
circle 0 0
triangle 1 0
rectangle 0 0
>>> df.rmod(2)
angles degrees
circle NaN 2
triangle 2.0 2
rectangle 2.0 2
Power by constant with reverse version.
>>> df ** 2
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.pow(2)
angles degrees
circle 0.0 129600.0
triangle 9.0 32400.0
rectangle 16.0 129600.0
>>> df.rpow(2)
angles degrees
circle 1.0 2.348543e+108
triangle 8.0 1.532496e+54
rectangle 16.0 2.348543e+108
"""
T = TypeVar('T')
if (3, 5) <= sys.version_info < (3, 7):
from typing import GenericMeta
# This is a workaround to support variadic generic in DataFrame in Python 3.5+.
# See https://github.com/python/typing/issues/193
# We wrap the input params by a tuple to mimic variadic generic.
old_getitem = GenericMeta.__getitem__ # type: ignore
def new_getitem(self, params):
if hasattr(self, "is_dataframe"):
return old_getitem(self, Tuple[params])
else:
return old_getitem(self, params)
GenericMeta.__getitem__ = new_getitem # type: ignore
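# Hedged illustration, not part of the original module: with the workaround
# above, a subscript on a class flagged with ``is_dataframe`` such as
# ``DataFrame[int, float]`` is rewritten to ``DataFrame[Tuple[int, float]]`` on
# Python 3.5/3.6, which mimics a variadic generic and enables multi-column
# return annotations. The function below only demonstrates the annotation
# style; the column names and logic are made up:
#
#     def multiply(pdf) -> ks.DataFrame[int, float]:
#         return pdf.assign(b=pdf['a'] * 1.5)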
class DataFrame(_Frame, Generic[T]):
"""
Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame
internally.
:ivar _internal: an internal immutable Frame to manage metadata.
:type _internal: _InternalFrame
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \
or Koalas Series
Dict can contain Series, arrays, constants, or list-like objects
If data is a dict, argument order is maintained for Python 3.6
and later.
Note that if `data` is a Pandas DataFrame, a Spark DataFrame, or a Koalas Series,
other arguments should not be used.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = ks.DataFrame(data=d, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
Constructing DataFrame from Pandas DataFrame
>>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2']))
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = ks.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df2 # doctest: +SKIP
a b c d e
0 3 1 4 9 8
1 4 8 4 8 4
2 7 6 5 6 7
3 8 7 9 1 0
4 2 5 4 3 9
"""
def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False):
if isinstance(data, _InternalFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(data)
elif isinstance(data, spark.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
super(DataFrame, self).__init__(_InternalFrame(data))
elif isinstance(data, ks.Series):
assert index is None
assert columns is None
assert dtype is None
assert not copy
data = data.to_dataframe()
super(DataFrame, self).__init__(data._internal)
else:
if isinstance(data, pd.DataFrame):
assert index is None
assert columns is None
assert dtype is None
assert not copy
pdf = data
else:
pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy)
super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf))
@property
def _sdf(self) -> spark.DataFrame:
return self._internal.sdf
def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=False):
"""
Applies sfun to each column and returns a pd.Series where the number of rows equals the
number of columns.
Parameters
----------
sfun : either a 1-arg function that takes a Column and returns a Column, or
a 2-arg function that takes a Column and its DataType and returns a Column.
name : original pandas API name.
axis : axis to apply, 0 or 1, or 'index' or 'columns'.
numeric_only : boolean, default False
If True, sfun is applied on numeric columns (including booleans) only.
"""
from inspect import signature
from databricks.koalas import Series
if axis in ('index', 0, None):
exprs = []
num_args = len(signature(sfun).parameters)
for idx in self._internal.column_index:
col_sdf = self._internal.scol_for(idx)
col_type = self._internal.spark_type_for(idx)
is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType))
min_or_max = sfun.__name__ in ('min', 'max')
keep_column = not numeric_only or is_numeric_or_boolean or min_or_max
if keep_column:
if isinstance(col_type, BooleanType) and not min_or_max:
# Stat functions cannot be used with boolean values by default
# Thus, cast to integer (true to 1 and false to 0)
# Exclude the min and max methods though since those work with booleans
col_sdf = col_sdf.cast('integer')
if num_args == 1:
# Only pass in the column if sfun accepts only one arg
col_sdf = sfun(col_sdf)
else: # must be 2
assert num_args == 2
# Pass in both the column and its data type if sfun accepts two args
col_sdf = sfun(col_sdf, col_type)
exprs.append(col_sdf.alias(str(idx) if len(idx) > 1 else idx[0]))
sdf = self._sdf.select(*exprs)
pdf = sdf.toPandas()
if self._internal.column_index_level > 1:
pdf.columns = pd.MultiIndex.from_tuples(self._internal.column_index)
assert len(pdf) == 1, (sdf, pdf)
row = pdf.iloc[0]
row.name = None
# TODO: return Koalas series.
return row # Return first row as a Series
elif axis in ('columns', 1):
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only)
if len(pdf) <= limit:
return Series(pser)
@pandas_udf(returnType=as_spark_type(pser.dtype.type))
def calculate_columns_axis(*cols):
return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only)
df = self._sdf.select(calculate_columns_axis(*self._internal.data_scols).alias("0"))
return DataFrame(df)["0"]
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
# Arithmetic Operators
def _map_series_op(self, op, other):
from databricks.koalas.base import IndexOpsMixin
if not isinstance(other, DataFrame) and (isinstance(other, IndexOpsMixin) or
is_sequence(other)):
raise ValueError(
"%s with a sequence is currently not supported; "
"however, got %s." % (op, type(other)))
if isinstance(other, DataFrame) and self is not other:
if self._internal.column_index_level != other._internal.column_index_level:
raise ValueError('cannot join with no overlapping index names')
# Different DataFrames
def apply_op(kdf, this_column_index, that_column_index):
for this_idx, that_idx in zip(this_column_index, that_column_index):
yield (getattr(kdf[this_idx], op)(kdf[that_idx]), this_idx)
return align_diff_frames(apply_op, self, other, fillna=True, how="full")
else:
# DataFrame and Series
applied = []
for idx in self._internal.column_index:
applied.append(getattr(self[idx], op)(other))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0]
for c in applied],
column_index=[c._internal.column_index[0]
for c in applied])
return DataFrame(internal)
def __add__(self, other):
return self._map_series_op("add", other)
def __radd__(self, other):
return self._map_series_op("radd", other)
def __div__(self, other):
return self._map_series_op("div", other)
def __rdiv__(self, other):
return self._map_series_op("rdiv", other)
def __truediv__(self, other):
return self._map_series_op("truediv", other)
def __rtruediv__(self, other):
return self._map_series_op("rtruediv", other)
def __mul__(self, other):
return self._map_series_op("mul", other)
def __rmul__(self, other):
return self._map_series_op("rmul", other)
def __sub__(self, other):
return self._map_series_op("sub", other)
def __rsub__(self, other):
return self._map_series_op("rsub", other)
def __pow__(self, other):
return self._map_series_op("pow", other)
def __rpow__(self, other):
return self._map_series_op("rpow", other)
def __mod__(self, other):
return self._map_series_op("mod", other)
def __rmod__(self, other):
return self._map_series_op("rmod", other)
def __floordiv__(self, other):
return self._map_series_op("floordiv", other)
def __rfloordiv__(self, other):
return self._map_series_op("rfloordiv", other)
def add(self, other):
return self + other
# create accessor for plot
plot = CachedAccessor("plot", KoalasFramePlotMethods)
def hist(self, bins=10, **kwds):
return self.plot.hist(bins, **kwds)
hist.__doc__ = KoalasFramePlotMethods.hist.__doc__
def kde(self, bw_method=None, ind=None, **kwds):
return self.plot.kde(bw_method, ind, **kwds)
kde.__doc__ = KoalasFramePlotMethods.kde.__doc__
add.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name='+',
equiv='dataframe + other',
reverse='radd')
def radd(self, other):
return other + self
radd.__doc__ = _flex_doc_FRAME.format(
desc='Addition',
op_name="+",
equiv="other + dataframe",
reverse='add')
def div(self, other):
return self / other
div.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rdiv')
divide = div
def rdiv(self, other):
return other / self
rdiv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='div')
def truediv(self, other):
return self / other
truediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="dataframe / other",
reverse='rtruediv')
def rtruediv(self, other):
return other / self
rtruediv.__doc__ = _flex_doc_FRAME.format(
desc='Floating division',
op_name="/",
equiv="other / dataframe",
reverse='truediv')
def mul(self, other):
return self * other
mul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="dataframe * other",
reverse='rmul')
multiply = mul
def rmul(self, other):
return other * self
rmul.__doc__ = _flex_doc_FRAME.format(
desc='Multiplication',
op_name="*",
equiv="other * dataframe",
reverse='mul')
def sub(self, other):
return self - other
sub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="dataframe - other",
reverse='rsub')
subtract = sub
def rsub(self, other):
return other - self
rsub.__doc__ = _flex_doc_FRAME.format(
desc='Subtraction',
op_name="-",
equiv="other - dataframe",
reverse='sub')
def mod(self, other):
return self % other
mod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='dataframe % other',
reverse='rmod')
def rmod(self, other):
return other % self
rmod.__doc__ = _flex_doc_FRAME.format(
desc='Modulo',
op_name='%',
equiv='other % dataframe',
reverse='mod')
def pow(self, other):
return self ** other
pow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power of series',
op_name='**',
equiv='dataframe ** other',
reverse='rpow')
def rpow(self, other):
return other ** self
rpow.__doc__ = _flex_doc_FRAME.format(
desc='Exponential power',
op_name='**',
equiv='other ** dataframe',
reverse='pow')
def floordiv(self, other):
return self // other
floordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='dataframe // other',
reverse='rfloordiv')
def rfloordiv(self, other):
return other // self
rfloordiv.__doc__ = _flex_doc_FRAME.format(
desc='Integer division',
op_name='//',
equiv='other // dataframe',
reverse='floordiv')
# Comparison Operators
def __eq__(self, other):
return self._map_series_op("eq", other)
def __ne__(self, other):
return self._map_series_op("ne", other)
def __lt__(self, other):
return self._map_series_op("lt", other)
def __le__(self, other):
return self._map_series_op("le", other)
def __ge__(self, other):
return self._map_series_op("ge", other)
def __gt__(self, other):
return self._map_series_op("gt", other)
def eq(self, other):
"""
Compare if the current value is equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.eq(1)
a b
a True True
b False None
c False True
d False None
"""
return self == other
equals = eq
def gt(self, other):
"""
Compare if the current value is greater than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.gt(2)
a b
a False False
b False None
c True False
d True None
"""
return self > other
def ge(self, other):
"""
Compare if the current value is greater than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ge(1)
a b
a True True
b True None
c True True
d True None
"""
return self >= other
def lt(self, other):
"""
Compare if the current value is less than the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.lt(1)
a b
a False False
b False None
c False False
d False None
"""
return self < other
def le(self, other):
"""
Compare if the current value is less than or equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.le(2)
a b
a True True
b True None
c False True
d False None
"""
return self <= other
def ne(self, other):
"""
Compare if the current value is not equal to the other.
>>> df = ks.DataFrame({'a': [1, 2, 3, 4],
... 'b': [1, np.nan, 1, np.nan]},
... index=['a', 'b', 'c', 'd'], columns=['a', 'b'])
>>> df.ne(1)
a b
a False False
b True None
c True False
d True None
"""
return self != other
def applymap(self, func):
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> np.int32:
... return x ** 2
Koalas uses the return type hint and does not try to infer the type.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
Examples
--------
>>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> def str_len(x) -> int:
... return len(str(x))
>>> df.applymap(str_len)
0 1
0 3 4
1 5 5
>>> def power(x) -> float:
... return x ** 2
>>> df.applymap(power)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
You can omit the type hint and let Koalas infer its type.
>>> df.applymap(lambda x: x ** 2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
applied = []
for idx in self._internal.column_index:
# TODO: We can implement shortcut theoretically since it creates new DataFrame
# anyway and we don't have to worry about operations on different DataFrames.
applied.append(self[idx].apply(func))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
# TODO: not all arguments are implemented comparing to Pandas' for now.
def aggregate(self, func: Union[List[str], Dict[str, List[str]]]):
"""Aggregate using one or more operations over the specified axis.
Parameters
----------
func : dict or a list
a dict mapping from column name (string) to
aggregate functions (list of strings).
If a list is given, the aggregation is performed against
all columns.
Returns
-------
DataFrame
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
databricks.koalas.Series.groupby
databricks.koalas.DataFrame.groupby
Examples
--------
>>> df = ks.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
>>> df
A B C
0 1.0 2.0 3.0
1 4.0 5.0 6.0
2 7.0 8.0 9.0
3 NaN NaN NaN
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])[['A', 'B', 'C']]
A B C
min 1.0 2.0 3.0
sum 12.0 15.0 18.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']]
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
"""
from databricks.koalas.groupby import GroupBy
if isinstance(func, list):
if all((isinstance(f, str) for f in func)):
func = dict([
(column, func) for column in self.columns])
else:
raise ValueError("If the given function is a list, it "
"should only contains function names as strings.")
if not isinstance(func, dict) or \
not all(isinstance(key, str) and
(isinstance(value, str) or
isinstance(value, list) and all(isinstance(v, str) for v in value))
for key, value in func.items()):
raise ValueError("aggs must be a dict mapping from column name (string) to aggregate "
"functions (list of strings).")
kdf = DataFrame(GroupBy._spark_groupby(self, func, ())) # type: DataFrame
# The codes below basically converts:
#
# A B
# sum min min max
# 0 12.0 1.0 2.0 8.0
#
# to:
# A B
# max NaN 8.0
# min 1.0 2.0
# sum 12.0 NaN
#
# Aggregated output is usually pretty small, so it is fine to use the pandas API directly.
pdf = kdf.to_pandas().stack()
pdf.index = pdf.index.droplevel()
pdf.columns.names = [None]
pdf.index.names = [None]
return DataFrame(pdf[list(func.keys())])
agg = aggregate
def corr(self, method='pearson'):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'spearman'}
* pearson : standard correlation coefficient
* spearman : Spearman rank correlation
Returns
-------
y : pandas.DataFrame
See Also
--------
Series.corr
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr('pearson')
dogs cats
dogs 1.000000 -0.851064
cats -0.851064 1.000000
>>> df.corr('spearman')
dogs cats
dogs 1.000000 -0.948683
cats -0.948683 1.000000
Notes
-----
There are behavior differences between Koalas and pandas.
* the `method` argument only accepts 'pearson', 'spearman'
* the data should not contain NaNs. Koalas will return an error.
* Koalas doesn't support the following argument(s).
* `min_periods` argument is not supported
"""
return corr(self, method)
def iteritems(self) -> Iterable:
"""
Iterator over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Returns
-------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
Examples
--------
>>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'],
... columns=['species', 'population'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.iteritems():
... print('label:', label)
... print('content:', content.to_string())
...
label: species
content: panda bear
polar bear
koala marsupial
label: population
content: panda 1864
polar 22000
koala 80000
"""
cols = list(self.columns)
return list((col_name, self[col_name]) for col_name in cols)
def items(self) -> Iterable:
"""This is an alias of ``iteritems``."""
return self.iteritems()
def to_clipboard(self, excel=True, sep=None, **kwargs):
"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
.. note:: This method should only be used if the resulting DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
See Also
--------
read_clipboard : Read text from clipboard.
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
This function also works for Series:
>>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP
>>> df.to_clipboard(sep=',') # doctest: +SKIP
... # Wrote the following to the system clipboard:
... # 0, 1
... # 1, 2
... # 2, 3
... # 3, 4
... # 4, 5
... # 5, 6
... # 6, 7
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args)
def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.',
bold_rows=True, classes=None, escape=True, notebook=False, border=None,
table_id=None, render_links=False):
"""
Render a DataFrame as an HTML table.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links (only works with Pandas 0.24+).
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_string : Convert DataFrame to a string.
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args)
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, justify=None,
max_rows=None, max_cols=None, show_dimensions=False,
decimal='.', line_width=None):
"""
Render a DataFrame to a console-friendly tabular output.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, set max_rows parameter.
Parameters
----------
buf : StringIO-like, optional
Buffer to write to.
columns : sequence, optional, default None
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool, optional
Write out the column names. If a list of strings is given, it
is assumed to be aliases for the column names
index : bool, optional, default True
Whether to print index (row) labels.
na_rep : str, optional, default 'NaN'
String representation of NAN to use.
formatters : list or dict of one-param. functions, optional
Formatter functions to apply to columns' elements by position or
name.
The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional, default None
Formatter function to apply to columns' elements if they are
floats. The result of this function must be a unicode string.
sparsify : bool, optional, default True
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row.
index_names : bool, optional, default True
Prints the names of the indexes.
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset.
max_rows : int, optional
Maximum number of rows to display in the console.
max_cols : int, optional
Maximum number of columns to display in the console.
show_dimensions : bool, default False
Display DataFrame dimensions (number of rows by number of columns).
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
line_width : int, optional
Width to wrap a line in characters.
Returns
-------
str (or unicode, depending on data and options)
String representation of the dataframe.
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2'])
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
>>> print(df.to_string(max_rows=2))
col1 col2
0 1 4
1 2 5
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
if max_rows is not None:
kdf = self.head(max_rows)
else:
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args)
def to_dict(self, orient='dict', into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'],
... columns=['col1', 'col2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df_dict = df.to_dict()
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])]
You can specify the return orientation.
>>> df_dict = df.to_dict('series')
>>> sorted(df_dict.items())
[('col1', row1 1
row2 2
Name: col1, dtype: int64), ('col2', row1 0.50
row2 0.75
Name: col2, dtype: float64)]
>>> df_dict = df.to_dict('split')
>>> sorted(df_dict.items()) # doctest: +ELLIPSIS
[('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])]
>>> df_dict = df.to_dict('records')
>>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS
[[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]]
>>> df_dict = df.to_dict('index')
>>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()])
[('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])]
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS
[defaultdict(<class 'list'>, {'col..., 'col...}), \
defaultdict(<class 'list'>, {'col..., 'col...})]
"""
# Make sure locals() call is at the top of the function so we don't capture local variables.
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args)
def to_latex(self, buf=None, columns=None, col_space=None, header=True, index=True,
na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None,
decimal='.', multicolumn=None, multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice this into a LaTeX
document. Requires usepackage{booktabs}.
.. note:: This method should only be used if the resulting Pandas object is expected
to be small, as all the data is loaded into the driver's memory. If the input
is large, consider alternative formats.
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given, it is assumed to be aliases
for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default ‘NaN’
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns’ elements by position or name. The result of
each function must be a unicode string. List must be of length equal to the number of
columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every multiindex key at
each row. By default, the value will be read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By
default, ‘l’ will be used for all columns except columns of numbers, which default
to ‘r’.
longtable : bool, optional
By default, the value will be read from the pandas config module. Use a longtable
environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX
preamble.
escape : bool, optional
By default, the value will be read from the pandas config module. When set to False
prevents from escaping latex special characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file, defaults to ‘ascii’ on
Python 2 and ‘utf-8’ on Python 3.
decimal : str, default ‘.’
Character recognized as decimal separator, e.g. ‘,’ in Europe.
multicolumn : bool, default True
Use multicolumn to enhance MultiIndex columns. The default will be read from the config
module.
multicolumn_format : str, default ‘l’
The alignment for multicolumns, similar to column_format The default will be read from
the config module.
multirow : bool, default False
Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your
LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read from the pandas config
module.
Returns
-------
str or None
If buf is None, returns the resulting LateX format as a string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']},
... columns=['name', 'mask', 'weapon'])
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args)
# TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic
# when creating arrays)
def transpose(self):
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
.. note:: This method is based on an expensive operation due to the nature
of big data. Internally it needs to generate each row for each value, and
then group twice - it is a huge operation. To prevent misusage, this method
has the 'compute.max_rows' default limit of input length, and raises a ValueError.
>>> from databricks.koalas.config import get_option, set_option
>>> set_option('compute.max_rows', 1000)
>>> ks.DataFrame({'a': range(1001)}).transpose() # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Current DataFrame has more than the given limit 1000 rows.
Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option'
to retrieve more than 1000 rows. Note that, before changing the
'compute.max_rows', this operation is considerably expensive.
Returns
-------
DataFrame
The transposed DataFrame.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the coerced dtype. For instance, if int and float have
to be placed in same column, it becomes float. If type coercion is not
possible, it fails.
Also, note that the values in index should be unique because they become
unique column names.
In addition, if Spark 2.3 is used, the types should always be exactly the same.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2'])
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T.sort_index() # doctest: +SKIP
>>> df1_transposed # doctest: +SKIP
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes # doctest: +SKIP
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'score': [9.5, 8],
... 'kids': [0, 0],
... 'age': [12, 22]}
>>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age'])
>>> df2
score kids age
0 9.5 0 12
1 8.0 0 22
>>> df2_transposed = df2.T.sort_index() # doctest: +SKIP
>>> df2_transposed # doctest: +SKIP
0 1
age 12.0 22.0
kids 0.0 0.0
score 9.5 8.0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the coerced dtype:
>>> df2.dtypes
score float64
kids int64
age int64
dtype: object
>>> df2_transposed.dtypes # doctest: +SKIP
0 float64
1 float64
dtype: object
"""
max_compute_count = get_option("compute.max_rows")
if max_compute_count is not None:
pdf = self.head(max_compute_count + 1)._to_internal_pandas()
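# Fetch one row more than the limit so that exceeding 'compute.max_rows' can be detected;
# when the data fits within the limit, the transpose is simply delegated to pandas.
# The Spark-based explode/pivot path below only runs when 'compute.max_rows' is None.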
if len(pdf) > max_compute_count:
raise ValueError(
"Current DataFrame has more then the given limit {0} rows. "
"Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' "
"to retrieve to retrieve more than {0} rows. Note that, before changing the "
"'compute.max_rows', this operation is considerably expensive."
.format(max_compute_count))
return DataFrame(pdf.transpose())
# Explode the data to be pairs.
#
# For instance, if the current input DataFrame is as below:
#
# +------+------+------+------+------+
# |index1|index2|(a,x1)|(a,x2)|(b,x3)|
# +------+------+------+------+------+
# | y1| z1| 1| 0| 0|
# | y2| z2| 0| 50| 0|
# | y3| z3| 3| 2| 1|
# +------+------+------+------+------+
#
# Output of `exploded_df` becomes as below:
#
# +-----------------+-----------------+-----------------+-----+
# | index|__index_level_0__|__index_level_1__|value|
# +-----------------+-----------------+-----------------+-----+
# |{"a":["y1","z1"]}| a| x1| 1|
# |{"a":["y1","z1"]}| a| x2| 0|
# |{"a":["y1","z1"]}| b| x3| 0|
# |{"a":["y2","z2"]}| a| x1| 0|
# |{"a":["y2","z2"]}| a| x2| 50|
# |{"a":["y2","z2"]}| b| x3| 0|
# |{"a":["y3","z3"]}| a| x1| 3|
# |{"a":["y3","z3"]}| a| x2| 2|
# |{"a":["y3","z3"]}| b| x3| 1|
# +-----------------+-----------------+-----------------+-----+
pairs = F.explode(F.array(*[
F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(idx)] +
[self[idx]._scol.alias("value")]
) for idx in self._internal.column_index]))
exploded_df = self._sdf.withColumn("pairs", pairs).select(
[F.to_json(F.struct(F.array([scol.cast('string')
for scol in self._internal.index_scols])
.alias('a'))).alias('index'),
F.col("pairs.*")])
# After that, executes pivot with key and its index column.
# Note that index column should contain unique values since column names
# should be unique.
internal_index_columns = [SPARK_INDEX_NAME_FORMAT(i)
for i in range(self._internal.column_index_level)]
pivoted_df = exploded_df.groupBy(internal_index_columns).pivot('index')
transposed_df = pivoted_df.agg(F.first(F.col("value")))
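# The pivoted column names are the JSON-encoded index strings built above; every column
# that is not one of the internal grouping columns is therefore transposed data, and the
# JSON is decoded back into a tuple below to rebuild the column index.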
new_data_columns = list(filter(lambda x: x not in internal_index_columns,
transposed_df.columns))
internal = self._internal.copy(
sdf=transposed_df,
data_columns=new_data_columns,
index_map=[(col, None) for col in internal_index_columns],
column_index=[tuple(json.loads(col)['a']) for col in new_data_columns],
column_index_names=None)
return DataFrame(internal)
T = property(transpose)
def transform(self, func):
"""
Call ``func`` on self producing a Series with transformed values
and that has the same length as its input.
.. note:: this API executes the function once to infer the type which is
potentially expensive, for instance, when the dataset is created after
aggregations or sorting.
To avoid this, specify return type in ``func``, for instance, as below:
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
Koalas uses the return type hint and does not try to infer the type.
.. note:: the series within ``func`` is actually a pandas series, and
the length of each series is not guaranteed.
Parameters
----------
func : function
Function to use for transforming the data. It must work when pandas Series
is passed.
Returns
-------
DataFrame
A DataFrame that must have the same length as self.
Raises
------
Exception : If the returned DataFrame has a different length than self.
Examples
--------
>>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B'])
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> def square(x) -> ks.Series[np.int32]:
... return x ** 2
>>> df.transform(square)
A B
0 0 1
1 1 4
2 4 9
You can omit the type hint and let Koalas infer its type.
>>> df.transform(lambda x: x ** 2)
A B
0 0 1
1 1 4
2 4 9
For multi-index columns:
>>> df.columns = [('X', 'A'), ('X', 'B')]
>>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
>>> df.transform(lambda x: x ** 2) # doctest: +NORMALIZE_WHITESPACE
X
A B
0 0 1
1 1 4
2 4 9
"""
assert callable(func), "the first argument should be a callable function."
spec = inspect.getfullargspec(func)
return_sig = spec.annotations.get("return", None)
should_infer_schema = return_sig is None
if should_infer_schema:
# Here we execute with the first 1000 to get the return type.
# If the records were less than 1000, it uses pandas API directly for a shortcut.
limit = get_option("compute.shortcut_limit")
pdf = self.head(limit + 1)._to_internal_pandas()
transformed = pdf.transform(func)
kdf = DataFrame(transformed)
if len(pdf) <= limit:
return kdf
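# The sampled frame was truncated, so apply the function to the full dataset instead,
# reusing the Spark return types inferred from the pandas result above.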
applied = []
for input_idx, output_idx in zip(
self._internal.column_index, kdf._internal.column_index):
wrapped = ks.pandas_wraps(
func,
return_col=as_python_type(kdf[output_idx].spark_type))
applied.append(wrapped(self[input_idx]).rename(input_idx))
else:
wrapped = ks.pandas_wraps(func)
applied = []
for idx in self._internal.column_index:
applied.append(wrapped(self[idx]).rename(idx))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
Also support for MultiIndex
>>> df = ks.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df
a b
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('a')
name class
0 falcon bird
1 parrot bird
2 lion mammal
3 monkey mammal
>>> df
b
max_speed
0 389.0
1 24.0
2 80.5
3 NaN
"""
result = self[item]
self._internal = self.drop(item)._internal
return result
# TODO: add axis parameter so it can work when axis is 1 or 'columns'
def xs(self, key, axis=0, level=None):
"""
Return cross-section from the DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : 0 or 'index', default 0
Axis to retrieve cross-section on.
currently only support 0 or 'index'
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
Returns
-------
DataFrame
Cross-section from the original DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = ks.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE
num_legs num_wings
class locomotion
mammal walks 4 0
"""
from databricks.koalas.series import _col
if not isinstance(key, (str, tuple)):
raise ValueError("'key' should be string or tuple that contains strings")
if not all(isinstance(index, str) for index in key):
raise ValueError("'key' should have index names as only strings "
"or a tuple that contain index names as only strings")
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
if isinstance(key, str):
key = (key,)
if len(key) > len(self._internal.index_scols):
raise KeyError("Key length ({}) exceeds index depth ({})"
.format(len(key), len(self._internal.index_scols)))
if level is None:
level = 0
scols = self._internal.scols[:level] + self._internal.scols[level+len(key):]
rows = [self._internal.scols[lvl] == index
for lvl, index in enumerate(key, level)]
sdf = self._sdf.select(scols) \
.where(reduce(lambda x, y: x & y, rows))
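# When the key covers every index level, only a single row can remain; it is transposed
# into a Series named after the key. Otherwise the matched levels are dropped from the
# resulting index.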
if len(key) == len(self._internal.index_scols):
result = _col(DataFrame(_InternalFrame(sdf=sdf)).T)
result.name = key
else:
internal = self._internal.copy(
sdf=sdf,
index_map=self._internal.index_map[:level] +
self._internal.index_map[level+len(key):])
result = DataFrame(internal)
return result
@property
def index(self):
"""The index (row labels) Column of the DataFrame.
Currently not supported when the DataFrame has no index.
See Also
--------
Index
"""
from databricks.koalas.indexes import Index, MultiIndex
if len(self._internal.index_map) == 1:
return Index(self)
else:
return MultiIndex(self)
@property
def empty(self):
"""
Returns true if the current DataFrame is empty. Otherwise, returns false.
Examples
--------
>>> ks.range(10).empty
False
>>> ks.range(0).empty
True
>>> ks.DataFrame({}, index=list('abc')).empty
True
"""
return len(self._internal.column_index) == 0 or self._sdf.rdd.isEmpty()
@property
def style(self):
"""
Property returning a Styler object containing methods for
building a styled HTML representation of the DataFrame.
.. note:: currently it collects the top 1000 rows and returns its
pandas `pandas.io.formats.style.Styler` instance.
Examples
--------
>>> ks.range(1001).style # doctest: +ELLIPSIS
<pandas.io.formats.style.Styler object at ...>
"""
max_results = get_option('compute.max_rows')
pdf = self.head(max_results + 1).to_pandas()
if len(pdf) > max_results:
warnings.warn(
"'style' property will only use top %s rows." % max_results, UserWarning)
return pdf.head(max_results).style
def set_index(self, keys, drop=True, append=False, inplace=False):
"""Set the DataFrame index (row labels) using one or more existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index` and ``np.ndarray``.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
Examples
--------
>>> df = ks.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]},
... columns=['month', 'year', 'sale'])
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
"""
if isinstance(keys, (str, tuple)):
keys = [keys]
else:
keys = list(keys)
columns = set(self.columns)
for key in keys:
if key not in columns:
raise KeyError(key)
keys = [key if isinstance(key, tuple) else (key,) for key in keys]
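# Keys are normalized to tuples above so that plain labels and MultiIndex labels are
# handled uniformly when building the new index map below.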
if drop:
column_index = [idx for idx in self._internal.column_index if idx not in keys]
else:
column_index = self._internal.column_index
if append:
index_map = self._internal.index_map + [(self._internal.column_name_for(idx), idx)
for idx in keys]
else:
index_map = [(self._internal.column_name_for(idx), idx) for idx in keys]
internal = self._internal.copy(index_map=index_map,
column_index=column_index,
data_columns=[self._internal.column_name_for(idx)
for idx in column_index])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''):
"""Reset the index, or a level of it.
For DataFrame with multi-level index, return new DataFrame with labeling information in
the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None.
For a standard index, the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame
DataFrame with the new index.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
Examples
--------
>>> df = ks.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column. Unlike pandas, Koalas
does not automatically add a sequential index. The following 0, 1, 2, 3 are only
there when we display the DataFrame.
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = ks.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1,
... col_fill='species') # doctest: +NORMALIZE_WHITESPACE
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1,
... col_fill='genus') # doctest: +NORMALIZE_WHITESPACE
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
multi_index = len(self._internal.index_map) > 1
def rename(index):
if multi_index:
return ('level_{}'.format(index),)
else:
if ('index',) not in self._internal.column_index:
return ('index',)
else:
return ('level_{}'.format(index),)
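# Unnamed index levels are materialized as 'index' for a single-level index, or as
# 'level_<n>' when a column named 'index' already exists or the index is a MultiIndex,
# mirroring pandas' naming.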
if level is None:
new_index_map = [(column, name if name is not None else rename(i))
for i, (column, name) in enumerate(self._internal.index_map)]
index_map = []
else:
if isinstance(level, (int, str)):
level = [level]
level = list(level)
if all(isinstance(l, int) for l in level):
for lev in level:
if lev >= len(self._internal.index_map):
raise IndexError('Too many levels: Index has only {} level, not {}'
.format(len(self._internal.index_map), lev + 1))
idx = level
elif all(isinstance(lev, str) for lev in level):
idx = []
for l in level:
try:
i = self._internal.index_columns.index(l)
idx.append(i)
except ValueError:
if multi_index:
raise KeyError('Level {} not found'.format(l))
else:
raise KeyError('Level {} must be same as name ({})'
.format(l, self._internal.index_columns[0]))
else:
raise ValueError('Level should be all int or all string.')
idx.sort()
new_index_map = []
index_map = self._internal.index_map.copy()
for i in idx:
info = self._internal.index_map[i]
index_column, index_name = info
new_index_map.append(
(index_column,
index_name if index_name is not None else rename(i)))
index_map.remove(info)
new_data_scols = [
self._internal.scol_for(column).alias(str(name)) for column, name in new_index_map]
if len(index_map) > 0:
index_scols = [scol_for(self._sdf, column) for column, _ in index_map]
sdf = self._sdf.select(
index_scols + new_data_scols + self._internal.data_scols)
else:
sdf = self._sdf.select(new_data_scols + self._internal.data_scols)
# Now, the new internal Spark columns are named the same as the index names.
new_index_map = [(column, name) for column, name in new_index_map]
index_map = [(SPARK_INDEX_NAME_FORMAT(0), None)]
sdf = _InternalFrame.attach_default_index(sdf)
if drop:
new_index_map = []
internal = self._internal.copy(
sdf=sdf,
data_columns=[str(name) for _, name in new_index_map] + self._internal.data_columns,
index_map=index_map,
column_index=None)
if self._internal.column_index_level > 1:
column_depth = len(self._internal.column_index[0])
if col_level >= column_depth:
raise IndexError('Too many levels: Index has only {} levels, not {}'
.format(column_depth, col_level + 1))
if any(col_level + len(name) > column_depth for _, name in new_index_map):
raise ValueError('Item must have length equal to number of levels.')
columns = pd.MultiIndex.from_tuples(
[tuple(([col_fill] * col_level)
+ list(name)
+ ([col_fill] * (column_depth - (len(name) + col_level))))
for _, name in new_index_map]
+ self._internal.column_index)
else:
columns = [name for _, name in new_index_map] + self._internal.column_index
if inplace:
self._internal = internal
self.columns = columns
else:
kdf = DataFrame(internal)
kdf.columns = columns
return kdf
def isnull(self):
"""
Detects missing values for items in the current Dataframe.
Return a boolean same-sized Dataframe indicating if the values are NA.
NA values, such as None or numpy.NaN, get mapped to True values.
Everything else gets mapped to False values.
See Also
--------
Dataframe.notnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.isnull()
0 1
0 False False
1 False True
2 False True
3 False False
>>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']])
>>> df.isnull()
0 1 2
0 True False True
1 False True False
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.isnull()
return kdf
isna = isnull
def notnull(self):
"""
Detects non-missing values for items in the current Dataframe.
This function takes a dataframe and indicates whether its
values are valid (not missing, which is ``NaN`` in numeric
datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike).
See Also
--------
Dataframe.isnull
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)])
>>> df.notnull()
0 1
0 True True
1 True False
2 True False
3 True True
>>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']])
>>> df.notnull()
0 1 2
0 True True True
1 True False True
"""
kdf = self.copy()
for name, ks in kdf.iteritems():
kdf[name] = ks.notnull()
return kdf
notna = notnull
# TODO: add freq and axis parameter
def shift(self, periods=1, fill_value=None):
"""
Shift DataFrame by desired number of periods.
.. note:: the current implementation of shift uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition in a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of self. For numeric data, np.nan is used.
Returns
-------
Copy of input DataFrame, shifted.
Examples
--------
>>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]},
... columns=['Col1', 'Col2', 'Col3'])
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].shift(periods, fill_value))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
# TODO: axis should also support 1 or 'columns'; only 0 or 'index' is supported at this moment
def diff(self, periods: int = 1, axis: Union[int, str] = 0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another element in the
DataFrame (default is the element in the same column of the previous row).
.. note:: the current implementation of diff uses Spark's Window without
specifying partition specification. This leads to moving all data into a
single partition in a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative values.
axis : {0 or 'index'}, default 0
Can only be set to 0 at the moment.
Returns
-------
diffed : DataFrame
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].diff(periods))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
# TODO: axis should also support 1 or 'columns'; only 0 or 'index' is supported at this moment
def nunique(self, axis: Union[int, str] = 0, dropna: bool = True, approx: bool = False,
rsd: float = 0.05) -> pd.Series:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
axis : {0 or 'index'}, default 0
Can only be set to 0 at the moment.
dropna : bool, default True
Don’t include NaN in the count.
approx: bool, default False
If False, will use the exact algorithm and return the exact number of unique values.
If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
for large amounts of data.
Note: This parameter is specific to Koalas and is not found in pandas.
rsd: float, default 0.05
Maximum estimation error allowed in the HyperLogLog algorithm.
Note: Just like ``approx`` this parameter is specific to Koalas.
Returns
-------
The number of unique values per column as a pandas Series.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]})
>>> df.nunique()
A 3
B 1
Name: 0, dtype: int64
>>> df.nunique(dropna=False)
A 3
B 2
Name: 0, dtype: int64
On big data, we recommend using the approximate algorithm to speed up this function.
The result will be very close to the exact unique count.
>>> df.nunique(approx=True)
A 3
B 1
Name: 0, dtype: int64
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
res = self._sdf.select([self[column]._nunique(dropna, approx, rsd)
for column in self.columns])
return res.toPandas().T.iloc[:, 0]
def round(self, decimals=0):
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
Returns
-------
DataFrame
See Also
--------
Series.round
Examples
--------
>>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076],
... 'B':[0.992815, 0.645646, 0.149370],
... 'C':[0.173891, 0.577595, 0.491027]},
... columns=['A', 'B', 'C'],
... index=['first', 'second', 'third'])
>>> df
A B C
first 0.028208 0.992815 0.173891
second 0.038683 0.645646 0.577595
third 0.877076 0.149370 0.491027
>>> df.round(2)
A B C
first 0.03 0.99 0.17
second 0.04 0.65 0.58
third 0.88 0.15 0.49
>>> df.round({'A': 1, 'C': 2})
A B C
first 0.0 0.992815 0.17
second 0.0 0.645646 0.58
third 0.9 0.149370 0.49
>>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C'])
>>> df.round(decimals)
A B C
first 0.0 1.0 0.17
second 0.0 1.0 0.58
third 0.9 0.0 0.49
"""
if isinstance(decimals, ks.Series):
decimals_list = [(k if isinstance(k, tuple) else (k,), v)
for k, v in decimals._to_internal_pandas().items()]
elif isinstance(decimals, dict):
decimals_list = [(k if isinstance(k, tuple) else (k,), v)
for k, v in decimals.items()]
elif isinstance(decimals, int):
decimals_list = [(k, decimals) for k in self._internal.column_index]
else:
raise ValueError("decimals must be an integer, a dict-like or a Series")
sdf = self._sdf
for idx, decimal in decimals_list:
if idx in self._internal.column_index:
col = self._internal.column_name_for(idx)
sdf = sdf.withColumn(col, F.round(scol_for(sdf, col), decimal))
return DataFrame(self._internal.copy(sdf=sdf))
def duplicated(self, subset=None, keep='first'):
"""
Return boolean Series denoting duplicate rows, optionally only considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates,
by default use all of the columns
keep : {'first', 'last', False}, default 'first'
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
duplicated : Series
Examples
--------
>>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]},
... columns = ['a', 'b', 'c'])
>>> df
a b c
0 1 1 1
1 1 1 1
2 1 1 1
3 3 4 5
>>> df.duplicated().sort_index()
0 False
1 True
2 True
3 False
Name: 0, dtype: bool
Mark duplicates as ``True`` except for the last occurrence.
>>> df.duplicated(keep='last').sort_index()
0 True
1 True
2 False
3 False
Name: 0, dtype: bool
Mark all duplicates as ``True``.
>>> df.duplicated(keep=False).sort_index()
0 True
1 True
2 True
3 False
Name: 0, dtype: bool
"""
from databricks.koalas.series import _col
if len(self._internal.index_names) > 1:
raise ValueError("Now we don't support multi-index Now.")
if subset is None:
subset = self._internal.column_index
else:
if isinstance(subset, str):
subset = [(subset,)]
elif isinstance(subset, tuple):
subset = [subset]
else:
subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
diff = set(subset).difference(set(self._internal.column_index))
if len(diff) > 0:
raise KeyError(', '.join([str(d) if len(d) > 1 else d[0] for d in diff]))
group_cols = [self._internal.column_name_for(idx) for idx in subset]
index_column = self._internal.index_columns[0]
if self._internal.index_names[0] is not None:
name = self._internal.index_names[0]
else:
name = ('0',)
column = str(name) if len(name) > 1 else name[0]
sdf = self._sdf
if column == index_column:
index_column = SPARK_INDEX_NAME_FORMAT(0)
sdf = sdf.select([self._internal.index_scols[0].alias(index_column)]
+ self._internal.data_scols)
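# If the output column name collides with the index column name, the index is re-aliased
# to the internal index name so that both columns can coexist in the Spark plan.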
if keep == 'first' or keep == 'last':
if keep == 'first':
ord_func = spark.functions.asc
else:
ord_func = spark.functions.desc
window = Window.partitionBy(group_cols).orderBy(ord_func(index_column)).rowsBetween(
Window.unboundedPreceding, Window.currentRow)
sdf = sdf.withColumn(column, F.row_number().over(window) > 1)
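# Within each group of identical subset values, every row other than the first (or the
# last, depending on 'keep') gets a row_number greater than 1 and is flagged as a duplicate.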
elif not keep:
window = Window.partitionBy(group_cols).orderBy(scol_for(sdf, index_column).desc())\
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
sdf = sdf.withColumn(column, F.count(scol_for(sdf, index_column)).over(window) > 1)
else:
raise ValueError("'keep' only support 'first', 'last' and False")
return _col(DataFrame(_InternalFrame(sdf=sdf.select(scol_for(sdf, index_column),
scol_for(sdf, column)),
data_columns=[column],
column_index=[name],
index_map=[(index_column,
self._internal.index_names[0])])))
def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None):
"""
Converts the existing DataFrame into a Koalas DataFrame.
This method is monkey-patched into Spark's DataFrame and can be used
to convert a Spark DataFrame into a Koalas DataFrame. If running on
an existing Koalas DataFrame, the method returns itself.
If a Koalas DataFrame is converted to a Spark DataFrame and then back
to Koalas, it will lose the index information and the original index
will be turned into a normal column.
Parameters
----------
index_col: str or list of str, optional, default: None
Index column of table in Spark.
See Also
--------
DataFrame.to_spark
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
>>> df
col1 col2
0 1 3
1 2 4
>>> spark_df = df.to_spark()
>>> spark_df
DataFrame[col1: bigint, col2: bigint]
>>> kdf = spark_df.to_koalas()
>>> kdf
col1 col2
0 1 3
1 2 4
We can specify the index columns.
>>> kdf = spark_df.to_koalas(index_col='col1')
>>> kdf # doctest: +NORMALIZE_WHITESPACE
col2
col1
1 3
2 4
Calling to_koalas on a Koalas DataFrame simply returns itself.
>>> df.to_koalas()
col1 col2
0 1 3
1 2 4
"""
if isinstance(self, DataFrame):
return self
else:
assert isinstance(self, spark.DataFrame), type(self)
from databricks.koalas.namespace import _get_index_map
index_map = _get_index_map(self, index_col)
internal = _InternalFrame(sdf=self, index_map=index_map)
return DataFrame(internal)
def cache(self):
"""
Yields and caches the current DataFrame.
The Koalas DataFrame is yielded as a protected resource and its corresponding
data is cached which gets uncached after execution goes out of the context.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
>>> with df.cache() as cached_df:
... print(cached_df.count())
...
dogs 4
cats 4
dtype: int64
>>> df = df.cache()
>>> df.to_pandas().mean(axis=1)
0 0.25
1 0.30
2 0.30
3 0.15
dtype: float64
To uncache the dataframe, use the `unpersist` function
>>> df.unpersist()
"""
return _CachedDataFrame(self._internal)
def to_table(self, name: str, format: Optional[str] = None, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
**options):
"""
Write the DataFrame into a Spark table.
Parameters
----------
name : str, required
Table name in Spark.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the table exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options
Additional options passed directly to Spark.
See Also
--------
read_table
DataFrame.to_spark_io
DataFrame.to_parquet
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_table('%s.my_table' % db, partition_cols='date')
"""
self.to_spark().write.saveAsTable(name=name, format=format, mode=mode,
partitionBy=partition_cols, options=options)
def to_delta(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None, **options):
"""
Write the DataFrame out as a Delta Lake table.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
options : dict
All other options passed directly into Delta Lake.
See Also
--------
read_delta
DataFrame.to_parquet
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
Create a new Delta Lake table, partitioned by one column:
>>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date')
Partitioned by two columns:
>>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country'])
Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta:
>>> df.to_delta('%s/to_delta/bar' % path,
... mode='overwrite', replaceWhere='date >= "2019-01-01"')
"""
self.to_spark_io(
path=path, mode=mode, format="delta", partition_cols=partition_cols, options=options)
def to_parquet(self, path: str, mode: str = 'error',
partition_cols: Union[str, List[str], None] = None,
compression: Optional[str] = None):
"""
Write the DataFrame out as a Parquet file or directory.
Parameters
----------
path : str, required
Path to write to.
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when the destination exists already.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional, default None
Names of partitioning columns
compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'}
Compression codec to use when saving to file. If None is set, it uses the
value specified in `spark.sql.parquet.compression.codec`.
See Also
--------
read_parquet
DataFrame.to_delta
DataFrame.to_table
DataFrame.to_spark_io
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date')
>>> df.to_parquet(
... '%s/to_parquet/foo.parquet' % path,
... mode = 'overwrite',
... partition_cols=['date', 'country'])
"""
self.to_spark().write.parquet(
path=path, mode=mode, partitionBy=partition_cols, compression=compression)
def to_spark_io(self, path: Optional[str] = None, format: Optional[str] = None,
mode: str = 'error', partition_cols: Union[str, List[str], None] = None,
**options):
"""Write the DataFrame out to a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'error'.
Specifies the behavior of the save operation when data already exists.
- 'append': Append the new data to existing data.
- 'overwrite': Overwrite existing data.
- 'ignore': Silently ignore this operation if data already exists.
- 'error' or 'errorifexists': Throw an exception if data already exists.
partition_cols : str or list of str, optional
Names of partitioning columns
options : dict
All other options passed directly into Spark's data source.
See Also
--------
read_spark_io
DataFrame.to_delta
DataFrame.to_parquet
DataFrame.to_table
Examples
--------
>>> df = ks.DataFrame(dict(
... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')),
... country=['KR', 'US', 'JP'],
... code=[1, 2 ,3]), columns=['date', 'country', 'code'])
>>> df
date country code
0 2012-01-31 12:00:00 KR 1
1 2012-02-29 12:00:00 US 2
2 2012-03-31 12:00:00 JP 3
>>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json')
"""
self.to_spark().write.save(
path=path, format=format, mode=mode, partitionBy=partition_cols, options=options)
def to_spark(self, index_col: Optional[Union[str, List[str]]] = None):
"""
Return the current DataFrame as a Spark DataFrame.
Parameters
----------
index_col: str or list of str, optional, default: None
Column names to be used in Spark to represent Koalas' index. The index name
in Koalas is ignored. By default, the index is always lost.
See Also
--------
DataFrame.to_koalas
Examples
--------
By default, this method loses the index as below.
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
>>> df.to_spark().show() # doctest: +NORMALIZE_WHITESPACE
+---+---+---+
| a| b| c|
+---+---+---+
| 1| 4| 7|
| 2| 5| 8|
| 3| 6| 9|
+---+---+---+
If `index_col` is set, it keeps the index column as specified.
>>> df.to_spark(index_col="index").show() # doctest: +NORMALIZE_WHITESPACE
+-----+---+---+---+
|index| a| b| c|
+-----+---+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-----+---+---+---+
Keeping index column is useful when you want to call some Spark APIs and
convert it back to Koalas DataFrame without creating a default index, which
can affect performance.
>>> spark_df = df.to_spark(index_col="index")
>>> spark_df = spark_df.filter("a == 2")
>>> spark_df.to_koalas(index_col="index") # doctest: +NORMALIZE_WHITESPACE
a b c
index
1 2 5 8
In case of multi-index, specify a list to `index_col`.
>>> new_df = df.set_index("a", append=True)
>>> new_spark_df = new_df.to_spark(index_col=["index_1", "index_2"])
>>> new_spark_df.show() # doctest: +NORMALIZE_WHITESPACE
+-------+-------+---+---+
|index_1|index_2| b| c|
+-------+-------+---+---+
| 0| 1| 4| 7|
| 1| 2| 5| 8|
| 2| 3| 6| 9|
+-------+-------+---+---+
Likewise, it can be converted back to a Koalas DataFrame.
>>> new_spark_df.to_koalas(
... index_col=["index_1", "index_2"]) # doctest: +NORMALIZE_WHITESPACE
b c
index_1 index_2
0 1 4 7
1 2 5 8
2 3 6 9
"""
if index_col is None:
return self._internal.spark_df
else:
if isinstance(index_col, str):
index_col = [index_col]
data_column_names = []
data_columns = []
data_columns_column_index = \
zip(self._internal._data_columns, self._internal.column_index)
# TODO: this code is similar with _InternalFrame.spark_df. Might have to deduplicate.
for i, (column, idx) in enumerate(data_columns_column_index):
scol = self._internal.scol_for(idx)
name = str(i) if idx is None else str(idx) if len(idx) > 1 else idx[0]
data_column_names.append(name)
if column != name:
scol = scol.alias(name)
data_columns.append(scol)
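# The loop above aliases each data column back to its user-facing name (tuples are
# stringified for MultiIndex columns, and aliasing only happens when the internal name
# differs), so the returned Spark DataFrame matches what the user sees in Koalas.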
old_index_scols = self._internal.index_scols
if len(index_col) != len(old_index_scols):
raise ValueError(
"length of index columns is %s; however, the length of the given "
"'index_col' is %s." % (len(old_index_scols), len(index_col)))
if any(col in data_column_names for col in index_col):
raise ValueError(
"'index_col' cannot be overlapped with other columns.")
sdf = self._internal.spark_internal_df
new_index_scols = [
index_scol.alias(col) for index_scol, col in zip(old_index_scols, index_col)]
return sdf.select(new_index_scols + data_columns)
def to_pandas(self):
"""
Return a pandas DataFrame.
.. note:: This method should only be used if the resulting Pandas DataFrame is expected
to be small, as all the data is loaded into the driver's memory.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.to_pandas()
dogs cats
0 0.2 0.3
1 0.0 0.6
2 0.6 0.0
3 0.2 0.1
"""
return self._internal.pandas_df.copy()
# Alias to maintain backward compatibility with Spark
toPandas = to_pandas
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though Koalas doesn't check it).
If the values are not callable, (e.g. a Series or a literal),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Examples
--------
>>> df = ks.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence and you can also
create multiple columns within the same assign.
>>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32,
... temp_k=df['temp_c'] + 273.15)
>>> assigned[['temp_c', 'temp_f', 'temp_k']]
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
Notes
-----
Assigning multiple columns within the same ``assign`` is possible
but you cannot refer to newly created or modified columns. This
feature is supported in pandas for Python 3.6 and later but not in
Koalas. In Koalas, all items are computed first, and then assigned.
"""
return self._assign(kwargs)
def _assign(self, kwargs):
assert isinstance(kwargs, dict)
from databricks.koalas.series import Series
for k, v in kwargs.items():
if not (isinstance(v, (Series, spark.Column)) or
callable(v) or pd.api.types.is_scalar(v)):
raise TypeError("Column assignment doesn't support type "
"{0}".format(type(v).__name__))
if callable(v):
kwargs[k] = v(self)
pairs = {(k if isinstance(k, tuple) else (k,)):
(v._scol if isinstance(v, Series)
else v if isinstance(v, spark.Column)
else F.lit(v))
for k, v in kwargs.items()}
scols = []
for idx in self._internal.column_index:
for i in range(len(idx)):
if idx[:len(idx)-i] in pairs:
name = self._internal.column_name_for(idx)
scol = pairs[idx[:len(idx)-i]].alias(name)
break
else:
scol = self._internal.scol_for(idx)
scols.append(scol)
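# An existing column is replaced when any prefix of its column index matches an assigned
# key (longest prefix checked first); otherwise the original Spark column is kept as-is.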
adding_data_columns = []
adding_column_index = []
for idx, scol in pairs.items():
if idx not in set(i[:len(idx)] for i in self._internal.column_index):
name = str(idx) if len(idx) > 1 else idx[0]
scols.append(scol.alias(name))
adding_data_columns.append(name)
adding_column_index.append(idx)
sdf = self._sdf.select(self._internal.index_scols + scols)
level = self._internal.column_index_level
adding_column_index = [tuple(list(idx) + ([''] * (level - len(idx))))
for idx in adding_column_index]
internal = self._internal.copy(
sdf=sdf,
data_columns=(self._internal.data_columns + adding_data_columns),
column_index=(self._internal.column_index + adding_column_index))
return DataFrame(internal)
@staticmethod
def from_records(data: Union[np.array, List[tuple], dict, pd.DataFrame],
index: Union[str, list, np.array] = None, exclude: list = None,
columns: list = None, coerce_float: bool = False, nrows: int = None) \
-> 'DataFrame':
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names associated with them, this
argument provides names for the columns. Otherwise this argument indicates the order of
the columns in the result (any names not found in the data will become all-NA columns)
coerce_float : boolean, default False
Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to
floating point, useful for SQL result sets
nrows : int, default None
Number of rows to read if data is an iterator
Returns
-------
df : DataFrame
Examples
--------
Use dict as input
>>> ks.DataFrame.from_records({'A': [1, 2, 3]})
A
0 1
1 2
2 3
Use list of tuples as input
>>> ks.DataFrame.from_records([(1, 2), (3, 4)])
0 1
0 1 2
1 3 4
Use NumPy array as input
>>> ks.DataFrame.from_records(np.eye(3))
0 1 2
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
return DataFrame(pd.DataFrame.from_records(data, index, exclude, columns, coerce_float,
nrows))
def to_records(self, index=True, convert_datetime64=None,
column_dtypes=None, index_dtypes=None):
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
.. note:: This method should only be used if the resulting NumPy ndarray is
expected to be small, as all the data is loaded into the driver's memory.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
convert_datetime64 : bool, default None
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records() # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False) # doctest: +SKIP
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Specification of dtype for columns is new in Pandas 0.24.0.
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')])
Specification of dtype for index is new in Pandas 0.24.0.
Data types can also be specified for the index:
>>> df.to_records(index_dtypes="<S2") # doctest: +SKIP
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')])
"""
args = locals()
kdf = self
return validate_arguments_and_invoke_function(
kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args)
def copy(self) -> 'DataFrame':
"""
Make a copy of this object's indices and data.
Returns
-------
copy : DataFrame
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df_copy = df.copy()
>>> df_copy
x y z w
0 1 3 5 7
1 2 4 6 8
"""
return DataFrame(self._internal.copy())
def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False):
"""
Remove missing values.
Parameters
----------
axis : {0 or 'index'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.drop : Drop specified labels from columns.
DataFrame.isnull: Indicate missing values.
DataFrame.notnull : Indicate existing (non-missing) values.
Examples
--------
>>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [None, 'Batmobile', 'Bullwhip'],
... "born": [None, "1940-04-25", None]},
... columns=['name', 'toy', 'born'])
>>> df
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred None None
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip None
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if axis == 0 or axis == 'index':
if subset is not None:
if isinstance(subset, str):
idxes = [(subset,)]
elif isinstance(subset, tuple):
idxes = [subset]
else:
idxes = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
invalids = [idx for idx in idxes
if idx not in self._internal.column_index]
if len(invalids) > 0:
raise KeyError(invalids)
else:
idxes = self._internal.column_index
cnt = reduce(lambda x, y: x + y,
[F.when(self[idx].notna()._scol, 1).otherwise(0)
for idx in idxes],
F.lit(0))
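# 'cnt' is the per-row count of non-NA values among the selected columns; the 'thresh',
# 'any' and 'all' options below are all expressed as predicates over that count.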
if thresh is not None:
pred = cnt >= F.lit(int(thresh))
elif how == 'any':
pred = cnt == F.lit(len(idxes))
elif how == 'all':
pred = cnt > F.lit(0)
else:
if how is not None:
raise ValueError('invalid how option: {h}'.format(h=how))
else:
raise TypeError('must specify how or thresh')
sdf = self._sdf.filter(pred)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
else:
raise NotImplementedError("dropna currently only works for axis=0 or axis='index'")
# TODO: add 'limit' when value parameter exists
def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None):
"""Fill NA/NaN values.
.. note:: the current implementation of 'method' parameter in fillna uses Spark's Window
without specifying partition specification. This leads to moving all data into a
single partition in a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
value : scalar, dict, Series
Value to use to fill holes. Alternately a dict/Series of values
specifying which value to use for each column.
DataFrame is not supported.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series.
pad / ffill: propagate last valid observation forward to next valid.
backfill / bfill: use NEXT valid observation to fill gap.
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 1.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 1.0 4
"""
sdf = self._sdf
if value is not None:
if axis is None:
axis = 0
if not (axis == 0 or axis == "index"):
raise NotImplementedError("fillna currently only works for axis=0 or axis='index'")
if not isinstance(value, (float, int, str, bool, dict, pd.Series)):
raise TypeError("Unsupported type %s" % type(value))
if isinstance(value, pd.Series):
value = value.to_dict()
if isinstance(value, dict):
for v in value.values():
if not isinstance(v, (float, int, str, bool)):
raise TypeError("Unsupported type %s" % type(v))
value = {self._internal.column_name_for(key): value for key, value in value.items()}
if limit is not None:
raise ValueError('limit parameter for value is not supported now')
sdf = sdf.fillna(value)
internal = self._internal.copy(sdf=sdf)
else:
if method is None:
raise ValueError("Must specify a fillna 'value' or 'method' parameter.")
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].fillna(value=value, method=method, axis=axis,
inplace=False, limit=limit))
sdf = self._sdf.select(self._internal.index_scols + [col._scol for col in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[col._internal.data_columns[0]
for col in applied],
column_index=[col._internal.column_index[0]
for col in applied])
if inplace:
self._internal = internal
else:
return DataFrame(internal)
# TODO: add 'downcast' when value parameter exists
def bfill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='bfill'``.
.. note:: the current implementation of 'bfill' uses Spark's Window
without specifying partition specification. This leads to moving all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values backward.
>>> df.bfill()
A B C D
0 3.0 2.0 1.0 0
1 3.0 4.0 1.0 1
2 NaN 3.0 1.0 5
3 NaN 3.0 1.0 4
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
# TODO: add 'downcast' when value parameter exists
def ffill(self, axis=None, inplace=False, limit=None):
"""
Synonym for `DataFrame.fillna()` with ``method='ffill'``.
.. note:: the current implementation of 'ffill' uses Spark's Window
without specifying partition specification. This leads to moving all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
axis : {0 or `index`}
1 and `columns` are not supported.
inplace : boolean, default False
Fill in place (do not create a new object)
limit : int, default None
If method is specified, this is the maximum number of consecutive NaN values to
forward/backward fill. In other words, if there is a gap with more than this number of
consecutive NaNs, it will only be partially filled. If method is not specified,
this is the maximum number of entries along the entire axis where NaNs will be filled.
Must be greater than 0 if not None
Returns
-------
DataFrame
DataFrame with NA entries filled.
Examples
--------
>>> df = ks.DataFrame({
... 'A': [None, 3, None, None],
... 'B': [2, 4, None, 3],
... 'C': [None, None, None, 1],
... 'D': [0, 1, 5, 4]
... },
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 1.0 4
Propagate non-null values forward.
>>> df.ffill()
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 1.0 4
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
def replace(self, to_replace=None, value=None, subset=None, inplace=False,
limit=None, regex=False, method='pad'):
"""
Returns a new DataFrame replacing a value with another value.
Parameters
----------
to_replace : int, float, string, or list
Value to be replaced. If the value is a dict, then value is ignored and
to_replace must be a mapping from column name (string) to replacement value.
The value to be replaced must be an int, float, or string.
value : int, float, string, or list
Value to use to replace holes. The replacement value must be an int, float,
or string. If value is a list, value should be of the same length with to_replace.
subset : string, list
Optional list of column names to consider. Columns specified in subset that
do not have matching data type are ignored. For example, if value is a string,
and subset contains a non-string column, then the non-string column is simply ignored.
inplace : boolean, default False
Fill in place (do not create a new object)
Returns
-------
DataFrame
Object after replacement.
Examples
--------
>>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'],
... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']},
... columns=['name', 'weapon'])
>>> df
name weapon
0 Ironman Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
Scalar `to_replace` and `value`
>>> df.replace('Ironman', 'War-Machine')
name weapon
0 War-Machine Mark-45
1 Captain America Shield
2 Thor Mjolnir
3 Hulk Smash
List like `to_replace` and `value`
>>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True)
>>> df
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Mjolnir
3 Hulk Smash
Replacing value by specifying column
>>> df.replace('Mjolnir', 'Stormbuster', subset='weapon')
name weapon
0 Rescue Mark-45
1 Hawkeye Shield
2 Thor Stormbuster
3 Hulk Smash
Dict like `to_replace`
>>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']},
... columns=['A', 'B', 'C'])
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
Notes
-----
One difference between this implementation and pandas is that it is necessary
to specify the column name when you are passing a dictionary in the `to_replace`
parameter. Calling `replace` on its index such as `df.replace({0: 10, 1: 100})` will
throw an error. Instead, specify the column name like `df.replace({'A': {0: 10, 1: 100}})`.
"""
if method != 'pad':
raise NotImplementedError("replace currently works only for method='pad")
if limit is not None:
raise NotImplementedError("replace currently works only when limit=None")
if regex is not False:
raise NotImplementedError("replace currently doesn't supports regex")
if value is not None and not isinstance(value, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(value)))
if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)):
raise TypeError("Unsupported type {}".format(type(to_replace)))
if isinstance(value, list) and isinstance(to_replace, list):
if len(value) != len(to_replace):
raise ValueError('Length of to_replace and value must be same')
# TODO: Do we still need to support this argument?
if subset is None:
subset = self._internal.column_index
elif isinstance(subset, str):
subset = [(subset,)]
elif isinstance(subset, tuple):
subset = [subset]
else:
subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
subset = [self._internal.column_name_for(idx) for idx in subset]
sdf = self._sdf
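# Three cases, mirroring Spark's DataFrame.replace semantics:
# 1) a flat dict as `to_replace` (old -> new) with no `value`: pass it straight through;
# 2) a dict keyed by column name: apply per-column replacement (nested dict or scalar);
# 3) otherwise: scalar/list `to_replace` and `value` applied over `subset`.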
if isinstance(to_replace, dict) and value is None and \
(not any(isinstance(i, dict) for i in to_replace.values())):
sdf = sdf.replace(to_replace, value, subset)
elif isinstance(to_replace, dict):
for name, replacement in to_replace.items():
if isinstance(name, str):
name = (name,)
df_column = self._internal.column_name_for(name)
if isinstance(replacement, dict):
sdf = sdf.replace(replacement, subset=df_column)
else:
sdf = sdf.withColumn(df_column,
F.when(scol_for(sdf, df_column) == replacement, value)
.otherwise(scol_for(sdf, df_column)))
else:
sdf = sdf.replace(to_replace, value, subset)
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) \
-> 'DataFrame':
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Parameters
----------
lower : float or int, default None
Minimum threshold value. All values below this threshold will be set to it.
upper : float or int, default None
Maximum threshold value. All values above this threshold will be set to it.
Returns
-------
DataFrame
DataFrame with the values outside the clip boundaries replaced.
Examples
--------
>>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3)
A
0 1
1 2
2 3
Notes
-----
One difference between this implementation and pandas is that running
pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported
between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1)
will output the original DataFrame, simply ignoring the incompatible types.
"""
if is_list_like(lower) or is_list_like(upper):
raise ValueError("List-like value are not supported for 'lower' and 'upper' at the " +
"moment")
if lower is None and upper is None:
return self
numeric_types = (DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType,
ShortType)
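# Only numeric columns are clipped; non-numeric columns are carried through unchanged,
# as noted in the docstring above.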
numeric_columns = [(c, self._internal.scol_for(c)) for c in self.columns
if isinstance(self._internal.spark_type_for(c), numeric_types)]
if lower is not None:
numeric_columns = [(c, F.when(scol < lower, lower).otherwise(scol).alias(c))
for c, scol in numeric_columns]
if upper is not None:
numeric_columns = [(c, F.when(scol > upper, upper).otherwise(scol).alias(c))
for c, scol in numeric_columns]
nonnumeric_columns = [self._internal.scol_for(c) for c in self.columns
if not isinstance(self._internal.spark_type_for(c), numeric_types)]
sdf = self._sdf.select([scol for _, scol in numeric_columns] + nonnumeric_columns)
return ks.DataFrame(sdf)[list(self.columns)]
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
Examples
--------
>>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return DataFrame(self._internal.copy(sdf=self._sdf.limit(n)))
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None):
"""
Create a spreadsheet-style pivot table as a DataFrame. The levels in
the pivot table will be stored in MultiIndex objects (hierarchical
indexes) on the index and columns of the result DataFrame.
Parameters
----------
values : column to aggregate.
It should be either a string or a list of fewer than three columns.
index : column (string) or list of columns
If an array is passed, it must be the same length as the data.
The list should contain string.
columns : column
Columns used in the pivot operation. Only one column is supported and
it should be a string.
aggfunc : function (string), dict, default mean
If dict is passed, the resulting pivot table will have
columns concatenated by "_" where the first part is the value
of columns and the second part is the column name in values
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with.
Returns
-------
table : DataFrame
Examples
--------
>>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]},
... columns=['A', 'B', 'C', 'D', 'E'])
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum')
>>> table # doctest: +NORMALIZE_WHITESPACE
C large small
A B
foo one 4.0 1
two NaN 6
bar two 7.0 6
one 4.0 5
We can also fill missing values using the `fill_value` parameter.
>>> table = df.pivot_table(values='D', index=['A', 'B'],
... columns='C', aggfunc='sum', fill_value=0)
>>> table # doctest: +NORMALIZE_WHITESPACE
C large small
A B
foo one 4 1
two 0 6
bar two 7 6
one 4 5
We can also calculate multiple types of aggregations for any given
value column.
>>> table = df.pivot_table(values=['D'], index =['C'],
... columns="A", aggfunc={'D': 'mean'})
>>> table # doctest: +NORMALIZE_WHITESPACE
D
A bar foo
C
small 5.5 2.333333
large 5.5 2.000000
The next example aggregates on multiple values.
>>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'],
... aggfunc={'D': 'mean', 'E': 'sum'})
>>> table # doctest: +NORMALIZE_WHITESPACE
D E
A bar foo bar foo
C
small 5.5 2.333333 17 13
large 5.5 2.000000 15 9
"""
if not isinstance(columns, (str, tuple)):
raise ValueError("columns should be string or tuple.")
if not isinstance(values, (str, tuple)) and not isinstance(values, list):
raise ValueError('values should be string or list of one column.')
if not isinstance(aggfunc, str) and \
(not isinstance(aggfunc, dict) or
not all(isinstance(key, (str, tuple)) and isinstance(value, str)
for key, value in aggfunc.items())):
raise ValueError("aggfunc must be a dict mapping from column name (string or tuple) "
"to aggregate functions (string).")
if isinstance(aggfunc, dict) and index is None:
raise NotImplementedError("pivot_table doesn't support aggfunc"
" as dict and without index.")
if isinstance(values, list) and index is None:
raise NotImplementedError("values can't be a list without index.")
if columns not in self.columns:
raise ValueError("Wrong columns {}.".format(columns))
if isinstance(values, list):
values = [col if isinstance(col, tuple) else (col,) for col in values]
if not all(isinstance(self._internal.spark_type_for(col), NumericType)
for col in values):
raise TypeError('values should be a numeric type.')
else:
values = values if isinstance(values, tuple) else (values,)
if not isinstance(self._internal.spark_type_for(values), NumericType):
raise TypeError('values should be a numeric type.')
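# Build Spark SQL aggregate expressions of the form "<func>(`col`) as `col`" for each
# value column, either from the single aggfunc string or from the per-column dict.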
if isinstance(aggfunc, str):
if isinstance(values, list):
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
.format(self._internal.column_name_for(value), aggfunc))
for value in values]
else:
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
.format(self._internal.column_name_for(values), aggfunc))]
elif isinstance(aggfunc, dict):
aggfunc = {key if isinstance(key, tuple) else (key,): value
for key, value in aggfunc.items()}
agg_cols = [F.expr('{1}(`{0}`) as `{0}`'
.format(self._internal.column_name_for(key), value))
for key, value in aggfunc.items()]
agg_columns = [key for key, _ in aggfunc.items()]
if set(agg_columns) != set(values):
raise ValueError("Columns in aggfunc must be the same as values.")
if index is None:
sdf = self._sdf.groupBy() \
.pivot(pivot_col=self._internal.column_name_for(columns)).agg(*agg_cols)
elif isinstance(index, list):
index = [idx if isinstance(idx, tuple) else (idx,) for idx in index]
sdf = self._sdf.groupBy([self._internal.scol_for(idx) for idx in index]) \
.pivot(pivot_col=self._internal.column_name_for(columns)).agg(*agg_cols)
else:
raise ValueError("index should be a None or a list of columns.")
if fill_value is not None and isinstance(fill_value, (int, float)):
sdf = sdf.fillna(fill_value)
if index is not None:
if isinstance(values, list):
index_columns = [self._internal.column_name_for(idx) for idx in index]
data_columns = [column for column in sdf.columns if column not in index_columns]
if len(values) > 1:
# If we have two values, Spark will return column's name
# in this format: column_values, where column contains
# their values in the DataFrame and values is
# the column list passed to the pivot_table().
# E.g. if column is b and values is ['b','e'],
# then ['2_b', '2_e', '3_b', '3_e'].
# We sort the columns of Spark DataFrame by values.
data_columns.sort(key=lambda x: x.split('_', 1)[1])
sdf = sdf.select(index_columns + data_columns)
column_name_to_index = dict(zip(self._internal.data_columns,
self._internal.column_index))
column_index = [tuple(list(column_name_to_index[name.split('_')[1]])
+ [name.split('_')[0]])
for name in data_columns]
index_map = list(zip(index_columns, index))
column_index_names = (([None] * column_index_level(values))
+ [str(columns) if len(columns) > 1 else columns[0]])
internal = _InternalFrame(sdf=sdf,
index_map=index_map,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
kdf = DataFrame(internal)
else:
column_index = [tuple(list(values[0]) + [column]) for column in data_columns]
index_map = list(zip(index_columns, index))
column_index_names = (([None] * len(values[0]))
+ [str(columns) if len(columns) > 1 else columns[0]])
internal = _InternalFrame(sdf=sdf,
index_map=index_map,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
kdf = DataFrame(internal)
return kdf
else:
index_columns = [self._internal.column_name_for(idx) for idx in index]
index_map = list(zip(index_columns, index))
data_columns = [column for column in sdf.columns if column not in index_columns]
column_index_names = [str(columns) if len(columns) > 1 else columns[0]]
internal = _InternalFrame(sdf=sdf,
index_map=index_map, data_columns=data_columns,
column_index_names=column_index_names)
return DataFrame(internal)
else:
if isinstance(values, list):
index_values = values[-1]
else:
index_values = values
index_map = []
for i, index_value in enumerate(index_values):
colname = SPARK_INDEX_NAME_FORMAT(i)
sdf = sdf.withColumn(colname, F.lit(index_value))
index_map.append((colname, None))
column_index_names = [str(columns) if len(columns) > 1 else columns[0]]
internal = _InternalFrame(sdf=sdf,
index_map=index_map,
column_index_names=column_index_names)
return DataFrame(internal)
def pivot(self, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation.
Parameters
----------
index : string, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : string
Column to use to make new frame's columns.
values : string, object or a list of the previous
Column(s) to use for populating new frame's values.
Returns
-------
DataFrame
Returns reshaped DataFrame.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
Examples
--------
>>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']},
... columns=['foo', 'bar', 'baz', 'zoo'])
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE
bar A B C
0 1.0 NaN NaN
1 NaN 2.0 NaN
2 NaN NaN 3.0
3 4.0 NaN NaN
4 NaN 5.0 NaN
5 NaN NaN 6.0
Notice that, unlike pandas, which raises a ValueError when duplicated values are found,
Koalas' pivot still works with the first value it meets during the operation, because pivot
is an expensive operation and it is preferred to execute permissively rather than fail fast
when processing large data.
>>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz'])
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
>>> df.pivot(index='foo', columns='bar', values='baz').sort_index()
... # doctest: +NORMALIZE_WHITESPACE
bar A B C
foo
one 1.0 NaN NaN
two NaN 3.0 4.0
"""
if columns is None:
raise ValueError("columns should be set.")
if values is None:
raise ValueError("values should be set.")
should_use_existing_index = index is not None
if should_use_existing_index:
df = self
index = [index]
else:
df = self.copy()
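# No index was given: temporarily append a monotonically increasing dummy column as the
# frame's index so the original index values can be pulled out as regular columns and
# reused as the pivot index; aggfunc='first' then picks the first value per cell.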
df['__DUMMY__'] = F.monotonically_increasing_id()
df.set_index('__DUMMY__', append=True, inplace=True)
df.reset_index(level=range(len(df._internal.index_map) - 1), inplace=True)
index = df._internal.column_index[:len(df._internal.index_map)]
df = df.pivot_table(
index=index, columns=columns, values=values, aggfunc='first')
if should_use_existing_index:
return df
else:
index_columns = df._internal.index_columns
# Note that the existing indexing column won't exist in the pivoted DataFrame.
internal = df._internal.copy(
index_map=[(index_column, None) for index_column in index_columns])
return DataFrame(internal)
@property
def columns(self):
"""The column labels of the DataFrame."""
if self._internal.column_index_level > 1:
columns = pd.MultiIndex.from_tuples(self._internal.column_index)
else:
columns = pd.Index([idx[0] for idx in self._internal.column_index])
if self._internal.column_index_names is not None:
columns.names = self._internal.column_index_names
return columns
@columns.setter
def columns(self, columns):
if isinstance(columns, pd.MultiIndex):
column_index = columns.tolist()
old_names = self._internal.column_index
if len(old_names) != len(column_index):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(column_index)))
column_index_names = columns.names
data_columns = [str(idx) if len(idx) > 1 else idx[0] for idx in column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
self._internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
else:
old_names = self._internal.column_index
if len(old_names) != len(columns):
raise ValueError(
"Length mismatch: Expected axis has %d elements, new values have %d elements"
% (len(old_names), len(columns)))
column_index = [col if isinstance(col, tuple) else (col,) for col in columns]
if isinstance(columns, pd.Index):
column_index_names = columns.names
else:
column_index_names = None
data_columns = [str(idx) if len(idx) > 1 else idx[0] for idx in column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
self._internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index,
column_index_names=column_index_names)
@property
def dtypes(self):
"""Return the dtypes in the DataFrame.
This returns a Series with the data type of each column. The result's index is the original
DataFrame's columns. Columns with mixed types are stored with the object dtype.
Returns
-------
pd.Series
The data type of each column.
Examples
--------
>>> df = ks.DataFrame({'a': list('abc'),
... 'b': list(range(1, 4)),
... 'c': np.arange(3, 6).astype('i1'),
... 'd': np.arange(4.0, 7.0, dtype='float64'),
... 'e': [True, False, True],
... 'f': pd.date_range('20130101', periods=3)},
... columns=['a', 'b', 'c', 'd', 'e', 'f'])
>>> df.dtypes
a object
b int64
c int8
d float64
e bool
f datetime64[ns]
dtype: object
"""
return pd.Series([self[idx].dtype for idx in self._internal.column_index],
index=pd.Index([idx if len(idx) > 1 else idx[0]
for idx in self._internal.column_index]))
def select_dtypes(self, include=None, exclude=None):
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied. It also takes Spark SQL
DDL type strings, for instance, 'string' and 'date'.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes()
Traceback (most recent call last):
...
ValueError: at least one of include or exclude must be nonempty
* If ``include`` and ``exclude`` have overlapping elements
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df.select_dtypes(include='a', exclude='a')
Traceback (most recent call last):
...
TypeError: string dtypes are not allowed, use 'object' instead
Notes
-----
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3,
... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd'])
>>> df
a b c d
0 1 True 1.0 a
1 2 False 2.0 b
2 1 True 1.0 a
3 2 False 2.0 b
4 1 True 1.0 a
5 2 False 2.0 b
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'], exclude=['int'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c d
0 True 1.0 a
1 False 2.0 b
2 True 1.0 a
3 False 2.0 b
4 True 1.0 a
5 False 2.0 b
Spark SQL DDL type strings can be used as well.
>>> df.select_dtypes(exclude=['string'])
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
"""
from pyspark.sql.types import _parse_datatype_string
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
if not any((include, exclude)):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# can't both include AND exclude!
if set(include).intersection(set(exclude)):
raise ValueError('include and exclude overlap on {inc_ex}'.format(
inc_ex=set(include).intersection(set(exclude))))
# Handle Spark types
include_spark_type = []
for inc in include:
try:
include_spark_type.append(_parse_datatype_string(inc))
except:
pass
exclude_spark_type = []
for exc in exclude:
try:
exclude_spark_type.append(_parse_datatype_string(exc))
except:
pass
# Handle Pandas types
include_numpy_type = []
for inc in include:
try:
include_numpy_type.append(infer_dtype_from_object(inc))
except:
pass
exclude_numpy_type = []
for exc in exclude:
try:
exclude_numpy_type.append(infer_dtype_from_object(exc))
except:
pass
columns = []
column_index = []
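# A column is kept when its numpy dtype or Spark type matches `include` (if given);
# otherwise it is kept only when it matches neither the numpy nor the Spark entries
# in `exclude`.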
for idx in self._internal.column_index:
if len(include) > 0:
should_include = (
infer_dtype_from_object(self[idx].dtype.name) in include_numpy_type or
self._internal.spark_type_for(idx) in include_spark_type)
else:
should_include = not (
infer_dtype_from_object(self[idx].dtype.name) in exclude_numpy_type or
self._internal.spark_type_for(idx) in exclude_spark_type)
if should_include:
columns.append(self._internal.column_name_for(idx))
column_index.append(idx)
return DataFrame(self._internal.copy(
sdf=self._sdf.select(self._internal.index_scols +
[self._internal.scol_for(col) for col in columns]),
data_columns=columns, column_index=column_index))
def count(self, axis=None):
"""
Count non-NA cells for each column.
The values `None`, `NaN` are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column. If 1 or 'columns' counts are
generated for each row.
Returns
-------
pandas.Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = ks.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]},
... columns=["Person", "Age", "Single"])
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
>>> df.count(axis=1)
0 3
1 2
2 3
3 3
4 3
Name: 0, dtype: int64
"""
return self._reduce_for_stat_function(
_Frame._count_expr, name="count", axis=axis, numeric_only=False)
def drop(self, labels=None, axis=1,
columns: Union[str, Tuple[str, ...], List[str], List[Tuple[str, ...]]] = None):
"""
Drop specified labels from columns.
Remove columns by specifying label names and axis=1 or columns.
When specifying both labels and columns, only labels will be dropped.
Removing rows is yet to be implemented.
Parameters
----------
labels : single label or list-like
Column labels to drop.
axis : {1 or 'columns'}, default 1
.. drop currently only works for axis=1 'columns'
axis=0 is yet to be implemented.
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
Returns
-------
dropped : DataFrame
See Also
--------
Series.dropna
Examples
--------
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> df
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('x', axis=1)
y z w
0 3 5 7
1 4 6 8
>>> df.drop(['y', 'z'], axis=1)
x w
0 1 7
1 2 8
>>> df.drop(columns=['y', 'z'])
x w
0 1 7
1 2 8
Also support for MultiIndex
>>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]},
... columns=['x', 'y', 'z', 'w'])
>>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')]
>>> df.columns = pd.MultiIndex.from_tuples(columns)
>>> df # doctest: +NORMALIZE_WHITESPACE
a b
x y z w
0 1 3 5 7
1 2 4 6 8
>>> df.drop('a') # doctest: +NORMALIZE_WHITESPACE
b
z w
0 5 7
1 6 8
Notes
-----
Currently only axis = 1 is supported in this function,
axis = 0 is yet to be implemented.
"""
if labels is not None:
axis = self._validate_axis(axis)
if axis == 1:
return self.drop(columns=labels)
raise NotImplementedError("Drop currently only works for axis=1")
elif columns is not None:
if isinstance(columns, str):
columns = [(columns,)] # type: ignore
elif isinstance(columns, tuple):
columns = [columns]
else:
columns = [col if isinstance(col, tuple) else (col,) # type: ignore
for col in columns]
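# For MultiIndex columns a label matches by prefix, e.g. dropping 'a' removes both
# ('a', 'x') and ('a', 'y'); plain string labels behave like one-element tuples.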
drop_column_index = set(idx for idx in self._internal.column_index
for col in columns
if idx[:len(col)] == col)
if len(drop_column_index) == 0:
raise KeyError(columns)
cols, idxes = zip(*((column, idx)
for column, idx
in zip(self._internal.data_columns, self._internal.column_index)
if idx not in drop_column_index))
internal = self._internal.copy(
sdf=self._sdf.select(
self._internal.index_scols + [self._internal.scol_for(idx) for idx in idxes]),
data_columns=list(cols),
column_index=list(idxes))
return DataFrame(internal)
else:
raise ValueError("Need to specify at least one of 'labels' or 'columns'")
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
Examples
--------
>>> df = ks.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},
... columns=['x', 'y', 'z'])
>>> df
x y z
0 0 a a
1 1 b b
2 2 b b
>>> df.get('x')
0 0
1 1
2 2
Name: x, dtype: int64
>>> df.get(['x', 'y'])
x y
0 0 a
1 1 b
2 2 b
"""
try:
return self._pd_getitem(key)
except (KeyError, ValueError, IndexError):
return default
def _sort(self, by: List[Column], ascending: Union[bool, List[bool]],
inplace: bool, na_position: str):
if isinstance(ascending, bool):
ascending = [ascending] * len(by)
if len(ascending) != len(by):
raise ValueError('Length of ascending ({}) != length of by ({})'
.format(len(ascending), len(by)))
if na_position not in ('first', 'last'):
raise ValueError("invalid na_position: '{}'".format(na_position))
# Mapper: Get a spark column function for (ascending, na_position) combination
# Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847.
mapper = {
(True, 'first'): lambda x: Column(getattr(x._jc, "asc_nulls_first")()),
(True, 'last'): lambda x: Column(getattr(x._jc, "asc_nulls_last")()),
(False, 'first'): lambda x: Column(getattr(x._jc, "desc_nulls_first")()),
(False, 'last'): lambda x: Column(getattr(x._jc, "desc_nulls_last")()),
}
by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)]
kdf = DataFrame(self._internal.copy(sdf=self._sdf.sort(*by))) # type: ks.DataFrame
if inplace:
self._internal = kdf._internal
return None
else:
return kdf
def sort_values(self, by: Union[str, List[str]], ascending: Union[bool, List[bool]] = True,
inplace: bool = False, na_position: str = 'last') -> Optional['DataFrame']:
"""
Sort by the values along either axis.
Parameters
----------
by : str or list of str
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({
... 'col1': ['A', 'B', None, 'D', 'C'],
... 'col2': [2, 9, 8, 7, 4],
... 'col3': [0, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df
col1 col2 col3
0 A 2 0
1 B 9 9
2 None 8 4
3 D 7 2
4 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 B 9 9
4 C 4 3
3 D 7 2
2 None 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
3 D 7 2
4 C 4 3
1 B 9 9
0 A 2 0
2 None 8 4
Sort by multiple columns
>>> df = ks.DataFrame({
... 'col1': ['A', 'A', 'B', None, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... },
... columns=['col1', 'col2', 'col3'])
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 None 8 4
"""
if isinstance(by, str):
by = [by]
by = [self[colname]._scol for colname in by]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
def sort_index(self, axis: int = 0,
level: Optional[Union[int, List[int]]] = None, ascending: bool = True,
inplace: bool = False, kind: str = None, na_position: str = 'last') \
-> Optional['DataFrame']:
"""
Sort object by labels (along an axis)
Parameters
----------
axis : index, columns to direct sorting. Currently, only axis = 0 is supported.
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : str, default None
Koalas does not allow specifying the sorting algorithm at the moment, default None
na_position : {'first', 'last'}, default 'last'
first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for
MultiIndex.
Returns
-------
sorted_obj : DataFrame
Examples
--------
>>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan])
>>> df.sort_index()
A
a 1.0
b 2.0
NaN NaN
>>> df.sort_index(ascending=False)
A
b 2.0
a 1.0
NaN NaN
>>> df.sort_index(na_position='first')
A
NaN NaN
a 1.0
b 2.0
>>> df.sort_index(inplace=True)
>>> df
A
a 1.0
b 2.0
NaN NaN
>>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]},
... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]],
... columns=['A', 'B'])
>>> df.sort_index()
A B
a 0 3 0
1 2 1
b 0 1 2
1 0 3
>>> df.sort_index(level=1) # doctest: +SKIP
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
>>> df.sort_index(level=[1, 0])
A B
a 0 3 0
b 0 1 2
a 1 2 1
b 1 0 3
"""
if axis != 0:
raise ValueError("No other axes than 0 are supported at the moment")
if kind is not None:
raise ValueError("Specifying the sorting algorithm is supported at the moment.")
if level is None or (is_list_like(level) and len(level) == 0): # type: ignore
by = self._internal.index_scols
elif is_list_like(level):
by = [self._internal.index_scols[l] for l in level] # type: ignore
else:
by = [self._internal.index_scols[level]]
return self._sort(by=by, ascending=ascending,
inplace=inplace, na_position=na_position)
# TODO: add keep = First
def nlargest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant in Pandas.
In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "X".
>>> df.nlargest(n=3, columns='X')
X Y
5 7.0 11
4 6.0 10
3 5.0 9
>>> df.nlargest(n=3, columns=['Y', 'X'])
X Y
6 NaN 12
5 7.0 11
4 6.0 10
"""
kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
# TODO: add keep = First
def nsmallest(self, n: int, columns: 'Any') -> 'DataFrame':
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``,
but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer,
the two would have the same performance.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan],
... 'Y': [6, 7, 8, 9, 10, 11, 12]})
>>> df
X Y
0 1.0 6
1 2.0 7
2 3.0 8
3 5.0 9
4 6.0 10
5 7.0 11
6 NaN 12
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "X".
>>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
To order by the smallest values in column "Y" and then "X", we can
specify multiple columns like in the next example.
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE
X Y
0 1.0 6
1 2.0 7
2 3.0 8
"""
kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame]
assert kdf is not None
return kdf.head(n=n)
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable or dict
The sequence of values to test. If values is a dict,
the keys must be the column names, which must match.
Series and DataFrame are not supported.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'],
... columns=['num_legs', 'num_wings'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
"""
if isinstance(values, (pd.DataFrame, pd.Series)):
raise NotImplementedError("DataFrame and Series are not supported")
if isinstance(values, dict) and not set(values.keys()).issubset(self.columns):
raise AttributeError(
"'DataFrame' object has no attribute %s"
% (set(values.keys()).difference(self.columns)))
_select_columns = self._internal.index_columns.copy()
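# For a dict, each listed column is tested against its own values and unlisted columns
# become False; for a plain iterable, every column is tested against the same values.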
if isinstance(values, dict):
for col in self.columns:
if col in values:
_select_columns.append(self._internal.scol_for(col)
.isin(values[col]).alias(col))
else:
_select_columns.append(F.lit(False).alias(col))
elif is_list_like(values):
_select_columns += [
self._internal.scol_for(col).isin(list(values)).alias(col)
for col in self.columns]
else:
raise TypeError('Values should be iterable, Series, DataFrame or dict.')
return DataFrame(self._internal.copy(sdf=self._sdf.select(_select_columns)))
@property
def shape(self):
"""
Return a tuple representing the dimensionality of the DataFrame.
Examples
--------
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self), len(self.columns)
def merge(self, right: 'DataFrame', how: str = 'inner',
on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
left_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
right_on: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]] = None,
left_index: bool = False, right_index: bool = False,
suffixes: Tuple[str, str] = ('_x', '_y')) -> 'DataFrame':
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = df1.merge(df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y'])
lkey value_x rkey value_y
0 bar 2 bar 6
5 baz 3 baz 7
1 foo 1 foo 5
2 foo 1 foo 8
3 foo 5 foo 5
4 foo 5 foo 8
>>> left_kdf = ks.DataFrame({'A': [1, 2]})
>>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True)
A B
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left')
A B
0 1 None
1 2 x
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right')
A B
1 2.0 x
2 NaN y
>>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer')
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
_to_list = lambda os: (os if os is None
else [os] if isinstance(os, tuple)
else [(os,)] if isinstance(os, str)
else [o if isinstance(o, tuple) else (o,) # type: ignore
for o in os])
if isinstance(right, ks.Series):
right = right.to_frame()
if on:
if left_on or right_on:
raise ValueError('Can only pass argument "on" OR "left_on" and "right_on", '
'not a combination of both.')
left_keys = _to_list(on)
right_keys = _to_list(on)
else:
# TODO: need special handling for multi-index.
if left_index:
left_keys = self._internal.index_columns
else:
left_keys = _to_list(left_on)
if right_index:
right_keys = right._internal.index_columns
else:
right_keys = _to_list(right_on)
if left_keys and not right_keys:
raise ValueError('Must pass right_on or right_index=True')
if right_keys and not left_keys:
raise ValueError('Must pass left_on or left_index=True')
if not left_keys and not right_keys:
common = list(self.columns.intersection(right.columns))
if len(common) == 0:
raise ValueError(
'No common columns to perform merge on. Merge options: '
'left_on=None, right_on=None, left_index=False, right_index=False')
left_keys = _to_list(common)
right_keys = _to_list(common)
if len(left_keys) != len(right_keys): # type: ignore
raise ValueError('len(left_keys) must equal len(right_keys)')
if how == 'full':
warnings.warn("Warning: While Koalas will accept 'full', you should use 'outer' " +
"instead to be compatible with the pandas merge API", UserWarning)
if how == 'outer':
# 'outer' in pandas equals 'full' in Spark
how = 'full'
if how not in ('inner', 'left', 'right', 'full'):
raise ValueError("The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']")
left_table = self._sdf.alias('left_table')
right_table = right._sdf.alias('right_table')
left_scol_for = lambda idx: scol_for(left_table, self._internal.column_name_for(idx))
right_scol_for = lambda idx: scol_for(right_table, right._internal.column_name_for(idx))
left_key_columns = [left_scol_for(idx) for idx in left_keys] # type: ignore
right_key_columns = [right_scol_for(idx) for idx in right_keys] # type: ignore
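# AND together the pairwise key equalities to form a single equi-join condition
# between the aliased left and right tables.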
join_condition = reduce(lambda x, y: x & y,
[lkey == rkey for lkey, rkey
in zip(left_key_columns, right_key_columns)])
joined_table = left_table.join(right_table, join_condition, how=how)
# Unpack suffixes tuple for convenience
left_suffix = suffixes[0]
right_suffix = suffixes[1]
# Append suffixes to columns with the same name to avoid conflicts later
duplicate_columns = (set(self._internal.column_index)
& set(right._internal.column_index))
exprs = []
data_columns = []
column_index = []
for idx in self._internal.column_index:
col = self._internal.column_name_for(idx)
scol = left_scol_for(idx)
if idx in duplicate_columns:
if idx in left_keys and idx in right_keys: # type: ignore
right_scol = right_scol_for(idx)
if how == 'right':
scol = right_scol
elif how == 'full':
scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col)
else:
pass
else:
col = col + left_suffix
scol = scol.alias(col)
idx = tuple([idx[0] + left_suffix] + list(idx[1:]))
exprs.append(scol)
data_columns.append(col)
column_index.append(idx)
for idx in right._internal.column_index:
col = right._internal.column_name_for(idx)
scol = right_scol_for(idx)
if idx in duplicate_columns:
if idx in left_keys and idx in right_keys: # type: ignore
continue
else:
col = col + right_suffix
scol = scol.alias(col)
idx = tuple([idx[0] + right_suffix] + list(idx[1:]))
exprs.append(scol)
data_columns.append(col)
column_index.append(idx)
left_index_scols = self._internal.index_scols
right_index_scols = right._internal.index_scols
# Retain indices if they are used for joining
if left_index:
if right_index:
if how in ('inner', 'left'):
exprs.extend(left_index_scols)
index_map = self._internal.index_map
elif how == 'right':
exprs.extend(right_index_scols)
index_map = right._internal.index_map
else:
index_map = []
for (col, name), left_scol, right_scol in zip(self._internal.index_map,
left_index_scols,
right_index_scols):
scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol)
exprs.append(scol.alias(col))
index_map.append((col, name))
else:
exprs.extend(right_index_scols)
index_map = right._internal.index_map
elif right_index:
exprs.extend(left_index_scols)
index_map = self._internal.index_map
else:
index_map = []
selected_columns = joined_table.select(*exprs)
internal = _InternalFrame(sdf=selected_columns,
index_map=index_map if index_map else None,
data_columns=data_columns,
column_index=column_index)
return DataFrame(internal)
def join(self, right: 'DataFrame',
on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None,
how: str = 'left', lsuffix: str = '', rsuffix: str = '') -> 'DataFrame':
"""
Join columns of another DataFrame.
Join columns with `right` DataFrame either on index or on a key column. Efficiently join
multiple DataFrame objects by index at once by passing a list.
Parameters
----------
right: DataFrame, Series
on: str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index in `right`, otherwise
joins index-on-index. If multiple values given, the `right` DataFrame must have a
MultiIndex. Can pass an array as the join key if it is not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation.
how: {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use `left` frame's index (or column if on is specified).
* right: use `right`'s index.
* outer: form union of `left` frame's index (or column if on is specified) with
`right`'s index, and sort it lexicographically.
* inner: form intersection of `left` frame's index (or column if on is specified)
with `right`'s index, preserving the order of the `left`'s one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from `right` frame's overlapping columns.
Returns
-------
DataFrame
A dataframe containing columns from both the `left` and `right`.
See Also
--------
DataFrame.merge: For column(s)-on-columns(s) operations.
Notes
-----
Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame
objects.
Examples
--------
>>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
... 'A': ['A0', 'A1', 'A2', 'A3']},
... columns=['key', 'A'])
>>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']},
... columns=['key', 'B'])
>>> kdf1
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
>>> kdf2
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right')
>>> join_kdf.sort_values(by=join_kdf.columns)
key_left A key_right B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 None None
If we want to join using the key columns, we need to set key to be the index in both df and
right. The joined DataFrame will have key as its index.
>>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key'))
>>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 None
Another option to join using the key columns is to use the on parameter. DataFrame.join
always uses right's index but we can use any column in df. This method preserves the
original DataFrame's index in the result.
>>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key')
>>> join_kdf.sort_index()
key A B
0 K3 A3 None
1 K0 A0 B0
2 K1 A1 B1
3 K2 A2 B2
"""
if isinstance(right, ks.Series):
common = list(self.columns.intersection([right.name]))
else:
common = list(self.columns.intersection(right.columns))
if len(common) > 0 and not lsuffix and not rsuffix:
raise ValueError(
"columns overlap but no suffix specified: "
"{rename}".format(rename=common))
if on:
self = self.set_index(on)
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix)).reset_index()
else:
join_kdf = self.merge(right, left_index=True, right_index=True, how=how,
suffixes=(lsuffix, rsuffix))
return join_kdf
def append(self, other: 'DataFrame', ignore_index: bool = False,
verify_integrity: bool = False, sort: bool = False) -> 'DataFrame':
"""
Append rows of other to the end of caller, returning a new object.
Columns in other that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
sort : boolean, default False
Currently not supported.
Returns
-------
appended : DataFrame
Examples
--------
>>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df.append(df)
A B
0 1 2
1 3 4
0 1 2
1 3 4
>>> df.append(df, ignore_index=True)
A B
0 1 2
1 3 4
2 1 2
3 3 4
"""
if isinstance(other, ks.Series):
raise ValueError("DataFrames.append() does not support appending Series to DataFrames")
if sort:
raise ValueError("The 'sort' parameter is currently not supported")
if not ignore_index:
index_scols = self._internal.index_scols
if len(index_scols) != len(other._internal.index_scols):
raise ValueError("Both DataFrames have to have the same number of index levels")
if verify_integrity and len(index_scols) > 0:
if (self._sdf.select(index_scols)
.intersect(other._sdf.select(other._internal.index_scols))
.count()) > 0:
raise ValueError("Indices have overlapping values")
# Lazy import to avoid circular dependency issues
from databricks.koalas.namespace import concat
return concat([self, other], ignore_index=ignore_index)
# TODO: add 'filter_func' and 'errors' parameter
def update(self, other: 'DataFrame', join: str = 'left', overwrite: bool = True):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or Series
join : 'left', default 'left'
Only left join is implemented, keeping the index and columns of the original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values with values from `other`.
* False: only update values that are NA in the original DataFrame.
Returns
-------
None : method directly changes calling object
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C'])
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B'])
>>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
If `other` contains None, the corresponding values are not updated in the original dataframe.
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B'])
>>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B'])
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
if join != 'left':
raise NotImplementedError("Only left join is supported")
if isinstance(other, ks.Series):
other = DataFrame(other)
update_columns = list(set(self._internal.column_index)
.intersection(set(other._internal.column_index)))
update_sdf = self.join(other[update_columns], rsuffix='_new')._sdf
for column_index in update_columns:
column_name = self._internal.column_name_for(column_index)
old_col = scol_for(update_sdf, column_name)
new_col = scol_for(update_sdf, other._internal.column_name_for(column_index) + '_new')
if overwrite:
update_sdf = update_sdf.withColumn(column_name, F.when(new_col.isNull(), old_col)
.otherwise(new_col))
else:
update_sdf = update_sdf.withColumn(column_name, F.when(old_col.isNull(), new_col)
.otherwise(old_col))
internal = self._internal.copy(sdf=update_sdf.select([scol_for(update_sdf, col)
for col in self._internal.columns]))
self._internal = internal
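# Illustrative sketch (assumed example, not from the original docstring): with
# overwrite=False only the caller's missing values are filled, e.g.
#   df = ks.DataFrame({'A': [1.0, None]})
#   other = ks.DataFrame({'A': [10.0, 20.0]})
#   df.update(other, overwrite=False)  # df['A'] becomes [1.0, 20.0]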
def sample(self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False,
random_state: Optional[int] = None) -> 'DataFrame':
"""
Return a random sample of items from an axis of object.
Please call this function by specifying ``frac`` as a named argument.
You can use `random_state` for reproducibility. However, note that different from pandas,
specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The
result set depends on not only the seed, but also how the data is distributed across
machines and to some extent network randomness when shuffle operations are involved. Even
in the simplest case, the result set will depend on the system's CPU core count.
Parameters
----------
n : int, optional
Number of items to return. This is currently NOT supported. Use frac instead.
frac : float, optional
Fraction of axis items to return.
replace : bool, default False
Sample with or without replacement.
random_state : int, optional
Seed for the random number generator (if int).
Returns
-------
Series or DataFrame
A new object of same type as caller containing the sampled items.
Examples
--------
>>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'],
... columns=['num_legs', 'num_wings', 'num_specimen_seen'])
>>> df # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
A random 25% sample of the ``DataFrame``.
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement,
so the same items could appear more than once.
>>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP
falcon 2
spider 8
spider 8
Name: num_legs, dtype: int64
Specifying the exact number of items to return is not supported at the moment.
>>> df.sample(n=5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NotImplementedError: Function sample currently does not support specifying ...
"""
# Note: we don't run any of the doctests because the result can change depending on the
# system's core count.
if n is not None:
raise NotImplementedError("Function sample currently does not support specifying "
"exact number of items to return. Use frac instead.")
if frac is None:
raise ValueError("frac must be specified.")
sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state)
return DataFrame(self._internal.copy(sdf=sdf))
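# Note (sketch): Spark's DataFrame.sample() treats the fraction as a per-row inclusion
# probability rather than an exact ratio, so for example
#   df.sample(frac=0.5, random_state=1)
# may return anywhere from 0 to len(df) rows; only the expected size is frac * len(df).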
def astype(self, dtype) -> 'DataFrame':
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
Examples
--------
>>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64')
>>> df
a b
0 1 1
1 2 2
2 3 3
Convert to float type:
>>> df.astype('float')
a b
0 1.0 1.0
1 2.0 2.0
2 3.0 3.0
Convert to int64 type back:
>>> df.astype('int64')
a b
0 1 1
1 2 2
2 3 3
Convert column a to float type:
>>> df.astype({'a': float})
a b
0 1.0 1
1 2.0 2
2 3.0 3
"""
results = []
if is_dict_like(dtype):
for col_name in dtype.keys():
if col_name not in self.columns:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
for col_name, col in self.items():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name]))
else:
results.append(col)
else:
for col_name, col in self.items():
results.append(col.astype(dtype=dtype))
sdf = self._sdf.select(
self._internal.index_scols + list(map(lambda ser: ser._scol, results)))
return DataFrame(self._internal.copy(sdf=sdf))
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(prefix, str)
data_columns = [prefix + self._internal.column_name_for(idx)
for idx in self._internal.column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
column_index = [tuple([prefix + i for i in idx]) for idx in self._internal.column_index]
internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index)
return DataFrame(internal)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
DataFrame
New DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B'])
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
assert isinstance(suffix, str)
data_columns = [self._internal.column_name_for(idx) + suffix
for idx in self._internal.column_index]
sdf = self._sdf.select(
self._internal.index_scols +
[self._internal.scol_for(idx).alias(name)
for idx, name in zip(self._internal.column_index, data_columns)])
column_index = [tuple([i + suffix for i in idx]) for idx in self._internal.column_index]
internal = self._internal.copy(sdf=sdf,
data_columns=data_columns,
column_index=column_index)
return DataFrame(internal)
# TODO: include, and exclude should be implemented.
def describe(self, percentiles: Optional[List[float]] = None) -> 'DataFrame':
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75]
A list of percentiles to be computed.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``.
Currently only numeric data is supported.
Examples
--------
Describing a numeric ``Series``.
>>> s = ks.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: 0, dtype: float64
Describing a ``DataFrame``. Only numeric fields are returned.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0],
... 'object': ['a', 'b', 'c']
... },
... columns=['numeric1', 'numeric2', 'object'])
>>> df.describe()
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
25% 1.0 4.0
50% 2.0 5.0
75% 3.0 6.0
max 3.0 6.0
Describing a ``DataFrame`` and selecting custom percentiles.
>>> df = ks.DataFrame({'numeric1': [1, 2, 3],
... 'numeric2': [4.0, 5.0, 6.0]
... },
... columns=['numeric1', 'numeric2'])
>>> df.describe(percentiles = [0.85, 0.15])
numeric1 numeric2
count 3.0 3.0
mean 2.0 5.0
std 1.0 1.0
min 1.0 4.0
15% 1.0 4.0
50% 2.0 5.0
85% 3.0 6.0
max 3.0 6.0
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric1.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.0
50% 2.0
75% 3.0
max 3.0
Name: numeric1, dtype: float64
Describing a column from a ``DataFrame`` by accessing it as
an attribute and selecting custom percentiles.
>>> df.numeric1.describe(percentiles = [0.85, 0.15])
count 3.0
mean 2.0
std 1.0
min 1.0
15% 1.0
50% 2.0
85% 3.0
max 3.0
Name: numeric1, dtype: float64
"""
exprs = []
data_columns = []
for col in self.columns:
kseries = self[col]
spark_type = kseries.spark_type
if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType):
exprs.append(F.nanvl(kseries._scol, F.lit(None)).alias(kseries.name))
data_columns.append(kseries.name)
elif isinstance(spark_type, NumericType):
exprs.append(kseries._scol)
data_columns.append(kseries.name)
if len(exprs) == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
if any((p < 0.0) or (p > 1.0) for p in percentiles):
raise ValueError("Percentiles should all be in the interval [0, 1]")
# appending 50% if not in percentiles already
percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles
else:
percentiles = [0.25, 0.5, 0.75]
formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)]
stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"]
sdf = self._sdf.select(*exprs).summary(stats)
internal = _InternalFrame(sdf=sdf.replace("stddev", "std", subset='summary'),
data_columns=data_columns,
index_map=[('summary', None)])
return DataFrame(internal).astype('float64')
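# Sketch of the stats handed to Spark's summary() above for the default percentiles
# (derived from the logic above, not captured output):
#   percentiles = [0.25, 0.5, 0.75]
#   stats == ["count", "mean", "stddev", "min", "25%", "50%", "75%", "max"]
# The "stddev" row is then renamed to "std" so the index matches pandas' describe().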
def _cum(self, func, skipna: bool):
# This is used for cummin, cummax, cumsum, etc.
if func == F.min:
func = "cummin"
elif func == F.max:
func = "cummax"
elif func == F.sum:
func = "cumsum"
elif func.__name__ == "cumprod":
func = "cumprod"
applied = []
for column in self.columns:
applied.append(getattr(self[column], func)(skipna))
sdf = self._sdf.select(
self._internal.index_scols + [c._scol for c in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
# TODO: implements 'keep' parameters
def drop_duplicates(self, subset=None, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
Returns
-------
DataFrame
>>> df = ks.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_values(['a', 'b'])
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_values(['a', 'b'])
a b
0 1 a
1 2 a
3 2 c
4 3 d
"""
if subset is None:
subset = self._internal.column_index
elif isinstance(subset, str):
subset = [(subset,)]
elif isinstance(subset, tuple):
subset = [subset]
else:
subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset]
sdf = self._sdf.drop_duplicates(subset=[self._internal.column_name_for(idx)
for idx in subset])
internal = self._internal.copy(sdf=sdf)
if inplace:
self._internal = internal
else:
return DataFrame(internal)
def reindex(self, labels: Optional[Any] = None, index: Optional[Any] = None,
columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None,
copy: Optional[bool] = True, fill_value: Optional[Any] = None) -> 'DataFrame':
"""
Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ks.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
... # doctest: +NORMALIZE_WHITESPACE
http_status user_agent
Chrome 200 NaN
Comodo Dragon 0 NaN
IE10 404 NaN
Iceweasel 0 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index() # doctest: +NORMALIZE_WHITESPACE
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
"""
if axis is not None and (index is not None or columns is not None):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if labels is not None:
if axis in ('index', 0, None):
index = labels
elif axis in ('columns', 1):
columns = labels
else:
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
if index is not None and not is_list_like(index):
raise TypeError("Index must be called with a collection of some kind, "
"%s was passed" % type(index))
if columns is not None and not is_list_like(columns):
raise TypeError("Columns must be called with a collection of some kind, "
"%s was passed" % type(columns))
df = self.copy()
if index is not None:
df = DataFrame(df._reindex_index(index))
if columns is not None:
df = DataFrame(df._reindex_columns(columns))
# Process missing values.
if fill_value is not None:
df = df.fillna(fill_value)
# Copy
if copy:
return df.copy()
else:
self._internal = df._internal
return self
def _reindex_index(self, index):
# When axis is index, we can mimic pandas' by a right outer join.
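# The requested labels become the right side of the join, so labels missing from
# `self` show up with nulls, matching pandas' fill-with-NaN behaviour for reindex.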
index_column = self._internal.index_columns
assert len(index_column) <= 1, "Index should be single column or not set."
if len(index_column) == 1:
kser = ks.Series(list(index))
index_column = index_column[0]
labels = kser._kdf._sdf.select(kser._scol.alias(index_column))
else:
index_column = None
labels = ks.Series(index).to_frame()._sdf
joined_df = self._sdf.join(labels, on=index_column, how="right")
new_data_columns = filter(lambda x: x not in index_column, joined_df.columns)
if index_column is not None:
index_map = [(index_column, None)] # type: List[IndexMap]
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns),
index_map=index_map)
else:
internal = self._internal.copy(
sdf=joined_df,
data_columns=list(new_data_columns))
return internal
def _reindex_columns(self, columns):
level = self._internal.column_index_level
if level > 1:
label_columns = list(columns)
for col in label_columns:
if not isinstance(col, tuple):
raise TypeError('Expected tuple, got {}'.format(type(col)))
else:
label_columns = [(col,) for col in columns]
for col in label_columns:
if len(col) != level:
raise ValueError("shape (1,{}) doesn't match the shape (1,{})"
.format(len(col), level))
scols, columns, idx = [], [], []
null_columns = False
for label in label_columns:
if label in self._internal.column_index:
scols.append(self._internal.scol_for(label))
columns.append(self._internal.column_name_for(label))
else:
scols.append(F.lit(np.nan).alias(str(label)))
columns.append(str(label))
null_columns = True
idx.append(label)
if null_columns:
sdf = self._sdf.select(self._internal.index_scols + list(scols))
return self._internal.copy(sdf=sdf, data_columns=columns, column_index=idx)
def melt(self, id_vars=None, value_vars=None, var_name='variable',
value_name='value'):
"""
Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ks.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> ks.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ks.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
if id_vars is None:
id_vars = []
if not isinstance(id_vars, (list, tuple, np.ndarray)):
id_vars = list(id_vars)
data_columns = self._internal.data_columns
if value_vars is None:
value_vars = []
if not isinstance(value_vars, (list, tuple, np.ndarray)):
value_vars = list(value_vars)
if len(value_vars) == 0:
value_vars = data_columns
data_columns = [data_column for data_column in data_columns if data_column not in id_vars]
sdf = self._sdf
pairs = F.explode(F.array(*[
F.struct(*(
[F.lit(column).alias(var_name)] +
[self._internal.scol_for(column).alias(value_name)])
) for column in data_columns if column in value_vars]))
columns = (id_vars +
[F.col("pairs.%s" % var_name), F.col("pairs.%s" % value_name)])
exploded_df = sdf.withColumn("pairs", pairs).select(columns)
return DataFrame(exploded_df)
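# Sketch of the unpivot above, using the column names from the docstring example
# (id_vars='A', value columns B and C) in pseudo-SQL notation: every input row becomes
#   array(struct('B' AS variable, B AS value), struct('C' AS variable, C AS value))
# and explode() then emits one output row per struct, alongside the id_vars columns.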
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
Name: all, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
column_index = self._internal.column_index
for idx in column_index:
col = self[idx]._scol
all_col = F.min(F.coalesce(col.cast('boolean'), F.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.any, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
value_column = "value"
cols = []
for idx, applied_col in zip(column_index, applied):
cols.append(F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(idx)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_index_names is None
else (self._internal.column_index_names[i],))
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(SPARK_INDEX_NAME_FORMAT(i), index_column_name(i))
for i in range(self._internal.column_index_level)],
column_index=None,
column_index_names=None)
return DataFrame(internal)[value_column].rename("all")
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> bool:
"""
Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
Examples
--------
Create a dataframe from a dictionary.
>>> df = ks.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks whether any value in each column returns True.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
Name: any, dtype: bool
Returns
-------
Series
"""
if axis not in [0, 'index']:
raise ValueError('axis should be either 0 or "index" currently.')
applied = []
column_index = self._internal.column_index
for idx in column_index:
col = self[idx]._scol
all_col = F.max(F.coalesce(col.cast('boolean'), F.lit(False)))
applied.append(F.when(all_col.isNull(), False).otherwise(all_col))
# TODO: there is a similar logic to transpose in, for instance,
# DataFrame.all, Series.quantile. Maybe we should deduplicate it.
sdf = self._sdf
value_column = "value"
cols = []
for idx, applied_col in zip(column_index, applied):
cols.append(F.struct(
[F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(idx)] +
[applied_col.alias(value_column)]))
sdf = sdf.select(
F.array(*cols).alias("arrays")
).select(F.explode(F.col("arrays")))
sdf = sdf.selectExpr("col.*")
index_column_name = lambda i: (None if self._internal.column_index_names is None
else (self._internal.column_index_names[i],))
internal = self._internal.copy(
sdf=sdf,
data_columns=[value_column],
index_map=[(SPARK_INDEX_NAME_FORMAT(i), index_column_name(i))
for i in range(self._internal.column_index_level)],
column_index=None,
column_index_names=None)
return DataFrame(internal)[value_column].rename("any")
# TODO: add axis, numeric_only, pct, na_option parameter
def rank(self, method='average', ascending=True):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying a partition specification. This moves all data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method with very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
If method is set to 'dense', it leaves no gaps between the ranks of groups.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
"""
applied = []
for idx in self._internal.column_index:
applied.append(self[idx].rank(method=method, ascending=ascending))
sdf = self._sdf.select(self._internal.index_columns + [column._scol for column in applied])
internal = self._internal.copy(sdf=sdf,
data_columns=[c._internal.data_columns[0] for c in applied],
column_index=[c._internal.column_index[0] for c in applied])
return DataFrame(internal)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
if sum(x is not None for x in (items, like, regex)) > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive")
if axis not in ('index', 0, 'columns', 1, None):
raise ValueError("No axis named %s for object type %s." % (axis, type(axis)))
index_scols = self._internal.index_scols
sdf = self._sdf
if items is not None:
if is_list_like(items):
items = list(items)
else:
raise ValueError("items should be a list-like object.")
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
col = None
for item in items:
if col is None:
col = index_scols[0] == F.lit(item)
else:
col = col | (index_scols[0] == F.lit(item))
sdf = sdf.filter(col)
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
return self[items]
elif like is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].contains(like))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
column_index = self._internal.column_index
output_idx = [idx for idx in column_index if any(like in i for i in idx)]
return self[output_idx]
elif regex is not None:
if axis in ('index', 0):
# TODO: support multi-index here
if len(index_scols) != 1:
raise ValueError("Single index must be specified.")
sdf = sdf.filter(index_scols[0].rlike(regex))
return DataFrame(self._internal.copy(sdf=sdf))
elif axis in ('columns', 1, None):
column_index = self._internal.column_index
matcher = re.compile(regex)
output_idx = [idx for idx in column_index
if any(matcher.search(i) is not None for i in idx)]
return self[output_idx]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def rename(self,
mapper=None,
index=None,
columns=None,
axis='index',
inplace=False,
level=None,
errors='ignore'):
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series
will be left as-is. Extra labels listed don’t throw an error.
Parameters
----------
mapper : dict-like or function
Dict-like or functions transformations to apply to that axis’ values.
Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`
and `columns`.
index : dict-like or function
Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").
columns : dict-like or function
Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").
axis : int or str, default 'index'
Axis to target with mapper. Can be either the axis name ('index', 'columns') or
number (0, 1).
inplace : bool, default False
Whether to return a new DataFrame.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`
contains labels that are not present in the Index being transformed. If 'ignore',
existing keys will be renamed and extra keys will be ignored.
Returns
-------
DataFrame with the renamed axis labels.
Raises:
-------
`KeyError`
If any of the labels is not found in the selected axis and "errors='raise'".
Examples
--------
>>> kdf1 = ks.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> kdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE
a c
0 1 4
1 2 5
2 3 6
>>> kdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> def str_lower(s) -> str:
... return str.lower(s)
>>> kdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE
a b
0 1 4
1 2 5
2 3 6
>>> def mul10(x) -> int:
... return x * 10
>>> kdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])
>>> kdf2 = ks.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
>>> kdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE
x y
A B C D
0 1 2 3 4
1 5 6 7 8
>>> kdf3 = ks.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))
>>> kdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE
a b
x a 1 2
b 3 4
y c 5 6
d 7 8
"""
def gen_mapper_fn(mapper):
if isinstance(mapper, dict):
if len(mapper) == 0:
if errors == 'raise':
raise KeyError('Index includes label which is not in the `mapper`.')
else:
return DataFrame(self._internal)
type_set = set(map(lambda x: type(x), mapper.values()))
if len(type_set) > 1:
raise ValueError("Mapper dict should have the same value type.")
spark_return_type = as_spark_type(list(type_set)[0])
def mapper_fn(x):
if x in mapper:
return mapper[x]
else:
if errors == 'raise':
raise KeyError('Index includes value which is not in the `mapper`')
return x
elif callable(mapper):
spark_return_type = _infer_return_type(mapper).tpe
def mapper_fn(x):
return mapper(x)
else:
raise ValueError("`mapper` or `index` or `columns` should be "
"either dict-like or function type.")
return mapper_fn, spark_return_type
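# gen_mapper_fn normalises both dict-like and callable mappers into a plain Python
# function plus the Spark return type that the index-renaming path below needs in
# order to wrap the function in a pandas_udf.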
index_mapper_fn = None
index_mapper_ret_stype = None
columns_mapper_fn = None
if mapper:
if axis == 'index' or axis == 0:
index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper)
elif axis == 'columns' or axis == 1:
columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper)
else:
raise ValueError("argument axis should be either the axis name "
"(‘index’, ‘columns’) or number (0, 1)")
else:
if index:
index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index)
if columns:
columns_mapper_fn, _ = gen_mapper_fn(columns)
if not index and not columns:
raise ValueError("Either `index` or `columns` should be provided.")
internal = self._internal
if index_mapper_fn:
# rename index labels, if `level` is None, rename all index columns, otherwise only
# rename the corresponding level index.
# implement this by transform the underlying spark dataframe,
# Example:
# suppose the kdf index column in underlying spark dataframe is "index_0", "index_1",
# if rename level 0 index labels, will do:
# ``kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))``
# if rename all index labels (`level` is None), then will do:
# ```
# kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))
# .withColumn("index_1", mapper_fn_udf(col("index_1"))
# ```
index_columns = internal.index_columns
num_indices = len(index_columns)
if level:
if level < 0 or level >= num_indices:
raise ValueError("level should be an integer between [0, num_indices)")
def gen_new_index_column(level):
index_col_name = index_columns[level]
index_mapper_udf = pandas_udf(lambda s: s.map(index_mapper_fn),
returnType=index_mapper_ret_stype)
return index_mapper_udf(scol_for(internal.sdf, index_col_name))
sdf = internal.sdf
if level is None:
for i in range(num_indices):
sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i))
else:
sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level))
internal = internal.copy(sdf=sdf)
if columns_mapper_fn:
# rename column name.
# Will modify the `_internal._column_index` and transform underlying spark dataframe
# to the same column name with `_internal._column_index`.
if level:
if level < 0 or level >= internal.column_index_level:
raise ValueError("level should be an integer between [0, column_index_level)")
def gen_new_column_index_entry(column_index_entry):
if isinstance(column_index_entry, tuple):
if level is None:
# rename all level columns
return tuple(map(columns_mapper_fn, column_index_entry))
else:
# only rename specified level column
entry_list = list(column_index_entry)
entry_list[level] = columns_mapper_fn(entry_list[level])
return tuple(entry_list)
else:
return columns_mapper_fn(column_index_entry)
new_column_index = list(map(gen_new_column_index_entry, internal.column_index))
if internal.column_index_level == 1:
new_data_columns = [col[0] for col in new_column_index]
else:
new_data_columns = [str(col) for col in new_column_index]
new_data_scols = [scol_for(internal.sdf, old_col_name).alias(new_col_name)
for old_col_name, new_col_name
in zip(internal.data_columns, new_data_columns)]
sdf = internal.sdf.select(*(internal.index_scols + new_data_scols))
internal = internal.copy(sdf=sdf, column_index=new_column_index,
data_columns=new_data_columns)
if inplace:
self._internal = internal
return self
else:
return DataFrame(internal)
def _get_from_multiindex_column(self, key):
""" Select columns from multi-index columns.
:param key: the multi-index column keys represented by tuple
:return: DataFrame or Series
"""
from databricks.koalas.series import Series
assert isinstance(key, tuple)
indexes = [(idx, idx) for idx in self._internal.column_index]
for k in key:
indexes = [(index, idx[1:]) for index, idx in indexes if idx[0] == k]
if len(indexes) == 0:
raise KeyError(k)
recursive = False
if all(len(idx) > 0 and idx[0] == '' for _, idx in indexes):
# If the head is '', drill down recursively.
recursive = True
for i, (col, idx) in enumerate(indexes):
indexes[i] = (col, tuple([str(key), *idx[1:]]))
column_index_names = None
if self._internal.column_index_names is not None:
# Manage column index names
level = column_index_level([idx for _, idx in indexes])
column_index_names = self._internal.column_index_names[-level:]
if all(len(idx) == 0 for _, idx in indexes):
try:
idxes = set(idx for idx, _ in indexes)
assert len(idxes) == 1
index = list(idxes)[0]
kdf_or_ser = \
Series(self._internal.copy(scol=self._internal.scol_for(index),
column_index=[index]),
anchor=self)
except AnalysisException:
raise KeyError(key)
else:
kdf_or_ser = DataFrame(self._internal.copy(
data_columns=[self._internal.column_name_for(idx) for idx, _ in indexes],
column_index=[idx for _, idx in indexes],
column_index_names=column_index_names))
if recursive:
kdf_or_ser = kdf_or_ser._get_from_multiindex_column((str(key),))
return kdf_or_ser
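# Illustrative sketch (hypothetical frame): with column_index [('x', 'a'), ('x', 'b')],
# _get_from_multiindex_column(('x',)) returns a DataFrame with columns ['a', 'b'],
# while _get_from_multiindex_column(('x', 'a')) drills all the way down to a Series.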
def _pd_getitem(self, key):
from databricks.koalas.series import Series
if key is None:
raise KeyError("none key")
if isinstance(key, str):
return self._get_from_multiindex_column((key,))
if isinstance(key, tuple):
return self._get_from_multiindex_column(key)
elif np.isscalar(key):
raise NotImplementedError(key)
elif isinstance(key, slice):
return self.loc[key]
if isinstance(key, (pd.Series, np.ndarray, pd.Index)):
raise NotImplementedError(key)
if isinstance(key, list):
return self.loc[:, key]
if isinstance(key, DataFrame):
# TODO Should not implement alignment, too dangerous?
return Series(self._internal.copy(scol=self._internal.scol_for(key)), anchor=self)
if isinstance(key, Series):
# TODO Should not implement alignment, too dangerous?
# It is assumed to be only a filter, otherwise .loc should be used.
bcol = key._scol.cast("boolean")
return DataFrame(self._internal.copy(sdf=self._sdf.filter(bcol)))
raise NotImplementedError(key)
def _to_internal_pandas(self):
"""
Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only.
"""
return self._internal.pandas_df
def __repr__(self):
max_display_count = get_option("display.max_rows")
if max_display_count is None:
return self._to_internal_pandas().to_string()
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf.iloc[:max_display_count]
if pdf_length > max_display_count:
repr_string = pdf.to_string(show_dimensions=True)
match = REPR_PATTERN.search(repr_string)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
footer = ("\n\n[Showing only the first {nrows} rows x {ncols} columns]"
.format(nrows=nrows, ncols=ncols))
return REPR_PATTERN.sub(footer, repr_string)
return pdf.to_string()
def _repr_html_(self):
max_display_count = get_option("display.max_rows")
# pandas 0.25.1 has a regression about HTML representation so 'bold_rows'
# has to be set as False explicitly. See https://github.com/pandas-dev/pandas/issues/28204
bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__))
if max_display_count is None:
return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows)
pdf = self.head(max_display_count + 1)._to_internal_pandas()
pdf_length = len(pdf)
pdf = pdf[:max_display_count]
if pdf_length > max_display_count:
repr_html = pdf.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows)
match = REPR_HTML_PATTERN.search(repr_html)
if match is not None:
nrows = match.group("rows")
ncols = match.group("columns")
by = chr(215)
footer = ('\n<p>Showing only the first {rows} rows {by} {cols} columns</p>\n</div>'
.format(rows=nrows,
by=by,
cols=ncols))
return REPR_HTML_PATTERN.sub(footer, repr_html)
return pdf.to_html(notebook=True, bold_rows=bold_rows)
def __getitem__(self, key):
return self._pd_getitem(key)
def __setitem__(self, key, value):
from databricks.koalas.series import Series
if (isinstance(value, Series) and value._kdf is not self) or \
(isinstance(value, DataFrame) and value is not self):
# Different Series or DataFrames
level = self._internal.column_index_level
if isinstance(value, Series):
value = value.to_frame()
value.columns = pd.MultiIndex.from_tuples(
[tuple(list(value._internal.column_index[0]) + ([''] * (level - 1)))])
else:
assert isinstance(value, DataFrame)
value_level = value._internal.column_index_level
if value_level > level:
value.columns = pd.MultiIndex.from_tuples(
[idx[level:] for idx in value._internal.column_index])
elif value_level < level:
value.columns = pd.MultiIndex.from_tuples(
[tuple(list(idx) + ([''] * (level - value_level)))
for idx in value._internal.column_index])
if isinstance(key, str):
key = [(key,)]
elif isinstance(key, tuple):
key = [key]
else:
key = [k if isinstance(k, tuple) else (k,) for k in key]
def assign_columns(kdf, this_column_index, that_column_index):
assert len(key) == len(that_column_index)
# Note: `zip_longest` is intentionally used here to combine the keys with the
# column indexes of both frames, since their lengths may differ.
for k, this_idx, that_idx \
in zip_longest(key, this_column_index, that_column_index):
yield (kdf[that_idx], tuple(['that', *k]))
if this_idx is not None and this_idx[1:] != k:
yield (kdf[this_idx], this_idx)
kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left")
elif isinstance(key, list):
assert isinstance(value, DataFrame)
# Same DataFrames.
field_names = value.columns
kdf = self._assign({k: value[c] for k, c in zip(key, field_names)})
else:
# Same Series.
kdf = self._assign({key: value})
self._internal = kdf._internal
def __getattr__(self, key: str) -> Any:
if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"):
raise AttributeError(key)
if hasattr(_MissingPandasLikeDataFrame, key):
property_or_func = getattr(_MissingPandasLikeDataFrame, key)
if isinstance(property_or_func, property):
return property_or_func.fget(self) # type: ignore
else:
return partial(property_or_func, self)
try:
return self._get_from_multiindex_column((key,))
except KeyError:
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__.__name__, key))
def __len__(self):
return self._sdf.count()
def __dir__(self):
fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f]
return super(DataFrame, self).__dir__() + fields
def __iter__(self):
return iter(self.columns)
@classmethod
def _validate_axis(cls, axis=0):
if axis not in (0, 1, 'index', 'columns', None):
raise ValueError('No axis named {0}'.format(axis))
# convert to numeric axis
return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis)
if sys.version_info >= (3, 7):
def __class_getitem__(cls, params):
# This is a workaround to support variadic generic in DataFrame in Python 3.7.
# See https://github.com/python/typing/issues/193
# We always wrap the given type hints in a tuple to mimic the variadic generic.
return super(cls, DataFrame).__class_getitem__(Tuple[params])
elif (3, 5) <= sys.version_info < (3, 7):
# This is a workaround to support variadic generic in DataFrame in Python 3.5+
# The implementation is in its metaclass so this flag is needed to distinguish
# Koalas DataFrame.
is_dataframe = None
def _reduce_spark_multi(sdf, aggs):
"""
Performs a reduction on a dataframe, the functions being known SQL aggregate functions.
"""
assert isinstance(sdf, spark.DataFrame)
sdf0 = sdf.agg(*aggs)
l = sdf0.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row)
assert len(l2) == len(aggs), (row, l2)
return l2
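# Illustrative usage (sketch, column name 'a' is an assumption): collapse a Spark
# DataFrame into a single row of aggregates and get the plain Python values back, e.g.
#   _reduce_spark_multi(sdf, [F.max('a'), F.min('a')])  # -> [max_of_a, min_of_a]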
class _CachedDataFrame(DataFrame):
"""
Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally
it caches the corresponding Spark DataFrame.
"""
def __init__(self, internal):
self._cached = internal._sdf.cache()
super(_CachedDataFrame, self).__init__(internal)
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.unpersist()
def unpersist(self):
"""
The `unpersist` function is used to uncache the Koalas DataFrame when it
is not used with a `with` statement.
Examples
--------
>>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df = df.cache()
To uncache the dataframe, use `unpersist` function
>>> df.unpersist()
"""
if self._cached.is_cached:
self._cached.unpersist()
| 1 | 12,221 | Seems the default value of `melt`'s `var_name` at namespace.py should be changed as well. | databricks-koalas | py |
@@ -608,6 +608,17 @@ def test_query_parser_path_params_with_slashes():
assert params == {"ResourceArn": resource_arn}
+def test_parse_cloudtrail_with_botocore():
+ _botocore_parser_integration_test(
+ service="cloudtrail",
+ action="DescribeTrails",
+ headers={
+ "X-Amz-Target": "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101.DescribeTrails"
+ },
+ trailNameList=["t1"],
+ )
+
+
# TODO Add additional tests (or even automate the creation)
# - Go to the Boto3 Docs (https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/index.html)
# - Look for boto3 request syntax definition for services that use the protocol you want to test | 1 | from datetime import datetime
from urllib.parse import urlencode
from botocore.serialize import create_serializer
from localstack.aws.api import HttpRequest
from localstack.aws.protocol.parser import QueryRequestParser, RestJSONRequestParser, create_parser
from localstack.aws.spec import load_service
from localstack.utils.common import to_bytes
def test_query_parser():
"""Basic test for the QueryParser with a simple example (SQS SendMessage request)."""
parser = QueryRequestParser(load_service("sqs"))
request = HttpRequest(
body=to_bytes(
"Action=SendMessage&Version=2012-11-05&"
"QueueUrl=http%3A%2F%2Flocalhost%3A4566%2F000000000000%2Ftf-acc-test-queue&"
"MessageBody=%7B%22foo%22%3A+%22bared%22%7D&"
"DelaySeconds=2"
),
method="POST",
headers={},
path="",
)
operation, params = parser.parse(request)
assert operation.name == "SendMessage"
assert params == {
"QueueUrl": "http://localhost:4566/000000000000/tf-acc-test-queue",
"MessageBody": '{"foo": "bared"}',
"DelaySeconds": 2,
}
def test_query_parser_flattened_map():
"""Simple test with a flattened map (SQS SetQueueAttributes request)."""
parser = QueryRequestParser(load_service("sqs"))
request = HttpRequest(
body=to_bytes(
"Action=SetQueueAttributes&Version=2012-11-05&"
"QueueUrl=http%3A%2F%2Flocalhost%3A4566%2F000000000000%2Ftf-acc-test-queue&"
"Attribute.1.Name=DelaySeconds&"
"Attribute.1.Value=10&"
"Attribute.2.Name=MaximumMessageSize&"
"Attribute.2.Value=131072&"
"Attribute.3.Name=MessageRetentionPeriod&"
"Attribute.3.Value=259200&"
"Attribute.4.Name=ReceiveMessageWaitTimeSeconds&"
"Attribute.4.Value=20&"
"Attribute.5.Name=RedrivePolicy&"
"Attribute.5.Value=%7B%22deadLetterTargetArn%22%3A%22arn%3Aaws%3Asqs%3Aus-east-1%3A80398EXAMPLE%3AMyDeadLetterQueue%22%2C%22maxReceiveCount%22%3A%221000%22%7D&"
"Attribute.6.Name=VisibilityTimeout&Attribute.6.Value=60"
),
method="POST",
headers={},
path="",
)
operation, params = parser.parse(request)
assert operation.name == "SetQueueAttributes"
assert params == {
"QueueUrl": "http://localhost:4566/000000000000/tf-acc-test-queue",
"Attributes": {
"DelaySeconds": "10",
"MaximumMessageSize": "131072",
"MessageRetentionPeriod": "259200",
"ReceiveMessageWaitTimeSeconds": "20",
"RedrivePolicy": '{"deadLetterTargetArn":"arn:aws:sqs:us-east-1:80398EXAMPLE:MyDeadLetterQueue","maxReceiveCount":"1000"}',
"VisibilityTimeout": "60",
},
}
def test_query_parser_non_flattened_map():
"""Simple test with a flattened map (SQS SetQueueAttributes request)."""
parser = QueryRequestParser(load_service("sns"))
request = HttpRequest(
body=to_bytes(
"Action=SetEndpointAttributes&"
"EndpointArn=arn%3Aaws%3Asns%3Aus-west-2%3A123456789012%3Aendpoint%2FGCM%2Fgcmpushapp%2F5e3e9847-3183-3f18-a7e8-671c3a57d4b3&"
"Attributes.entry.1.key=CustomUserData&"
"Attributes.entry.1.value=My+custom+userdata&"
"Version=2010-03-31&"
"AUTHPARAMS"
),
method="POST",
headers={},
path="",
)
operation, params = parser.parse(request)
assert operation.name == "SetEndpointAttributes"
assert params == {
"Attributes": {"CustomUserData": "My custom userdata"},
"EndpointArn": "arn:aws:sns:us-west-2:123456789012:endpoint/GCM/gcmpushapp/5e3e9847-3183-3f18-a7e8-671c3a57d4b3",
}
def test_query_parser_non_flattened_list_structure():
"""Simple test with a non-flattened list structure (CloudFormation CreateChangeSet)."""
parser = QueryRequestParser(load_service("cloudformation"))
request = HttpRequest(
body=to_bytes(
"Action=CreateChangeSet&"
"ChangeSetName=SampleChangeSet&"
"Parameters.member.1.ParameterKey=KeyName&"
"Parameters.member.1.UsePreviousValue=true&"
"Parameters.member.2.ParameterKey=Purpose&"
"Parameters.member.2.ParameterValue=production&"
"StackName=arn:aws:cloudformation:us-east-1:123456789012:stack/SampleStack/1a2345b6-0000-00a0-a123-00abc0abc000&"
"UsePreviousTemplate=true&"
"Version=2010-05-15&"
"X-Amz-Algorithm=AWS4-HMAC-SHA256&"
"X-Amz-Credential=[Access-key-ID-and-scope]&"
"X-Amz-Date=20160316T233349Z&"
"X-Amz-SignedHeaders=content-type;host&"
"X-Amz-Signature=[Signature]"
),
method="POST",
headers={},
path="",
)
operation, params = parser.parse(request)
assert operation.name == "CreateChangeSet"
assert params == {
"StackName": "arn:aws:cloudformation:us-east-1:123456789012:stack/SampleStack/1a2345b6-0000-00a0-a123-00abc0abc000",
"UsePreviousTemplate": True,
"Parameters": [
{"ParameterKey": "KeyName", "UsePreviousValue": True},
{"ParameterKey": "Purpose", "ParameterValue": "production"},
],
"ChangeSetName": "SampleChangeSet",
}
def test_query_parser_non_flattened_list_structure_changed_name():
"""Simple test with a non-flattened list structure where the name of the list differs from the shape's name
(CloudWatch PutMetricData)."""
parser = QueryRequestParser(load_service("cloudwatch"))
request = HttpRequest(
body=to_bytes(
"Action=PutMetricData&"
"Version=2010-08-01&"
"Namespace=TestNamespace&"
"MetricData.member.1.MetricName=buffers&"
"MetricData.member.1.Unit=Bytes&"
"MetricData.member.1.Value=231434333&"
"MetricData.member.1.Dimensions.member.1.Name=InstanceType&"
"MetricData.member.1.Dimensions.member.1.Value=m1.small&"
"AUTHPARAMS"
),
method="POST",
headers={},
path="",
)
operation, params = parser.parse(request)
assert operation.name == "PutMetricData"
assert params == {
"MetricData": [
{
"Dimensions": [{"Name": "InstanceType", "Value": "m1.small"}],
"MetricName": "buffers",
"Unit": "Bytes",
"Value": 231434333.0,
}
],
"Namespace": "TestNamespace",
}
def test_query_parser_flattened_list_structure():
"""Simple test with a flattened list of structures."""
parser = QueryRequestParser(load_service("sqs"))
request = HttpRequest(
body=to_bytes(
"Action=DeleteMessageBatch&"
"Version=2012-11-05&"
"QueueUrl=http%3A%2F%2Flocalhost%3A4566%2F000000000000%2Ftf-acc-test-queue&"
"DeleteMessageBatchRequestEntry.1.Id=bar&"
"DeleteMessageBatchRequestEntry.1.ReceiptHandle=foo&"
"DeleteMessageBatchRequestEntry.2.Id=bar&"
"DeleteMessageBatchRequestEntry.2.ReceiptHandle=foo"
),
method="POST",
headers={},
path="",
)
operation, params = parser.parse(request)
assert operation.name == "DeleteMessageBatch"
assert params == {
"QueueUrl": "http://localhost:4566/000000000000/tf-acc-test-queue",
"Entries": [{"Id": "bar", "ReceiptHandle": "foo"}, {"Id": "bar", "ReceiptHandle": "foo"}],
}
def _botocore_parser_integration_test(
service: str, action: str, headers: dict = None, expected: dict = None, **kwargs
):
# Load the appropriate service
service = load_service(service)
# Use the serializer from botocore to serialize the request params
serializer = create_serializer(service.protocol)
serialized_request = serializer.serialize_to_request(kwargs, service.operation_model(action))
body = serialized_request["body"]
query_string = urlencode(serialized_request.get("query_string") or "", doseq=False)
if service.protocol in ["query", "ec2"]:
# Serialize the body as query parameter
body = urlencode(serialized_request["body"])
# Use our parser to parse the serialized body
parser = create_parser(service)
operation_model, parsed_request = parser.parse(
HttpRequest(
method=serialized_request.get("method") or "GET",
path=serialized_request.get("url_path") or "",
query_string=query_string,
headers=headers,
body=body,
)
)
# Check if the result is equal to the given "expected" dict or the kwargs (if "expected" has not been set)
assert parsed_request == (expected or kwargs)
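# The helper above performs a round trip: botocore's serializer produces the on-the-wire
# request for the service's protocol, and the parser under test must reconstruct the
# original parameters from it. The tests below exercise this per protocol/service pair.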
def test_query_parser_sqs_with_botocore():
_botocore_parser_integration_test(
service="sqs",
action="SendMessage",
QueueUrl="string",
MessageBody="string",
DelaySeconds=123,
MessageAttributes={
"string": {
"StringValue": "string",
"BinaryValue": b"bytes",
"StringListValues": [
"string",
],
"BinaryListValues": [
b"bytes",
],
"DataType": "string",
}
},
MessageSystemAttributes={
"string": {
"StringValue": "string",
"BinaryValue": b"bytes",
"StringListValues": [
"string",
],
"BinaryListValues": [
b"bytes",
],
"DataType": "string",
}
},
MessageDeduplicationId="string",
MessageGroupId="string",
)
def test_query_parser_empty_required_members_sqs_with_botocore():
_botocore_parser_integration_test(
service="sqs",
action="SendMessageBatch",
QueueUrl="string",
Entries=[],
expected={"QueueUrl": "string", "Entries": None},
)
def test_query_parser_cloudformation_with_botocore():
_botocore_parser_integration_test(
service="cloudformation",
action="CreateStack",
StackName="string",
TemplateBody="string",
TemplateURL="string",
Parameters=[
{
"ParameterKey": "string",
"ParameterValue": "string",
"UsePreviousValue": True,
"ResolvedValue": "string",
},
],
DisableRollback=False,
RollbackConfiguration={
"RollbackTriggers": [
{"Arn": "string", "Type": "string"},
],
"MonitoringTimeInMinutes": 123,
},
TimeoutInMinutes=123,
NotificationARNs=[
"string",
],
Capabilities=[
"CAPABILITY_IAM",
],
ResourceTypes=[
"string",
],
RoleARN="12345678901234567890",
OnFailure="DO_NOTHING",
StackPolicyBody="string",
StackPolicyURL="string",
Tags=[
{"Key": "string", "Value": "string"},
],
ClientRequestToken="string",
EnableTerminationProtection=False,
)
def test_restxml_parser_route53_with_botocore():
_botocore_parser_integration_test(
service="route53",
action="CreateHostedZone",
Name="string",
VPC={"VPCRegion": "us-east-1", "VPCId": "string"},
CallerReference="string",
HostedZoneConfig={"Comment": "string", "PrivateZone": True},
DelegationSetId="string",
)
def test_json_parser_cognito_with_botocore():
_botocore_parser_integration_test(
service="cognito-idp",
action="CreateUserPool",
headers={"X-Amz-Target": "AWSCognitoIdentityProviderService.CreateUserPool"},
PoolName="string",
Policies={
"PasswordPolicy": {
"MinimumLength": 123,
"RequireUppercase": True,
"RequireLowercase": True,
"RequireNumbers": True,
"RequireSymbols": True,
"TemporaryPasswordValidityDays": 123,
}
},
LambdaConfig={
"PreSignUp": "12345678901234567890",
"CustomMessage": "12345678901234567890",
"PostConfirmation": "12345678901234567890",
"PreAuthentication": "12345678901234567890",
"PostAuthentication": "12345678901234567890",
"DefineAuthChallenge": "12345678901234567890",
"CreateAuthChallenge": "12345678901234567890",
"VerifyAuthChallengeResponse": "12345678901234567890",
"PreTokenGeneration": "12345678901234567890",
"UserMigration": "12345678901234567890",
"CustomSMSSender": {"LambdaVersion": "V1_0", "LambdaArn": "12345678901234567890"},
"CustomEmailSender": {"LambdaVersion": "V1_0", "LambdaArn": "12345678901234567890"},
"KMSKeyID": "12345678901234567890",
},
AutoVerifiedAttributes=[
"phone_number",
],
AliasAttributes=[
"phone_number",
],
UsernameAttributes=[
"phone_number",
],
SmsVerificationMessage="string",
EmailVerificationMessage="string",
EmailVerificationSubject="string",
VerificationMessageTemplate={
"SmsMessage": "string",
"EmailMessage": "string",
"EmailSubject": "string",
"EmailMessageByLink": "string",
"EmailSubjectByLink": "string",
"DefaultEmailOption": "CONFIRM_WITH_LINK",
},
SmsAuthenticationMessage="string",
MfaConfiguration="OFF",
DeviceConfiguration={
"ChallengeRequiredOnNewDevice": True,
"DeviceOnlyRememberedOnUserPrompt": True,
},
EmailConfiguration={
"SourceArn": "12345678901234567890",
"ReplyToEmailAddress": "string",
"EmailSendingAccount": "COGNITO_DEFAULT",
"From": "string",
"ConfigurationSet": "string",
},
SmsConfiguration={"SnsCallerArn": "12345678901234567890", "ExternalId": "string"},
UserPoolTags={"string": "string"},
AdminCreateUserConfig={
"AllowAdminCreateUserOnly": True,
"UnusedAccountValidityDays": 123,
"InviteMessageTemplate": {
"SMSMessage": "string",
"EmailMessage": "string",
"EmailSubject": "string",
},
},
Schema=[
{
"Name": "string",
"AttributeDataType": "String",
"DeveloperOnlyAttribute": True,
"Mutable": True,
"Required": True,
"NumberAttributeConstraints": {"MinValue": "string", "MaxValue": "string"},
"StringAttributeConstraints": {"MinLength": "string", "MaxLength": "string"},
},
],
UserPoolAddOns={"AdvancedSecurityMode": "OFF"},
UsernameConfiguration={"CaseSensitive": True},
AccountRecoverySetting={
"RecoveryMechanisms": [
{"Priority": 123, "Name": "verified_email"},
]
},
)
def test_restjson_parser_xray_with_botocore():
_botocore_parser_integration_test(
service="xray",
action="PutTelemetryRecords",
TelemetryRecords=[
{
"Timestamp": datetime(2015, 1, 1),
"SegmentsReceivedCount": 123,
"SegmentsSentCount": 123,
"SegmentsSpilloverCount": 123,
"SegmentsRejectedCount": 123,
"BackendConnectionErrors": {
"TimeoutCount": 123,
"ConnectionRefusedCount": 123,
"HTTPCode4XXCount": 123,
"HTTPCode5XXCount": 123,
"UnknownHostCount": 123,
"OtherCount": 123,
},
},
],
EC2InstanceId="string",
Hostname="string",
ResourceARN="string",
)
def test_restjson_path_location_opensearch_with_botocore():
_botocore_parser_integration_test(
service="opensearch",
action="DeleteDomain",
DomainName="test-domain",
)
def test_restjson_query_location_opensearch_with_botocore():
_botocore_parser_integration_test(
service="opensearch",
action="ListVersions",
NextToken="test-token",
)
def test_restjson_opensearch_with_botocore():
_botocore_parser_integration_test(
service="opensearch",
action="UpdateDomainConfig",
DomainName="string",
ClusterConfig={
"InstanceType": "m3.medium.search",
"InstanceCount": 123,
"DedicatedMasterEnabled": True,
"ZoneAwarenessEnabled": True,
"ZoneAwarenessConfig": {"AvailabilityZoneCount": 123},
"DedicatedMasterType": "m3.medium.search",
"DedicatedMasterCount": 123,
"WarmEnabled": True,
"WarmType": "ultrawarm1.medium.search",
"WarmCount": 123,
"ColdStorageOptions": {"Enabled": True},
},
EBSOptions={"EBSEnabled": False, "VolumeType": "standard", "VolumeSize": 123, "Iops": 123},
SnapshotOptions={"AutomatedSnapshotStartHour": 123},
VPCOptions={
"SubnetIds": [
"string",
],
"SecurityGroupIds": [
"string",
],
},
CognitoOptions={
"Enabled": True,
"UserPoolId": "string",
"IdentityPoolId": "string",
"RoleArn": "12345678901234567890",
},
AdvancedOptions={"string": "string"},
AccessPolicies="string",
LogPublishingOptions={
"string": {"CloudWatchLogsLogGroupArn": "12345678901234567890", "Enabled": True}
},
EncryptionAtRestOptions={"Enabled": False, "KmsKeyId": "string"},
DomainEndpointOptions={
"EnforceHTTPS": True,
"TLSSecurityPolicy": "Policy-Min-TLS-1-0-2019-07",
"CustomEndpointEnabled": True,
"CustomEndpoint": "string",
"CustomEndpointCertificateArn": "12345678901234567890",
},
NodeToNodeEncryptionOptions={"Enabled": True},
AdvancedSecurityOptions={
"Enabled": True,
"InternalUserDatabaseEnabled": True,
"MasterUserOptions": {
"MasterUserARN": "12345678901234567890",
"MasterUserName": "string",
"MasterUserPassword": "12345678",
},
"SAMLOptions": {
"Enabled": True,
"Idp": {"MetadataContent": "string", "EntityId": "12345678"},
"MasterUserName": "string",
"MasterBackendRole": "string",
"SubjectKey": "string",
"RolesKey": "string",
"SessionTimeoutMinutes": 123,
},
},
AutoTuneOptions={
"DesiredState": "ENABLED",
"RollbackOnDisable": "DEFAULT_ROLLBACK",
"MaintenanceSchedules": [
{
"StartAt": datetime(2015, 1, 1),
"Duration": {"Value": 123, "Unit": "HOURS"},
"CronExpressionForRecurrence": "string",
},
],
},
)
def test_restjson_awslambda_invoke_with_botocore():
_botocore_parser_integration_test(
service="lambda",
action="Invoke",
headers={},
expected={"FunctionName": "test-function", "Payload": ""},
FunctionName="test-function",
)
def test_ec2_parser_ec2_with_botocore():
_botocore_parser_integration_test(
service="ec2",
action="CreateImage",
BlockDeviceMappings=[
{
"DeviceName": "string",
"VirtualName": "string",
"Ebs": {
"DeleteOnTermination": True,
"Iops": 123,
"SnapshotId": "string",
"VolumeSize": 123,
"VolumeType": "standard",
"KmsKeyId": "string",
"Throughput": 123,
"OutpostArn": "string",
"Encrypted": True,
},
"NoDevice": "string",
},
],
Description="string",
DryRun=True | False,
InstanceId="string",
Name="string",
NoReboot=True | False,
TagSpecifications=[
{
"ResourceType": "capacity-reservation",
"Tags": [
{"Key": "string", "Value": "string"},
],
},
],
)
def test_query_parser_path_params_with_slashes():
parser = RestJSONRequestParser(load_service("qldb"))
resource_arn = "arn:aws:qldb:eu-central-1:000000000000:ledger/c-c67c827a"
request = HttpRequest(
body=b"",
method="GET",
headers={},
path=f"/tags/{resource_arn}",
)
operation, params = parser.parse(request)
assert operation.name == "ListTagsForResource"
assert params == {"ResourceArn": resource_arn}
# TODO Add additional tests (or even automate the creation)
# - Go to the Boto3 Docs (https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/index.html)
# - Look for boto3 request syntax definition for services that use the protocol you want to test
# - Take request syntax, remove the "or" ("|") and call the helper function with these named params
 | 1 | 14,296 | I think botocore's serializer should already create the correct headers. However, they are currently not used in `_botocore_parser_integration_test` (line #217). Maybe we could remove the headers here and just use a fallback in the `_botocore_parser_integration_test` (i.e. use the given headers if they are set, otherwise use the headers generated by botocore's serializer)? | localstack-localstack | py |
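A minimal sketch of the fallback suggested above, assuming the dict returned by botocore's serializer carries a "headers" entry; the helper name `_resolve_headers` is hypothetical and not part of the test suite:
def _resolve_headers(explicit_headers, serialized_request):
    # Prefer headers passed explicitly by the test; otherwise fall back to the
    # headers botocore's serializer produced (assumed to be present in the dict).
    if explicit_headers is not None:
        return explicit_headers
    return serialized_request.get("headers") or {}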
@@ -83,7 +83,7 @@ export default AbstractEditController.extend(ChargeActions, PatientSubmodule, {
closeModalOnConfirm: false,
confirmAction: 'deleteCharge',
title: this.get('i18n').t('procedures.titles.deleteMedicationUsed'),
- message: this.get('i18n').t('procedures.messages.deleteMedication'),
+ message: this.get('i18n').t('messages.delete_singular', { name: 'medication' }),
chargeToDelete: charge,
updateButtonAction: 'confirm',
updateButtonText: this.get('i18n').t('buttons.ok') | 1 | import AbstractEditController from 'hospitalrun/controllers/abstract-edit-controller';
import ChargeActions from 'hospitalrun/mixins/charge-actions';
import Ember from 'ember';
import PatientSubmodule from 'hospitalrun/mixins/patient-submodule';
export default AbstractEditController.extend(ChargeActions, PatientSubmodule, {
visitsController: Ember.inject.controller('visits'),
chargePricingCategory: 'Procedure',
chargeRoute: 'procedures.charge',
anesthesiaTypes: Ember.computed.alias('visitsController.anesthesiaTypes'),
anesthesiologistList: Ember.computed.alias('visitsController.anesthesiologistList'),
cptCodeList: Ember.computed.alias('visitsController.cptCodeList'),
medicationList: null,
physicianList: Ember.computed.alias('visitsController.physicianList'),
procedureList: Ember.computed.alias('visitsController.procedureList'),
procedureLocations: Ember.computed.alias('visitsController.procedureLocations'),
lookupListsToUpdate: [{
name: 'anesthesiaTypes',
property: 'model.anesthesiaType',
id: 'anesthesia_types'
}, {
name: 'anesthesiologistList',
property: 'model.anesthesiologist',
id: 'anesthesiologists'
}, {
name: 'cptCodeList',
property: 'model.cptCode',
id: 'cpt_code_list'
}, {
name: 'physicianList',
property: 'model.assistant',
id: 'physician_list'
}, {
name: 'physicianList',
property: 'model.physician',
id: 'physician_list'
}, {
name: 'procedureList',
property: 'model.description',
id: 'procedure_list'
}, {
name: 'procedureLocations',
property: 'model.location',
id: 'procedure_locations'
}],
editController: Ember.inject.controller('visits/edit'),
pricingList: null, // This gets filled in by the route
pricingTypes: Ember.computed.alias('visitsController.procedurePricingTypes'),
newProcedure: false,
title: function() {
let isNew = this.get('model.isNew');
if (isNew) {
return this.get('i18n').t('procedures.titles.add');
}
return this.get('i18n').t('procedures.titles.edit');
}.property('model.isNew'),
updateCapability: 'add_procedure',
actions: {
showAddMedication() {
let newCharge = this.get('store').createRecord('proc-charge', {
dateCharged: new Date(),
newMedicationCharge: true,
quantity: 1
});
this.send('openModal', 'procedures.medication', newCharge);
},
showEditMedication(charge) {
let medicationList = this.get('medicationList');
let selectedMedication = medicationList.findBy('id', charge.get('medication.id'));
charge.set('itemName', selectedMedication.name);
this.send('openModal', 'procedures.medication', charge);
},
showDeleteMedication(charge) {
this.send('openModal', 'dialog', Ember.Object.create({
closeModalOnConfirm: false,
confirmAction: 'deleteCharge',
title: this.get('i18n').t('procedures.titles.deleteMedicationUsed'),
message: this.get('i18n').t('procedures.messages.deleteMedication'),
chargeToDelete: charge,
updateButtonAction: 'confirm',
updateButtonText: this.get('i18n').t('buttons.ok')
}));
}
},
beforeUpdate() {
return new Ember.RSVP.Promise(function(resolve, reject) {
this.updateCharges().then(function() {
if (this.get('model.isNew')) {
this.addChildToVisit(this.get('model'), 'procedures').then(resolve, reject);
} else {
resolve();
}
}.bind(this), reject);
}.bind(this));
},
afterUpdate() {
let alertTitle = this.get('i18n').t('procedures.titles.saved');
let alertMessage = this.get('i18n').t('procedures.messages.saved');
this.saveVisitIfNeeded(alertTitle, alertMessage);
}
});
 | 1 | 13,485 | This code is passing a non-localized string when it should be passing a localized string, or it should use the name of the item being deleted. | HospitalRun-hospitalrun-frontend | js |
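A hedged sketch of what that comment asks for: pass a translated label (or the actual name of the item being deleted) instead of the hard-coded 'medication'. This is only a fragment of the dialog options shown in the diff above, and 'labels.medication' is an assumed translation key, not one known to exist in the project:
      // Hypothetical: 'labels.medication' is an assumed key; a real fix could
      // instead pass the display name of the charge item being deleted.
      message: this.get('i18n').t('messages.delete_singular', {
        name: this.get('i18n').t('labels.medication')
      }),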
@@ -8,12 +8,12 @@ export default class Provider {
constructor (opts) {
this.opts = opts
this.provider = opts.provider
- this.id = this.provider
+ this.id = opts.id || this.provider
this.name = this.opts.name || _getName(this.id)
}
auth () {
- return fetch(`${this.opts.host}/${this.provider}/authorize`, {
+ return fetch(`${this.opts.host}/${this.id}/auth`, {
method: 'get',
credentials: 'include',
headers: { | 1 | 'use strict'
const _getName = (id) => {
return id.split('-').map((s) => s.charAt(0).toUpperCase() + s.slice(1)).join(' ')
}
export default class Provider {
constructor (opts) {
this.opts = opts
this.provider = opts.provider
this.id = this.provider
this.name = this.opts.name || _getName(this.id)
}
auth () {
return fetch(`${this.opts.host}/${this.provider}/authorize`, {
method: 'get',
credentials: 'include',
headers: {
'Accept': 'application/json',
        'Content-Type': 'application/json'
}
})
.then((res) => {
return res.json()
.then((payload) => {
return payload.isAuthenticated
})
})
}
list (directory = 'root') {
return fetch(`${this.opts.host}/${this.provider}/list/${directory}`, {
method: 'get',
credentials: 'include',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
})
.then((res) => res.json())
}
logout (redirect = location.href) {
return fetch(`${this.opts.host}/${this.provider}/logout?redirect=${redirect}`, {
method: 'get',
credentials: 'include',
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
})
}
}
 | 1 | 9,387 | In case you want to rename one of the plugins from options when instantiating? | transloadit-uppy | js |
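A hedged usage sketch of the constructor change in the diff above — passing an `id` in the options so the auth endpoint is renamed while the provider name stays the same. The import path and host value are assumptions for illustration only:
import Provider from './Provider' // assumed relative path

const drive = new Provider({
  host: 'https://companion.example.com', // assumed host
  provider: 'google-drive',
  id: 'drive' // with the patch, auth() requests `/drive/auth` instead of `/google-drive/authorize`
})

drive.auth().then((isAuthenticated) => console.log(isAuthenticated))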
@@ -22,6 +22,7 @@ from libcodechecker.logger import add_verbose_arguments
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('ANALYZE')
+CTU_FUNC_MAP_CMD = 'clang-func-mapping'
class OrderedCheckersAction(argparse.Action): | 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Execute analysis over an already existing build.json compilation database.
"""
import argparse
import json
import os
import shutil
import sys
from libcodechecker import generic_package_context
from libcodechecker import util
from libcodechecker.analyze import log_parser
from libcodechecker.analyze import analyzer
from libcodechecker.analyze.analyzers import analyzer_types
from libcodechecker.logger import add_verbose_arguments
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('ANALYZE')
class OrderedCheckersAction(argparse.Action):
"""
Action to store enabled and disabled checkers
and keep ordering from command line.
Create separate lists based on the checker names for
each analyzer.
"""
# Users can supply invocation to 'codechecker-analyze' as follows:
# -e core -d core.uninitialized -e core.uninitialized.Assign
# We must support having multiple '-e' and '-d' options and the order
# specified must be kept when the list of checkers are assembled for Clang.
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(OrderedCheckersAction, self).__init__(option_strings, dest,
**kwargs)
def __call__(self, parser, namespace, value, option_string=None):
if 'ordered_checkers' not in namespace:
namespace.ordered_checkers = []
ordered_checkers = namespace.ordered_checkers
ordered_checkers.append((value, self.dest == 'enable'))
namespace.ordered_checkers = ordered_checkers
def get_argparser_ctor_args():
"""
This method returns a dict containing the kwargs for constructing an
argparse.ArgumentParser (either directly or as a subparser).
"""
return {
'prog': 'CodeChecker analyze',
'formatter_class': argparse.ArgumentDefaultsHelpFormatter,
# Description is shown when the command's help is queried directly
'description': "Use the previously created JSON Compilation Database "
"to perform an analysis on the project, outputting "
"analysis results in a machine-readable format.",
# Epilogue is shown after the arguments when the help is queried
# directly.
'epilog': "Compilation databases can be created by instrumenting your "
"project's build via 'codechecker-log'. To transform the "
"results of the analysis to a human-friendly format, please "
"see the commands 'codechecker-parse' or "
"'codechecker-store'.",
# Help is shown when the "parent" CodeChecker command lists the
# individual subcommands.
'help': "Execute the supported code analyzers for the files "
"recorded in a JSON Compilation Database."
}
def add_arguments_to_parser(parser):
"""
Add the subcommand's arguments to the given argparse.ArgumentParser.
"""
parser.add_argument('logfile',
type=str,
nargs='+',
help="Path to the JSON compilation command database "
"files which were created during the build. "
"The analyzers will check only the files "
"registered in these build databases.")
parser.add_argument('-j', '--jobs',
type=int,
dest="jobs",
required=False,
default=1,
help="Number of threads to use in analysis. More "
"threads mean faster analysis at the cost of "
"using more memory.")
parser.add_argument('-i', '--ignore', '--skip',
dest="skipfile",
required=False,
default=argparse.SUPPRESS,
help="Path to the Skipfile dictating which project "
"files should be omitted from analysis. Please "
"consult the User guide on how a Skipfile "
"should be laid out.")
parser.add_argument('-o', '--output',
dest="output_path",
required=False,
default=os.path.join(util.get_default_workspace(),
'reports'),
help="Store the analysis output in the given folder.")
parser.add_argument('-t', '--type', '--output-format',
dest="output_format",
required=False,
choices=['plist'],
default='plist',
help="Specify the format the analysis results should "
"use.")
parser.add_argument('-n', '--name',
dest="name",
required=False,
default=argparse.SUPPRESS,
help="Annotate the ran analysis with a custom name in "
"the created metadata file.")
analyzer_opts = parser.add_argument_group("analyzer arguments")
analyzer_opts.add_argument('--analyzers',
nargs='+',
dest='analyzers',
metavar='ANALYZER',
required=False,
choices=analyzer_types.supported_analyzers,
default=argparse.SUPPRESS,
help="Run analysis only with the analyzers "
"specified. Currently supported analyzers "
"are: " +
', '.join(analyzer_types.
supported_analyzers) + ".")
analyzer_opts.add_argument('--add-compiler-defaults',
action='store_true',
default=False,
required=False,
help="Retrieve compiler-specific configuration "
"from the compilers themselves, and use "
"them with Clang. This is used when the "
"compiler on the system is special, e.g. "
"when doing cross-compilation.")
analyzer_opts.add_argument('--saargs',
dest="clangsa_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for the Clang Static "
"Analyzer.")
analyzer_opts.add_argument('--tidyargs',
dest="tidy_args_cfg_file",
required=False,
default=argparse.SUPPRESS,
help="File containing argument which will be "
"forwarded verbatim for Clang-Tidy.")
checkers_opts = parser.add_argument_group(
"checker configuration",
"See 'codechecker-checkers' for the list of available checkers. "
"You can fine-tune which checkers to use in the analysis by setting "
"the enabled and disabled flags starting from the bigger groups "
"and going inwards, e.g. '-e core -d core.uninitialized -e "
"core.uninitialized.Assign' will enable every 'core' checker, but "
"only 'core.uninitialized.Assign' from the 'core.uninitialized' "
"group. Please consult the manual for details. Disabling certain "
"checkers - such as the 'core' group - is unsupported by the LLVM/"
"Clang community, and thus discouraged.")
checkers_opts.add_argument('-e', '--enable',
dest="enable",
metavar='checker/checker-group',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group) "
"to BE USED in the analysis.")
checkers_opts.add_argument('-d', '--disable',
dest="disable",
metavar='checker/checker-group',
default=argparse.SUPPRESS,
action=OrderedCheckersAction,
help="Set a checker (or checker group) "
"to BE PROHIBITED from use in the "
"analysis.")
checkers_opts.add_argument('--enable-all',
dest="enable_all",
action='store_true',
required=False,
default=argparse.SUPPRESS,
help="Force the running analyzers to use "
"almost every checker available. The "
"checker groups 'alpha.', 'debug.' and "
"'osx.' (on Linux) are NOT enabled "
"automatically and must be EXPLICITLY "
"specified. WARNING! Enabling all "
"checkers might result in the analysis "
"losing precision and stability, and "
"could even result in a total failure of "
"the analysis. USE WISELY AND AT YOUR "
"OWN RISK!")
add_verbose_arguments(parser)
parser.set_defaults(func=main)
def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
readable format.
"""
context = generic_package_context.get_context()
# Parse the JSON CCDBs and retrieve the compile commands.
actions = []
if len(args.logfile) != 1:
LOG.warning("Only one log file can be processed right now!")
sys.exit(1)
for log_file in args.logfile:
if not os.path.exists(log_file):
LOG.error("The specified logfile '" + log_file + "' does not "
"exist!")
continue
actions += log_parser.parse_log(log_file,
args.add_compiler_defaults)
if len(actions) == 0:
LOG.info("None of the specified build log files contained "
"valid compilation commands. No analysis needed...")
return
if 'enable_all' in args:
LOG.info("'--enable-all' was supplied for this analysis.")
# Run the analysis.
args.output_path = os.path.abspath(args.output_path)
if os.path.isdir(args.output_path):
LOG.info("Previous analysis results in '{0}' have been "
"removed, overwriting with current result".
format(args.output_path))
shutil.rmtree(args.output_path)
os.makedirs(args.output_path)
LOG.debug("Output will be stored to: '" + args.output_path + "'")
metadata = {'action_num': len(actions),
'command': sys.argv,
'versions': {
'codechecker': "{0} ({1})".format(
context.package_git_tag,
context.package_git_hash)},
'working_directory': os.getcwd(),
'output_path': args.output_path}
if 'name' in args:
metadata['name'] = args.name
if 'skipfile' in args:
# Skip information needs to be saved because reports in a header
# can only be skipped by the report-server used in 'store' later
# on if this information is persisted.
with open(args.skipfile, 'r') as skipfile:
metadata['skip_data'] = [l.strip() for l in skipfile.readlines()]
analyzer.perform_analysis(args, context, actions, metadata)
metadata_path = os.path.join(args.output_path, 'metadata.json')
LOG.debug("Analysis metadata write to '" + metadata_path + "'")
with open(metadata_path, 'w') as metafile:
json.dump(metadata, metafile)
# WARN: store command will search for this file!!!!
compile_cmd_json = os.path.join(args.output_path, 'compile_cmd.json')
try:
source = os.path.abspath(args.logfile[0])
target = os.path.abspath(compile_cmd_json)
shutil.copyfile(source, target)
except shutil.Error as serr:
LOG.debug("Compile command json file is the same")
except Exception as ex:
LOG.debug("Copying compile command json file failed.")
LOG.info("Analysis finished.")
LOG.info("To view results in the terminal use the "
"\"CodeChecker parse\" command.")
LOG.info("To store results use the \"CodeChecker store\" command.")
LOG.info("See --help and the user guide for further options about"
" parsing and storing the reports.")
LOG.info("----=================----")
 | 1 | 7,099 | This default variable should be moved into a config variable, created by `package_context`, and read from `config/package_layout.json`. | Ericsson-codechecker | c |
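A rough sketch of the direction that comment points in — defining the command name in the package layout config and exposing it through the package context instead of hard-coding it in the module. Both the JSON key and the context attribute below are assumptions for illustration; the real CodeChecker accessor may be named differently:
# config/package_layout.json (assumed key name):
#   "ctu_func_map_cmd": "clang-func-mapping"

from libcodechecker import generic_package_context

context = generic_package_context.get_context()
# Assumed attribute created by the package context from the layout file.
CTU_FUNC_MAP_CMD = context.ctu_func_map_cmd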
@@ -202,4 +202,16 @@ describe('Walkontable.ViewportColumnsCalculator', () => {
expect(calc.stretchAllColumnsWidth.length).toBe(0);
expect(calc.needVerifyLastColumnWidth).toBe(true);
});
+
+ it('should calculate the number of columns based on a default width, ' +
+ 'when the width returned from the function is not a number', () => {
+ const calc = new Walkontable.ViewportColumnsCalculator(200, 0, 1000, () => (void 0 + 1));
+ expect(calc.startColumn).toBe(0);
+ expect(calc.startPosition).toBe(0);
+ expect(calc.endColumn).toBe(3);
+
+ const visibleCalc = new Walkontable.ViewportColumnsCalculator(200, 0, 1000, () => (void 0 + 1), null, true);
+ expect(visibleCalc.startColumn).toBe(0);
+ expect(visibleCalc.endColumn).toBe(3);
+ });
}); | 1 | describe('Walkontable.ViewportColumnsCalculator', () => {
function allColumns20() {
return 20;
}
it('should render first 5 columns in unscrolled container', () => {
const calc = new Walkontable.ViewportColumnsCalculator(100, 0, 1000, allColumns20);
expect(calc.startColumn).toBe(0);
expect(calc.startPosition).toBe(0);
expect(calc.endColumn).toBe(4);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(100, 0, 1000, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(0);
expect(visibleCalc.endColumn).toBe(4);
});
it('should render 6 columns, starting from 3 in container scrolled to half of fourth column', () => {
const calc = new Walkontable.ViewportColumnsCalculator(100, 70, 1000, allColumns20);
expect(calc.startColumn).toBe(3);
expect(calc.startPosition).toBe(60);
expect(calc.endColumn).toBe(8);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(100, 70, 1000, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(4);
expect(visibleCalc.endColumn).toBe(7);
});
it('should render 10 columns, starting from 1 in container scrolled to half of fourth column (with render overrides)', () => {
const overrideFn = function(calc) {
calc.startColumn -= 2;
calc.endColumn += 2;
};
const calc = new Walkontable.ViewportColumnsCalculator(100, 70, 1000, allColumns20, overrideFn);
expect(calc.startColumn).toBe(1);
expect(calc.startPosition).toBe(20);
expect(calc.endColumn).toBe(10);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(100, 70, 1000, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(4);
expect(visibleCalc.endColumn).toBe(7);
});
it('should return number of rendered columns', () => {
const calc = new Walkontable.ViewportColumnsCalculator(100, 50, 1000, allColumns20);
expect(calc.count).toBe(6);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(100, 50, 1000, allColumns20, null, true);
expect(visibleCalc.count).toBe(4);
});
it('should render all columns if their size is smaller than viewport', () => {
const calc = new Walkontable.ViewportColumnsCalculator(200, 0, 8, allColumns20);
expect(calc.startColumn).toBe(0);
expect(calc.endColumn).toBe(7);
expect(calc.count).toBe(8);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(200, 0, 8, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(0);
expect(visibleCalc.endColumn).toBe(7);
expect(visibleCalc.count).toBe(8);
});
it('should render all columns if their size is exactly the viewport', () => {
const calc = new Walkontable.ViewportColumnsCalculator(200, 0, 10, allColumns20);
expect(calc.startColumn).toBe(0);
expect(calc.endColumn).toBe(9);
expect(calc.count).toBe(10);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(200, 0, 10, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(0);
expect(visibleCalc.endColumn).toBe(9);
expect(visibleCalc.count).toBe(10);
});
it('should render all columns if their size is slightly larger than viewport', () => {
const calc = new Walkontable.ViewportColumnsCalculator(199, 0, 10, allColumns20);
expect(calc.startColumn).toBe(0);
expect(calc.endColumn).toBe(9);
expect(calc.count).toBe(10);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(199, 0, 10, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(0);
expect(visibleCalc.endColumn).toBe(8);
expect(visibleCalc.count).toBe(9);
});
it('should set null values if total columns is 0', () => {
const calc = new Walkontable.ViewportColumnsCalculator(200, 0, 0, allColumns20);
expect(calc.startColumn).toBe(null);
expect(calc.startPosition).toBe(null);
expect(calc.endColumn).toBe(null);
expect(calc.count).toBe(0);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(200, 0, 0, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(null);
expect(visibleCalc.endColumn).toBe(null);
});
it('should set null values if total columns is 0 (with overrideFn provided)', () => {
const overrideFn = function(myCalc) {
myCalc.startColumn = 0;
myCalc.endColumn = 0;
};
const calc = new Walkontable.ViewportColumnsCalculator(200, 0, 0, allColumns20, overrideFn);
expect(calc.startColumn).toBe(null);
expect(calc.startPosition).toBe(null);
expect(calc.endColumn).toBe(null);
expect(calc.count).toBe(0);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(200, 0, 0, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(null);
expect(visibleCalc.endColumn).toBe(null);
});
it('should scroll backwards if total columns is reached', () => {
const calc = new Walkontable.ViewportColumnsCalculator(190, 350, 20, allColumns20);
expect(calc.startColumn).toBe(10);
expect(calc.startPosition).toBe(200);
expect(calc.endColumn).toBe(19);
expect(calc.count).toBe(10);
const visibleCalc = new Walkontable.ViewportColumnsCalculator(190, 350, 20, allColumns20, null, true);
expect(visibleCalc.startColumn).toBe(11);
expect(visibleCalc.endColumn).toBe(19);
});
it('should update stretchAllRatio after refreshStretching call (stretch: all)', () => {
const calc = new Walkontable.ViewportColumnsCalculator(250, 0, 20, allColumns20, null, true, 'all');
expect(calc.stretchAllRatio).toBe(0);
expect(calc.stretchLastWidth).toBe(0);
calc.refreshStretching(414);
expect(calc.stretchAllRatio).toBe(1.035);
expect(calc.stretchLastWidth).toBe(0);
});
it('should update stretchAllRatio after refreshStretching call (stretch: last)', () => {
const calc = new Walkontable.ViewportColumnsCalculator(250, 0, 5, allColumns20, null, true, 'last');
expect(calc.stretchAllRatio).toBe(0);
expect(calc.stretchLastWidth).toBe(0);
calc.refreshStretching(414);
expect(calc.stretchAllRatio).toBe(0);
expect(calc.stretchLastWidth).toBe(334);
});
it('should return valid stretched column width (stretch: all)', () => {
const calc = new Walkontable.ViewportColumnsCalculator(250, 0, 5, allColumns20, null, true, 'all');
expect(calc.getStretchedColumnWidth(0, 50)).toBe(null);
expect(calc.needVerifyLastColumnWidth).toBe(true);
calc.refreshStretching(417);
expect(calc.getStretchedColumnWidth(0, allColumns20())).toBe(83);
expect(calc.getStretchedColumnWidth(1, allColumns20())).toBe(83);
expect(calc.getStretchedColumnWidth(2, allColumns20())).toBe(83);
expect(calc.getStretchedColumnWidth(3, allColumns20())).toBe(83);
expect(calc.needVerifyLastColumnWidth).toBe(true);
expect(calc.getStretchedColumnWidth(4, allColumns20())).toBe(85);
expect(calc.needVerifyLastColumnWidth).toBe(false);
});
it('should return valid stretched column width (stretch: last)', () => {
const calc = new Walkontable.ViewportColumnsCalculator(250, 0, 5, allColumns20, null, true, 'last');
expect(calc.getStretchedColumnWidth(0, 50)).toBe(null);
calc.refreshStretching(417);
expect(calc.getStretchedColumnWidth(0, allColumns20())).toBe(null);
expect(calc.getStretchedColumnWidth(1, allColumns20())).toBe(null);
expect(calc.getStretchedColumnWidth(2, allColumns20())).toBe(null);
expect(calc.getStretchedColumnWidth(3, allColumns20())).toBe(null);
expect(calc.getStretchedColumnWidth(4, allColumns20())).toBe(337);
});
it('call refreshStretching should clear stretchAllColumnsWidth and needVerifyLastColumnWidth property', () => {
const calc = new Walkontable.ViewportColumnsCalculator(250, 0, 5, allColumns20, null, true, 'all');
expect(calc.stretchAllColumnsWidth.length).toBe(0);
expect(calc.needVerifyLastColumnWidth).toBe(true);
calc.refreshStretching(417);
calc.getStretchedColumnWidth(0, allColumns20());
calc.getStretchedColumnWidth(1, allColumns20());
calc.getStretchedColumnWidth(2, allColumns20());
calc.getStretchedColumnWidth(3, allColumns20());
calc.getStretchedColumnWidth(4, allColumns20());
expect(calc.stretchAllColumnsWidth.length).toBe(5);
expect(calc.needVerifyLastColumnWidth).toBe(false);
calc.refreshStretching(201);
expect(calc.stretchAllColumnsWidth.length).toBe(0);
expect(calc.needVerifyLastColumnWidth).toBe(true);
});
});
 | 1 | 14,921 | Can I ask you to add a new line after the `const` assignment? I believe this increases code readability by visually separating the assignment from the expectation (logic) block. | handsontable-handsontable | js |
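For reference, this is the spacing the reviewer is asking for in the new test from the diff above — identical code, with a blank line separating each `const` assignment from the expectations that follow:
  it('should calculate the number of columns based on a default width, ' +
    'when the width returned from the function is not a number', () => {
    const calc = new Walkontable.ViewportColumnsCalculator(200, 0, 1000, () => (void 0 + 1));

    expect(calc.startColumn).toBe(0);
    expect(calc.startPosition).toBe(0);
    expect(calc.endColumn).toBe(3);

    const visibleCalc = new Walkontable.ViewportColumnsCalculator(200, 0, 1000, () => (void 0 + 1), null, true);

    expect(visibleCalc.startColumn).toBe(0);
    expect(visibleCalc.endColumn).toBe(3);
  });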
@@ -313,6 +313,7 @@ void CudaInternal::initialize(int cuda_device_id, cudaStream_t stream) {
enum { WordSize = sizeof(size_type) };
+#ifndef KOKKOS_IMPL_TURN_OFF_CUDA_HOST_INIT_CHECK
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
if (!HostSpace::execution_space::is_initialized()) {
#else | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
/*--------------------------------------------------------------------------*/
/* Kokkos interfaces */
#include <Kokkos_Macros.hpp>
#ifdef KOKKOS_ENABLE_CUDA
#include <Kokkos_Core.hpp>
#include <Cuda/Kokkos_Cuda_Error.hpp>
#include <Cuda/Kokkos_Cuda_BlockSize_Deduction.hpp>
#include <Cuda/Kokkos_Cuda_Instance.hpp>
#include <Cuda/Kokkos_Cuda_Locks.hpp>
#include <impl/Kokkos_Error.hpp>
#include <impl/Kokkos_Profiling_Interface.hpp>
/*--------------------------------------------------------------------------*/
/* Standard 'C' libraries */
#include <cstdlib>
/* Standard 'C++' libraries */
#include <vector>
#include <iostream>
#include <sstream>
#include <string>
#ifdef KOKKOS_IMPL_DEBUG_CUDA_SERIAL_EXECUTION
namespace Kokkos {
namespace Impl {
bool CudaInternal::kokkos_impl_cuda_use_serial_execution_v = false;
void CudaInternal::cuda_set_serial_execution(bool val) {
CudaInternal::kokkos_impl_cuda_use_serial_execution_v = val;
}
bool CudaInternal::cuda_use_serial_execution() {
return CudaInternal::kokkos_impl_cuda_use_serial_execution_v;
}
} // namespace Impl
} // namespace Kokkos
void kokkos_impl_cuda_set_serial_execution(bool val) {
Kokkos::Impl::CudaInternal::cuda_set_serial_execution(val);
}
bool kokkos_impl_cuda_use_serial_execution() {
return Kokkos::Impl::CudaInternal::cuda_use_serial_execution();
}
#endif
#ifdef KOKKOS_ENABLE_CUDA_RELOCATABLE_DEVICE_CODE
__device__ __constant__ unsigned long kokkos_impl_cuda_constant_memory_buffer
[Kokkos::Impl::CudaTraits::ConstantMemoryUsage / sizeof(unsigned long)];
#endif
/*--------------------------------------------------------------------------*/
namespace Kokkos {
namespace Impl {
namespace {
__global__ void query_cuda_kernel_arch(int *d_arch) {
#if defined(__CUDA_ARCH__)
*d_arch = __CUDA_ARCH__;
#else
*d_arch = 0;
#endif
}
/** Query what compute capability is actually launched to the device: */
int cuda_kernel_arch() {
int *d_arch = 0;
cudaMalloc((void **)&d_arch, sizeof(int));
query_cuda_kernel_arch<<<1, 1>>>(d_arch);
int arch = 0;
cudaMemcpy(&arch, d_arch, sizeof(int), cudaMemcpyDefault);
cudaFree(d_arch);
return arch;
}
#ifdef KOKKOS_ENABLE_CUDA_UVM
bool cuda_launch_blocking() {
const char *env = getenv("CUDA_LAUNCH_BLOCKING");
if (env == 0) return false;
return atoi(env);
}
#endif
} // namespace
void cuda_device_synchronize() { CUDA_SAFE_CALL(cudaDeviceSynchronize()); }
void cuda_internal_error_throw(cudaError e, const char *name, const char *file,
const int line) {
std::ostringstream out;
out << name << " error( " << cudaGetErrorName(e)
<< "): " << cudaGetErrorString(e);
if (file) {
out << " " << file << ":" << line;
}
throw_runtime_exception(out.str());
}
//----------------------------------------------------------------------------
// Some significant cuda device properties:
//
// cudaDeviceProp::name : Text label for device
// cudaDeviceProp::major : Device major number
// cudaDeviceProp::minor : Device minor number
// cudaDeviceProp::warpSize : number of threads per warp
// cudaDeviceProp::multiProcessorCount : number of multiprocessors
// cudaDeviceProp::sharedMemPerBlock : capacity of shared memory per block
// cudaDeviceProp::totalConstMem : capacity of constant memory
// cudaDeviceProp::totalGlobalMem : capacity of global memory
// cudaDeviceProp::maxGridSize[3] : maximum grid size
//
// Section 4.4.2.4 of the CUDA Toolkit Reference Manual
//
// struct cudaDeviceProp {
// char name[256];
// size_t totalGlobalMem;
// size_t sharedMemPerBlock;
// int regsPerBlock;
// int warpSize;
// size_t memPitch;
// int maxThreadsPerBlock;
// int maxThreadsDim[3];
// int maxGridSize[3];
// size_t totalConstMem;
// int major;
// int minor;
// int clockRate;
// size_t textureAlignment;
// int deviceOverlap;
// int multiProcessorCount;
// int kernelExecTimeoutEnabled;
// int integrated;
// int canMapHostMemory;
// int computeMode;
// int concurrentKernels;
// int ECCEnabled;
// int pciBusID;
// int pciDeviceID;
// int tccDriver;
// int asyncEngineCount;
// int unifiedAddressing;
// int memoryClockRate;
// int memoryBusWidth;
// int l2CacheSize;
// int maxThreadsPerMultiProcessor;
// };
namespace {
class CudaInternalDevices {
public:
enum { MAXIMUM_DEVICE_COUNT = 64 };
struct cudaDeviceProp m_cudaProp[MAXIMUM_DEVICE_COUNT];
int m_cudaDevCount;
CudaInternalDevices();
static const CudaInternalDevices &singleton();
};
CudaInternalDevices::CudaInternalDevices() {
// See 'cudaSetDeviceFlags' for host-device thread interaction
// Section 4.4.2.6 of the CUDA Toolkit Reference Manual
CUDA_SAFE_CALL(cudaGetDeviceCount(&m_cudaDevCount));
if (m_cudaDevCount > MAXIMUM_DEVICE_COUNT) {
Kokkos::abort(
"Sorry, you have more GPUs per node than we thought anybody would ever "
"have. Please report this to github.com/kokkos/kokkos.");
}
for (int i = 0; i < m_cudaDevCount; ++i) {
CUDA_SAFE_CALL(cudaGetDeviceProperties(m_cudaProp + i, i));
}
}
const CudaInternalDevices &CudaInternalDevices::singleton() {
static CudaInternalDevices self;
return self;
}
} // namespace
int CudaInternal::was_initialized = 0;
int CudaInternal::was_finalized = 0;
//----------------------------------------------------------------------------
void CudaInternal::print_configuration(std::ostream &s) const {
const CudaInternalDevices &dev_info = CudaInternalDevices::singleton();
#if defined(KOKKOS_ENABLE_CUDA)
s << "macro KOKKOS_ENABLE_CUDA : defined" << std::endl;
#endif
#if defined(CUDA_VERSION)
s << "macro CUDA_VERSION = " << CUDA_VERSION << " = version "
<< CUDA_VERSION / 1000 << "." << (CUDA_VERSION % 1000) / 10 << std::endl;
#endif
for (int i = 0; i < dev_info.m_cudaDevCount; ++i) {
s << "Kokkos::Cuda[ " << i << " ] " << dev_info.m_cudaProp[i].name
<< " capability " << dev_info.m_cudaProp[i].major << "."
<< dev_info.m_cudaProp[i].minor << ", Total Global Memory: "
<< human_memory_size(dev_info.m_cudaProp[i].totalGlobalMem)
<< ", Shared Memory per Block: "
<< human_memory_size(dev_info.m_cudaProp[i].sharedMemPerBlock);
if (m_cudaDev == i) s << " : Selected";
s << std::endl;
}
}
//----------------------------------------------------------------------------
CudaInternal::~CudaInternal() {
if (m_stream || m_scratchSpace || m_scratchFlags || m_scratchUnified ||
m_scratchConcurrentBitset) {
std::cerr << "Kokkos::Cuda ERROR: Failed to call Kokkos::Cuda::finalize()"
<< std::endl;
std::cerr.flush();
}
m_cudaDev = -1;
m_cudaArch = -1;
m_multiProcCount = 0;
m_maxWarpCount = 0;
m_maxBlock = 0;
m_maxSharedWords = 0;
m_maxConcurrency = 0;
m_scratchSpaceCount = 0;
m_scratchFlagsCount = 0;
m_scratchUnifiedCount = 0;
m_scratchUnifiedSupported = 0;
m_streamCount = 0;
m_scratchSpace = 0;
m_scratchFlags = 0;
m_scratchUnified = 0;
m_scratchConcurrentBitset = 0;
m_stream = 0;
}
int CudaInternal::verify_is_initialized(const char *const label) const {
if (m_cudaDev < 0) {
std::cerr << "Kokkos::Cuda::" << label << " : ERROR device not initialized"
<< std::endl;
}
return 0 <= m_cudaDev;
}
CudaInternal &CudaInternal::singleton() {
static CudaInternal self;
return self;
}
void CudaInternal::fence() const { cudaStreamSynchronize(m_stream); }
void CudaInternal::initialize(int cuda_device_id, cudaStream_t stream) {
if (was_finalized)
Kokkos::abort("Calling Cuda::initialize after Cuda::finalize is illegal\n");
was_initialized = 1;
if (is_initialized()) return;
enum { WordSize = sizeof(size_type) };
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
if (!HostSpace::execution_space::is_initialized()) {
#else
if (!HostSpace::execution_space::impl_is_initialized()) {
#endif
const std::string msg(
"Cuda::initialize ERROR : HostSpace::execution_space is not "
"initialized");
throw_runtime_exception(msg);
}
const CudaInternalDevices &dev_info = CudaInternalDevices::singleton();
const bool ok_init = 0 == m_scratchSpace || 0 == m_scratchFlags;
const bool ok_id =
0 <= cuda_device_id && cuda_device_id < dev_info.m_cudaDevCount;
// Need device capability 3.0 or better
const bool ok_dev =
ok_id && (3 <= dev_info.m_cudaProp[cuda_device_id].major &&
0 <= dev_info.m_cudaProp[cuda_device_id].minor);
if (ok_init && ok_dev) {
const struct cudaDeviceProp &cudaProp = dev_info.m_cudaProp[cuda_device_id];
m_cudaDev = cuda_device_id;
CUDA_SAFE_CALL(cudaSetDevice(m_cudaDev));
Kokkos::Impl::cuda_device_synchronize();
// Query what compute capability architecture a kernel executes:
m_cudaArch = cuda_kernel_arch();
int compiled_major = m_cudaArch / 100;
int compiled_minor = (m_cudaArch % 100) / 10;
if (compiled_major < 5 && cudaProp.major >= 5) {
std::stringstream ss;
ss << "Kokkos::Cuda::initialize ERROR: running kernels compiled for "
"compute capability "
<< compiled_major << "." << compiled_minor
<< " (< 5.0) on device with compute capability " << cudaProp.major
<< "." << cudaProp.minor
<< " (>=5.0), this would give incorrect results!" << std::endl;
std::string msg = ss.str();
Kokkos::abort(msg.c_str());
}
if (Kokkos::show_warnings() && (compiled_major != cudaProp.major ||
compiled_minor != cudaProp.minor)) {
std::cerr << "Kokkos::Cuda::initialize WARNING: running kernels compiled "
"for compute capability "
<< compiled_major << "." << compiled_minor
<< " on device with compute capability " << cudaProp.major
<< "." << cudaProp.minor
<< " , this will likely reduce potential performance."
<< std::endl;
}
// number of multiprocessors
m_multiProcCount = cudaProp.multiProcessorCount;
//----------------------------------
// Maximum number of warps,
// at most one warp per thread in a warp for reduction.
m_maxWarpCount = cudaProp.maxThreadsPerBlock / Impl::CudaTraits::WarpSize;
if (Impl::CudaTraits::WarpSize < m_maxWarpCount) {
m_maxWarpCount = Impl::CudaTraits::WarpSize;
}
m_maxSharedWords = cudaProp.sharedMemPerBlock / WordSize;
//----------------------------------
// Maximum number of blocks:
m_maxBlock = cudaProp.maxGridSize[0];
m_shmemPerSM = cudaProp.sharedMemPerMultiprocessor;
m_maxShmemPerBlock = cudaProp.sharedMemPerBlock;
m_regsPerSM = cudaProp.regsPerMultiprocessor;
m_maxBlocksPerSM =
m_cudaArch < 500
? 16
: (m_cudaArch < 750 ? 32 : (m_cudaArch == 750 ? 16 : 32));
m_maxThreadsPerSM = cudaProp.maxThreadsPerMultiProcessor;
m_maxThreadsPerBlock = cudaProp.maxThreadsPerBlock;
//----------------------------------
m_scratchUnifiedSupported = cudaProp.unifiedAddressing;
if (Kokkos::show_warnings() && !m_scratchUnifiedSupported) {
std::cerr << "Kokkos::Cuda device " << cudaProp.name << " capability "
<< cudaProp.major << "." << cudaProp.minor
<< " does not support unified virtual address space"
<< std::endl;
}
//----------------------------------
// Multiblock reduction uses scratch flags for counters
// and scratch space for partial reduction values.
// Allocate some initial space. This will grow as needed.
{
const unsigned reduce_block_count =
m_maxWarpCount * Impl::CudaTraits::WarpSize;
(void)scratch_unified(16 * sizeof(size_type));
(void)scratch_flags(reduce_block_count * 2 * sizeof(size_type));
(void)scratch_space(reduce_block_count * 16 * sizeof(size_type));
}
//----------------------------------
// Concurrent bitset for obtaining unique tokens from within
// an executing kernel.
{
m_maxConcurrency = m_maxThreadsPerSM * cudaProp.multiProcessorCount;
const int32_t buffer_bound =
Kokkos::Impl::concurrent_bitset::buffer_bound(m_maxConcurrency);
// Allocate and initialize uint32_t[ buffer_bound ]
typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>
Record;
Record *const r =
Record::allocate(Kokkos::CudaSpace(), "InternalScratchBitset",
sizeof(uint32_t) * buffer_bound);
Record::increment(r);
m_scratchConcurrentBitset = reinterpret_cast<uint32_t *>(r->data());
CUDA_SAFE_CALL(cudaMemset(m_scratchConcurrentBitset, 0,
sizeof(uint32_t) * buffer_bound));
}
//----------------------------------
} else {
std::ostringstream msg;
msg << "Kokkos::Cuda::initialize(" << cuda_device_id << ") FAILED";
if (!ok_init) {
msg << " : Already initialized";
}
if (!ok_id) {
msg << " : Device identifier out of range "
<< "[0.." << dev_info.m_cudaDevCount << "]";
} else if (!ok_dev) {
msg << " : Device ";
msg << dev_info.m_cudaProp[cuda_device_id].major;
msg << ".";
msg << dev_info.m_cudaProp[cuda_device_id].minor;
msg << " has insufficient capability, required 3.0 or better";
}
Kokkos::Impl::throw_runtime_exception(msg.str());
}
#ifdef KOKKOS_ENABLE_CUDA_UVM
if (Kokkos::show_warnings() && !cuda_launch_blocking()) {
std::cerr << "Kokkos::Cuda::initialize WARNING: Cuda is allocating into "
"UVMSpace by default"
<< std::endl;
std::cerr << " without setting "
"CUDA_LAUNCH_BLOCKING=1."
<< std::endl;
std::cerr << " The code must call "
"Cuda().fence() after each kernel"
<< std::endl;
std::cerr << " or will likely crash when "
"accessing data on the host."
<< std::endl;
}
const char *env_force_device_alloc =
getenv("CUDA_MANAGED_FORCE_DEVICE_ALLOC");
bool force_device_alloc;
if (env_force_device_alloc == 0)
force_device_alloc = false;
else
force_device_alloc = atoi(env_force_device_alloc) != 0;
const char *env_visible_devices = getenv("CUDA_VISIBLE_DEVICES");
bool visible_devices_one = true;
if (env_visible_devices == 0) visible_devices_one = false;
if (Kokkos::show_warnings() &&
(!visible_devices_one && !force_device_alloc)) {
std::cerr << "Kokkos::Cuda::initialize WARNING: Cuda is allocating into "
"UVMSpace by default"
<< std::endl;
std::cerr << " without setting "
"CUDA_MANAGED_FORCE_DEVICE_ALLOC=1 or "
<< std::endl;
std::cerr
<< " setting CUDA_VISIBLE_DEVICES."
<< std::endl;
std::cerr << " This could on multi GPU "
"systems lead to severe performance"
<< std::endl;
std::cerr << " penalties." << std::endl;
}
#endif
#ifdef KOKKOS_ENABLE_PRE_CUDA_10_DEPRECATION_API
cudaThreadSetCacheConfig(cudaFuncCachePreferShared);
#else
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
#endif
// Init the array for used for arbitrarily sized atomics
if (stream == 0) Impl::initialize_host_cuda_lock_arrays();
m_stream = stream;
}
//----------------------------------------------------------------------------
typedef Cuda::size_type ScratchGrain[Impl::CudaTraits::WarpSize];
enum { sizeScratchGrain = sizeof(ScratchGrain) };
Cuda::size_type *CudaInternal::scratch_flags(const Cuda::size_type size) const {
if (verify_is_initialized("scratch_flags") &&
m_scratchFlagsCount * sizeScratchGrain < size) {
m_scratchFlagsCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>
Record;
if (m_scratchFlags) Record::decrement(Record::get_record(m_scratchFlags));
Record *const r =
Record::allocate(Kokkos::CudaSpace(), "InternalScratchFlags",
(sizeof(ScratchGrain) * m_scratchFlagsCount));
Record::increment(r);
m_scratchFlags = reinterpret_cast<size_type *>(r->data());
CUDA_SAFE_CALL(
cudaMemset(m_scratchFlags, 0, m_scratchFlagsCount * sizeScratchGrain));
}
return m_scratchFlags;
}
Cuda::size_type *CudaInternal::scratch_space(const Cuda::size_type size) const {
if (verify_is_initialized("scratch_space") &&
m_scratchSpaceCount * sizeScratchGrain < size) {
m_scratchSpaceCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>
Record;
if (m_scratchSpace) Record::decrement(Record::get_record(m_scratchSpace));
Record *const r =
Record::allocate(Kokkos::CudaSpace(), "InternalScratchSpace",
(sizeof(ScratchGrain) * m_scratchSpaceCount));
Record::increment(r);
m_scratchSpace = reinterpret_cast<size_type *>(r->data());
}
return m_scratchSpace;
}
Cuda::size_type *CudaInternal::scratch_unified(
const Cuda::size_type size) const {
if (verify_is_initialized("scratch_unified") && m_scratchUnifiedSupported &&
m_scratchUnifiedCount * sizeScratchGrain < size) {
m_scratchUnifiedCount = (size + sizeScratchGrain - 1) / sizeScratchGrain;
typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaHostPinnedSpace,
void>
Record;
if (m_scratchUnified)
Record::decrement(Record::get_record(m_scratchUnified));
Record *const r = Record::allocate(
Kokkos::CudaHostPinnedSpace(), "InternalScratchUnified",
(sizeof(ScratchGrain) * m_scratchUnifiedCount));
Record::increment(r);
m_scratchUnified = reinterpret_cast<size_type *>(r->data());
}
return m_scratchUnified;
}
Cuda::size_type *CudaInternal::scratch_functor(
const Cuda::size_type size) const {
if (verify_is_initialized("scratch_functor") && m_scratchFunctorSize < size) {
m_scratchFunctorSize = size;
typedef Kokkos::Impl::SharedAllocationRecord<Kokkos::CudaSpace, void>
Record;
if (m_scratchFunctor)
Record::decrement(Record::get_record(m_scratchFunctor));
Record *const r = Record::allocate(
Kokkos::CudaSpace(), "InternalScratchFunctor", m_scratchFunctorSize);
Record::increment(r);
m_scratchFunctor = reinterpret_cast<size_type *>(r->data());
}
return m_scratchFunctor;
}
//----------------------------------------------------------------------------
void CudaInternal::finalize() {
was_finalized = 1;
if (0 != m_scratchSpace || 0 != m_scratchFlags) {
Impl::finalize_host_cuda_lock_arrays();
if (m_stream != 0) cudaStreamDestroy(m_stream);
typedef Kokkos::Impl::SharedAllocationRecord<CudaSpace> RecordCuda;
typedef Kokkos::Impl::SharedAllocationRecord<CudaHostPinnedSpace>
RecordHost;
RecordCuda::decrement(RecordCuda::get_record(m_scratchFlags));
RecordCuda::decrement(RecordCuda::get_record(m_scratchSpace));
RecordHost::decrement(RecordHost::get_record(m_scratchUnified));
RecordCuda::decrement(RecordCuda::get_record(m_scratchConcurrentBitset));
if (m_scratchFunctorSize > 0)
RecordCuda::decrement(RecordCuda::get_record(m_scratchFunctor));
m_cudaDev = -1;
m_multiProcCount = 0;
m_maxWarpCount = 0;
m_maxBlock = 0;
m_maxSharedWords = 0;
m_scratchSpaceCount = 0;
m_scratchFlagsCount = 0;
m_scratchUnifiedCount = 0;
m_streamCount = 0;
m_scratchSpace = 0;
m_scratchFlags = 0;
m_scratchUnified = 0;
m_scratchConcurrentBitset = 0;
m_stream = 0;
}
}
//----------------------------------------------------------------------------
Cuda::size_type cuda_internal_multiprocessor_count() {
return CudaInternal::singleton().m_multiProcCount;
}
CudaSpace::size_type cuda_internal_maximum_concurrent_block_count() {
#if defined(KOKKOS_ARCH_KEPLER)
// Compute capability 3.0 through 3.7
enum : int { max_resident_blocks_per_multiprocessor = 16 };
#else
// Compute capability 5.0 through 6.2
enum : int { max_resident_blocks_per_multiprocessor = 32 };
#endif
return CudaInternal::singleton().m_multiProcCount *
max_resident_blocks_per_multiprocessor;
};
Cuda::size_type cuda_internal_maximum_warp_count() {
return CudaInternal::singleton().m_maxWarpCount;
}
Cuda::size_type cuda_internal_maximum_grid_count() {
return CudaInternal::singleton().m_maxBlock;
}
Cuda::size_type cuda_internal_maximum_shared_words() {
return CudaInternal::singleton().m_maxSharedWords;
}
Cuda::size_type *cuda_internal_scratch_space(const Cuda &instance,
const Cuda::size_type size) {
return instance.impl_internal_space_instance()->scratch_space(size);
}
Cuda::size_type *cuda_internal_scratch_flags(const Cuda &instance,
const Cuda::size_type size) {
return instance.impl_internal_space_instance()->scratch_flags(size);
}
Cuda::size_type *cuda_internal_scratch_unified(const Cuda &instance,
const Cuda::size_type size) {
return instance.impl_internal_space_instance()->scratch_unified(size);
}
} // namespace Impl
} // namespace Kokkos
//----------------------------------------------------------------------------
namespace Kokkos {
Cuda::size_type Cuda::detect_device_count() {
return Impl::CudaInternalDevices::singleton().m_cudaDevCount;
}
int Cuda::concurrency() {
return Impl::CudaInternal::singleton().m_maxConcurrency;
}
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
int Cuda::is_initialized()
#else
int Cuda::impl_is_initialized()
#endif
{
return Impl::CudaInternal::singleton().is_initialized();
}
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
void Cuda::initialize(const Cuda::SelectDevice config, size_t num_instances)
#else
void Cuda::impl_initialize(const Cuda::SelectDevice config,
size_t num_instances)
#endif
{
Impl::CudaInternal::singleton().initialize(config.cuda_device_id, 0);
#if defined(KOKKOS_ENABLE_PROFILING)
Kokkos::Profiling::initialize();
#endif
}
std::vector<unsigned> Cuda::detect_device_arch() {
const Impl::CudaInternalDevices &s = Impl::CudaInternalDevices::singleton();
std::vector<unsigned> output(s.m_cudaDevCount);
for (int i = 0; i < s.m_cudaDevCount; ++i) {
output[i] = s.m_cudaProp[i].major * 100 + s.m_cudaProp[i].minor;
}
return output;
}
Cuda::size_type Cuda::device_arch() {
const int dev_id = Impl::CudaInternal::singleton().m_cudaDev;
int dev_arch = 0;
if (0 <= dev_id) {
const struct cudaDeviceProp &cudaProp =
Impl::CudaInternalDevices::singleton().m_cudaProp[dev_id];
dev_arch = cudaProp.major * 100 + cudaProp.minor;
}
return dev_arch;
}
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
void Cuda::finalize()
#else
void Cuda::impl_finalize()
#endif
{
Impl::CudaInternal::singleton().finalize();
#if defined(KOKKOS_ENABLE_PROFILING)
Kokkos::Profiling::finalize();
#endif
}
Cuda::Cuda() : m_space_instance(&Impl::CudaInternal::singleton()) {
Impl::CudaInternal::singleton().verify_is_initialized(
"Cuda instance constructor");
}
Cuda::Cuda(cudaStream_t stream) : m_space_instance(new Impl::CudaInternal) {
Impl::CudaInternal::singleton().verify_is_initialized(
"Cuda instance constructor");
m_space_instance->initialize(Impl::CudaInternal::singleton().m_cudaDev,
stream);
}
void Cuda::print_configuration(std::ostream &s, const bool) {
Impl::CudaInternal::singleton().print_configuration(s);
}
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
bool Cuda::sleep() { return false; }
bool Cuda::wake() { return true; }
#endif
void Cuda::impl_static_fence() { Kokkos::Impl::cuda_device_synchronize(); }
#ifdef KOKKOS_ENABLE_DEPRECATED_CODE
void Cuda::fence() { impl_static_fence(); }
#else
void Cuda::fence() const { m_space_instance->fence(); }
#endif
const char *Cuda::name() { return "Cuda"; }
cudaStream_t Cuda::cuda_stream() const { return m_space_instance->m_stream; }
int Cuda::cuda_device() const { return m_space_instance->m_cudaDev; }
} // namespace Kokkos
namespace Kokkos {
namespace Experimental {
UniqueToken<Kokkos::Cuda, Kokkos::Experimental::UniqueTokenScope::Global>::
UniqueToken(Kokkos::Cuda const &)
: m_buffer(
Kokkos::Impl::CudaInternal::singleton().m_scratchConcurrentBitset),
m_count(Kokkos::Impl::CudaInternal::singleton().m_maxConcurrency) {}
} // namespace Experimental
} // namespace Kokkos
#else
void KOKKOS_CORE_SRC_CUDA_IMPL_PREVENT_LINK_ERROR() {}
#endif // KOKKOS_ENABLE_CUDA
| 1 | 22,043 | So the intention is you configure with `-CMAKE_CXX_FLAGS="-D KOKKOS_IMPL_TURN_OFF_CUDA_HOST_INIT_CHECK"`? | kokkos-kokkos | cpp |
@@ -396,7 +396,7 @@ void AssignHsResidueInfo(RWMol &mol) {
h_label = h_label.substr(3, 1) + h_label.substr(0, 3);
AtomPDBResidueInfo *newInfo = new AtomPDBResidueInfo(
h_label, max_serial, "", info->getResidueName(),
- info->getResidueNumber(), info->getChainId(), "",
+ info->getResidueNumber(), info->getChainId(), "", 1.0, 0.0,
info->getIsHeteroAtom());
mol.getAtomWithIdx(*begin)->setMonomerInfo(newInfo);
| 1 | //
// Copyright (C) 2003-2019 Greg Landrum and Rational Discovery LLC
//
// @@ All Rights Reserved @@
// This file is part of the RDKit.
// The contents are covered by the terms of the BSD license
// which is included in the file license.txt, found at the root
// of the RDKit source tree.
//
#include "RDKitBase.h"
#include <list>
#include "QueryAtom.h"
#include "QueryOps.h"
#include "MonomerInfo.h"
#include <Geometry/Transform3D.h>
#include <Geometry/point.h>
#include <boost/foreach.hpp>
#include <boost/lexical_cast.hpp>
namespace RDKit {
// Local utility functionality:
namespace {
Atom *getAtomNeighborNot(ROMol *mol, const Atom *atom, const Atom *other) {
PRECONDITION(mol, "bad molecule");
PRECONDITION(atom, "bad atom");
PRECONDITION(atom->getDegree() > 1, "bad degree");
PRECONDITION(other, "bad atom");
Atom *res = nullptr;
ROMol::ADJ_ITER nbrIdx, endNbrs;
boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(atom);
while (nbrIdx != endNbrs) {
if (*nbrIdx != other->getIdx()) {
res = mol->getAtomWithIdx(*nbrIdx);
break;
}
++nbrIdx;
}
POSTCONDITION(res, "no neighbor found");
return res;
}
void setHydrogenCoords(ROMol *mol, unsigned int hydIdx, unsigned int heavyIdx) {
// we will loop over all the coordinates
PRECONDITION(mol, "bad molecule");
PRECONDITION(heavyIdx != hydIdx, "degenerate atoms");
Atom *hydAtom = mol->getAtomWithIdx(hydIdx);
PRECONDITION(mol->getAtomDegree(hydAtom) == 1, "bad atom degree");
const Bond *bond = mol->getBondBetweenAtoms(heavyIdx, hydIdx);
PRECONDITION(bond, "no bond between atoms");
const Atom *heavyAtom = mol->getAtomWithIdx(heavyIdx);
double bondLength =
PeriodicTable::getTable()->getRb0(1) +
PeriodicTable::getTable()->getRb0(heavyAtom->getAtomicNum());
RDGeom::Point3D dirVect(0, 0, 0);
RDGeom::Point3D perpVect, rotnAxis, nbrPerp;
RDGeom::Point3D nbr1Vect, nbr2Vect, nbr3Vect;
RDGeom::Transform3D tform;
RDGeom::Point3D heavyPos, hydPos;
const Atom *nbr1 = nullptr, *nbr2 = nullptr, *nbr3 = nullptr;
const Bond *nbrBond;
ROMol::ADJ_ITER nbrIdx, endNbrs;
switch (heavyAtom->getDegree()) {
case 1:
// --------------------------------------------------------------------------
// No other atoms present:
// --------------------------------------------------------------------------
// loop over the conformations and set the coordinates
for (auto cfi = mol->beginConformers(); cfi != mol->endConformers();
cfi++) {
if ((*cfi)->is3D()) {
dirVect.z = 1;
} else {
dirVect.x = 1;
}
heavyPos = (*cfi)->getAtomPos(heavyIdx);
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
}
break;
case 2:
// --------------------------------------------------------------------------
// One other neighbor:
// --------------------------------------------------------------------------
nbr1 = getAtomNeighborNot(mol, heavyAtom, hydAtom);
for (auto cfi = mol->beginConformers(); cfi != mol->endConformers();
++cfi) {
heavyPos = (*cfi)->getAtomPos(heavyIdx);
RDGeom::Point3D nbr1Pos = (*cfi)->getAtomPos(nbr1->getIdx());
// get a normalized vector pointing away from the neighbor:
nbr1Vect = nbr1Pos - heavyPos;
if (fabs(nbr1Vect.lengthSq()) < 1e-4) {
// no difference, which likely indicates that we have redundant atoms.
// just put it on top of the heavy atom. This was #678
(*cfi)->setAtomPos(hydIdx, heavyPos);
continue;
}
nbr1Vect.normalize();
nbr1Vect *= -1;
// ok, nbr1Vect points away from the other atom, figure out where
// this H goes:
switch (heavyAtom->getHybridization()) {
case Atom::SP3:
// get a perpendicular to nbr1Vect:
if ((*cfi)->is3D())
perpVect = nbr1Vect.getPerpendicular();
else
perpVect.z = 1.0;
// and move off it:
tform.SetRotation((180 - 109.471) * M_PI / 180., perpVect);
dirVect = tform * nbr1Vect;
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
break;
case Atom::SP2:
// default position is to just take an arbitrary perpendicular:
perpVect = nbr1Vect.getPerpendicular();
if (nbr1->getDegree() > 1) {
// can we use the neighboring atom to establish a perpendicular?
nbrBond = mol->getBondBetweenAtoms(heavyIdx, nbr1->getIdx());
if (nbrBond->getIsAromatic() ||
nbrBond->getBondType() == Bond::DOUBLE) {
nbr2 = getAtomNeighborNot(mol, nbr1, heavyAtom);
nbr2Vect =
nbr1Pos.directionVector((*cfi)->getAtomPos(nbr2->getIdx()));
perpVect = nbr2Vect.crossProduct(nbr1Vect);
}
}
perpVect.normalize();
// rotate the nbr1Vect 60 degrees about perpVect and we're done:
tform.SetRotation(60. * M_PI / 180., perpVect);
dirVect = tform * nbr1Vect;
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
break;
case Atom::SP:
// just lay the H along the vector:
dirVect = nbr1Vect;
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
break;
default:
// FIX: handle other hybridizations
// for now, just lay the H along the vector:
dirVect = nbr1Vect;
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
}
}
break;
case 3:
// --------------------------------------------------------------------------
// Two other neighbors:
// --------------------------------------------------------------------------
boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(heavyAtom);
while (nbrIdx != endNbrs) {
if (*nbrIdx != hydIdx) {
if (!nbr1)
nbr1 = mol->getAtomWithIdx(*nbrIdx);
else
nbr2 = mol->getAtomWithIdx(*nbrIdx);
}
++nbrIdx;
}
TEST_ASSERT(nbr1);
TEST_ASSERT(nbr2);
for (auto cfi = mol->beginConformers(); cfi != mol->endConformers();
++cfi) {
// start along the average of the two vectors:
heavyPos = (*cfi)->getAtomPos(heavyIdx);
nbr1Vect = heavyPos - (*cfi)->getAtomPos(nbr1->getIdx());
nbr2Vect = heavyPos - (*cfi)->getAtomPos(nbr2->getIdx());
if (fabs(nbr1Vect.lengthSq()) < 1e-4 ||
fabs(nbr2Vect.lengthSq()) < 1e-4) {
// no difference, which likely indicates that we have redundant atoms.
// just put it on top of the heavy atom. This was #678
(*cfi)->setAtomPos(hydIdx, heavyPos);
continue;
}
nbr1Vect.normalize();
nbr2Vect.normalize();
dirVect = nbr1Vect + nbr2Vect;
dirVect.normalize();
if ((*cfi)->is3D()) {
switch (heavyAtom->getHybridization()) {
case Atom::SP3:
// get the perpendicular to the neighbors:
nbrPerp = nbr1Vect.crossProduct(nbr2Vect);
// and the perpendicular to that:
rotnAxis = nbrPerp.crossProduct(dirVect);
// and then rotate about that:
rotnAxis.normalize();
tform.SetRotation((109.471 / 2) * M_PI / 180., rotnAxis);
dirVect = tform * dirVect;
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
break;
case Atom::SP2:
// don't need to do anything here, the H atom goes right on the
// direction vector
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
break;
default:
// FIX: handle other hybridizations
// for now, just lay the H along the neighbor vector;
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
break;
}
} else {
// don't need to do anything here, the H atom goes right on the
// direction vector
hydPos = heavyPos + dirVect;
(*cfi)->setAtomPos(hydIdx, hydPos);
}
}
break;
case 4:
// --------------------------------------------------------------------------
// Three other neighbors:
// --------------------------------------------------------------------------
boost::tie(nbrIdx, endNbrs) = mol->getAtomNeighbors(heavyAtom);
if (heavyAtom->hasProp(common_properties::_CIPCode)) {
// if the central atom is chiral, we'll order the neighbors
// by CIP rank:
std::vector<std::pair<unsigned int, int>> nbrs;
while (nbrIdx != endNbrs) {
if (*nbrIdx != hydIdx) {
const Atom *tAtom = mol->getAtomWithIdx(*nbrIdx);
unsigned int cip = 0;
tAtom->getPropIfPresent<unsigned int>(common_properties::_CIPRank,
cip);
nbrs.push_back(std::make_pair(cip, rdcast<int>(*nbrIdx)));
}
++nbrIdx;
}
std::sort(nbrs.begin(), nbrs.end());
nbr1 = mol->getAtomWithIdx(nbrs[0].second);
nbr2 = mol->getAtomWithIdx(nbrs[1].second);
nbr3 = mol->getAtomWithIdx(nbrs[2].second);
} else {
// central atom isn't chiral, so the neighbor ordering isn't important:
while (nbrIdx != endNbrs) {
if (*nbrIdx != hydIdx) {
if (!nbr1) {
nbr1 = mol->getAtomWithIdx(*nbrIdx);
} else if (!nbr2) {
nbr2 = mol->getAtomWithIdx(*nbrIdx);
} else {
nbr3 = mol->getAtomWithIdx(*nbrIdx);
}
}
++nbrIdx;
}
}
TEST_ASSERT(nbr1);
TEST_ASSERT(nbr2);
TEST_ASSERT(nbr3);
for (auto cfi = mol->beginConformers(); cfi != mol->endConformers();
++cfi) {
// use the average of the three vectors:
heavyPos = (*cfi)->getAtomPos(heavyIdx);
nbr1Vect = heavyPos - (*cfi)->getAtomPos(nbr1->getIdx());
nbr2Vect = heavyPos - (*cfi)->getAtomPos(nbr2->getIdx());
nbr3Vect = heavyPos - (*cfi)->getAtomPos(nbr3->getIdx());
if (fabs(nbr1Vect.lengthSq()) < 1e-4 ||
fabs(nbr2Vect.lengthSq()) < 1e-4 ||
fabs(nbr3Vect.lengthSq()) < 1e-4) {
// no difference, which likely indicates that we have redundant atoms.
// just put it on top of the heavy atom. This was #678
(*cfi)->setAtomPos(hydIdx, heavyPos);
continue;
}
nbr1Vect.normalize();
nbr2Vect.normalize();
nbr3Vect.normalize();
// if three neighboring atoms are more or less planar, this
// is going to be in a quasi-random (but almost definitely bad)
// direction...
// correct for this (issue 2951221):
if ((*cfi)->is3D()) {
if (fabs(nbr3Vect.dotProduct(nbr1Vect.crossProduct(nbr2Vect))) <
0.1) {
// compute the normal:
dirVect = nbr1Vect.crossProduct(nbr2Vect);
std::string cipCode;
if (heavyAtom->getPropIfPresent(common_properties::_CIPCode,
cipCode)) {
// the heavy atom is a chiral center, make sure
            // that we go in the right direction to preserve
// its chirality. We use the chiral volume for this:
RDGeom::Point3D v1 = dirVect - nbr3Vect;
RDGeom::Point3D v2 = nbr1Vect - nbr3Vect;
RDGeom::Point3D v3 = nbr2Vect - nbr3Vect;
double vol = v1.dotProduct(v2.crossProduct(v3));
// FIX: this is almost certainly wrong and should use the chiral
// tag
if ((cipCode == "S" && vol < 0) || (cipCode == "R" && vol > 0)) {
dirVect *= -1;
}
}
} else {
dirVect = nbr1Vect + nbr2Vect + nbr3Vect;
}
} else {
// we're in flatland
// this was github #908
// We're in a 2D conformation, put the H between the two neighbors
// that have the widest angle between them:
double minDot = nbr1Vect.dotProduct(nbr2Vect);
dirVect = nbr1Vect + nbr2Vect;
if (nbr2Vect.dotProduct(nbr3Vect) < minDot) {
minDot = nbr2Vect.dotProduct(nbr3Vect);
dirVect = nbr2Vect + nbr3Vect;
}
if (nbr1Vect.dotProduct(nbr3Vect) < minDot) {
minDot = nbr1Vect.dotProduct(nbr3Vect);
dirVect = nbr1Vect + nbr3Vect;
}
dirVect *= -1;
}
dirVect.normalize();
hydPos = heavyPos + dirVect * ((*cfi)->is3D() ? bondLength : 1.0);
(*cfi)->setAtomPos(hydIdx, hydPos);
}
break;
default:
// --------------------------------------------------------------------------
// FIX: figure out what to do here
// --------------------------------------------------------------------------
hydPos = heavyPos + dirVect * bondLength;
for (auto cfi = mol->beginConformers(); cfi != mol->endConformers();
++cfi) {
(*cfi)->setAtomPos(hydIdx, hydPos);
}
break;
}
}
void AssignHsResidueInfo(RWMol &mol) {
int max_serial = 0;
unsigned int stopIdx = mol.getNumAtoms();
for (unsigned int aidx = 0; aidx < stopIdx; ++aidx) {
AtomPDBResidueInfo *info =
(AtomPDBResidueInfo *)(mol.getAtomWithIdx(aidx)->getMonomerInfo());
if (info && info->getMonomerType() == AtomMonomerInfo::PDBRESIDUE &&
info->getSerialNumber() > max_serial) {
max_serial = info->getSerialNumber();
}
}
AtomPDBResidueInfo *current_info = 0;
int current_h_id = 0;
for (unsigned int aidx = 0; aidx < stopIdx; ++aidx) {
Atom *newAt = mol.getAtomWithIdx(aidx);
AtomPDBResidueInfo *info = (AtomPDBResidueInfo *)(newAt->getMonomerInfo());
if (info && info->getMonomerType() == AtomMonomerInfo::PDBRESIDUE) {
ROMol::ADJ_ITER begin, end;
boost::tie(begin, end) = mol.getAtomNeighbors(newAt);
while (begin != end) {
if (mol.getAtomWithIdx(*begin)->getAtomicNum() == 1) {
// Make all Hs unique - increment id even for existing
++current_h_id;
          // skip if hydrogen already has PDB info
AtomPDBResidueInfo *h_info =
(AtomPDBResidueInfo *)mol.getAtomWithIdx(*begin)
->getMonomerInfo();
if (h_info && h_info->getMonomerType() == AtomMonomerInfo::PDBRESIDUE)
continue;
          // the hydrogens have unique names on a residue basis (H1, H2, ...)
if (!current_info ||
current_info->getResidueNumber() != info->getResidueNumber() ||
current_info->getChainId() != info->getChainId()) {
current_h_id = 1;
current_info = info;
}
std::string h_label = boost::lexical_cast<std::string>(current_h_id);
if (h_label.length() > 3)
h_label = h_label.substr(h_label.length() - 3, 3);
while (h_label.length() < 3) h_label = h_label + " ";
h_label = "H" + h_label;
// wrap around id to '3H12'
h_label = h_label.substr(3, 1) + h_label.substr(0, 3);
AtomPDBResidueInfo *newInfo = new AtomPDBResidueInfo(
h_label, max_serial, "", info->getResidueName(),
info->getResidueNumber(), info->getChainId(), "",
info->getIsHeteroAtom());
mol.getAtomWithIdx(*begin)->setMonomerInfo(newInfo);
++max_serial;
}
++begin;
}
}
}
}
} // end of unnamed namespace
namespace MolOps {
void addHs(RWMol &mol, bool explicitOnly, bool addCoords,
const UINT_VECT *onlyOnAtoms, bool addResidueInfo) {
// when we hit each atom, clear its computed properties
// NOTE: it is essential that we not clear the ring info in the
// molecule's computed properties. We don't want to have to
// regenerate that. This caused Issue210 and Issue212:
mol.clearComputedProps(false);
// precompute the number of hydrogens we are going to add so that we can
// pre-allocate the necessary space on the conformations of the molecule
// for their coordinates
unsigned int numAddHyds = 0;
for (auto at : mol.atoms()) {
if (!onlyOnAtoms || std::find(onlyOnAtoms->begin(), onlyOnAtoms->end(),
at->getIdx()) != onlyOnAtoms->end()) {
numAddHyds += at->getNumExplicitHs();
if (!explicitOnly) {
numAddHyds += at->getNumImplicitHs();
}
}
}
unsigned int nSize = mol.getNumAtoms() + numAddHyds;
// loop over the conformations of the molecule and allocate new space
// for the H locations (need to do this even if we aren't adding coords so
// that the conformers have the correct number of atoms).
for (auto cfi = mol.beginConformers(); cfi != mol.endConformers(); ++cfi) {
(*cfi)->reserve(nSize);
}
unsigned int stopIdx = mol.getNumAtoms();
for (unsigned int aidx = 0; aidx < stopIdx; ++aidx) {
if (onlyOnAtoms && std::find(onlyOnAtoms->begin(), onlyOnAtoms->end(),
aidx) == onlyOnAtoms->end()) {
continue;
}
Atom *newAt = mol.getAtomWithIdx(aidx);
unsigned int newIdx;
newAt->clearComputedProps();
// always convert explicit Hs
unsigned int onumexpl = newAt->getNumExplicitHs();
for (unsigned int i = 0; i < onumexpl; i++) {
newIdx = mol.addAtom(new Atom(1), false, true);
mol.addBond(aidx, newIdx, Bond::SINGLE);
mol.getAtomWithIdx(newIdx)->updatePropertyCache();
if (addCoords) setHydrogenCoords(&mol, newIdx, aidx);
}
// clear the local property
newAt->setNumExplicitHs(0);
if (!explicitOnly) {
// take care of implicits
for (unsigned int i = 0; i < mol.getAtomWithIdx(aidx)->getNumImplicitHs();
i++) {
newIdx = mol.addAtom(new Atom(1), false, true);
mol.addBond(aidx, newIdx, Bond::SINGLE);
// set the isImplicit label so that we can strip these back
// off later if need be.
mol.getAtomWithIdx(newIdx)->setProp(common_properties::isImplicit, 1);
mol.getAtomWithIdx(newIdx)->updatePropertyCache();
if (addCoords) setHydrogenCoords(&mol, newIdx, aidx);
}
// be very clear about implicits not being allowed in this representation
newAt->setProp(common_properties::origNoImplicit, newAt->getNoImplicit(),
true);
newAt->setNoImplicit(true);
}
// update the atom's derived properties (valence count, etc.)
newAt->updatePropertyCache();
}
// take care of AtomPDBResidueInfo for Hs if root atom has it
if (addResidueInfo) AssignHsResidueInfo(mol);
}
ROMol *addHs(const ROMol &mol, bool explicitOnly, bool addCoords,
const UINT_VECT *onlyOnAtoms, bool addResidueInfo) {
auto *res = new RWMol(mol);
addHs(*res, explicitOnly, addCoords, onlyOnAtoms, addResidueInfo);
return static_cast<ROMol *>(res);
};
namespace {
// returns whether or not an adjustment was made, in case we want that info
bool adjustStereoAtomsIfRequired(RWMol &mol, const Atom *atom,
const Atom *heavyAtom) {
PRECONDITION(atom != nullptr, "bad atom");
PRECONDITION(heavyAtom != nullptr, "bad heavy atom");
// nothing we can do if the degree is only 2 (and we should have covered
// that earlier anyway)
if (heavyAtom->getDegree() == 2) return false;
const auto &cbnd =
mol.getBondBetweenAtoms(atom->getIdx(), heavyAtom->getIdx());
if (!cbnd) return false;
for (const auto &nbri :
boost::make_iterator_range(mol.getAtomBonds(heavyAtom))) {
Bond *bnd = mol[nbri];
if (bnd->getBondType() == Bond::DOUBLE &&
bnd->getStereo() > Bond::STEREOANY) {
auto sAtomIt = std::find(bnd->getStereoAtoms().begin(),
bnd->getStereoAtoms().end(), atom->getIdx());
if (sAtomIt != bnd->getStereoAtoms().end()) {
// sAtomIt points to the position of this atom's index in the list.
// find the index of another atom attached to the heavy atom and
// use it to update sAtomIt
unsigned int dblNbrIdx = bnd->getOtherAtomIdx(heavyAtom->getIdx());
for (const auto &nbri :
boost::make_iterator_range(mol.getAtomNeighbors(heavyAtom))) {
const auto &nbr = mol[nbri];
if (nbr->getIdx() == dblNbrIdx || nbr->getIdx() == atom->getIdx())
continue;
*sAtomIt = nbr->getIdx();
bool madeAdjustment = true;
switch (bnd->getStereo()) {
case Bond::STEREOCIS:
bnd->setStereo(Bond::STEREOTRANS);
break;
case Bond::STEREOTRANS:
bnd->setStereo(Bond::STEREOCIS);
break;
default:
// I think we shouldn't need to do anything with E and Z...
madeAdjustment = false;
break;
}
return madeAdjustment;
}
}
}
}
return false;
}
} // end of anonymous namespace
//
// This routine removes hydrogens (and bonds to them) from the molecular graph.
// Other Atom and bond indices may be affected by the removal.
//
// NOTES:
// - Hydrogens which aren't connected to a heavy atom will not be
// removed. This prevents molecules like "[H][H]" from having
// all atoms removed.
// - Labelled hydrogen (e.g. atoms with atomic number=1, but isotope > 1),
// will not be removed.
// - two coordinate Hs, like the central H in C[H-]C, will not be removed
// - Hs connected to dummy atoms will not be removed
// - Hs that are part of the definition of double bond Stereochemistry
// will not be removed
// - Hs that are not connected to anything else will not be removed
//
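// A minimal usage sketch (illustrative only; 'mol' is an assumed, already
// sanitized ROMol):
//   ROMol *withHs = MolOps::addHs(mol);        // add explicit Hs
//   ROMol *noHs   = MolOps::removeHs(*withHs); // strip them again, subject to
//                                              // the exceptions listed above
//   delete withHs;
//   delete noHs;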
void removeHs(RWMol &mol, bool implicitOnly, bool updateExplicitCount,
bool sanitize) {
unsigned int currIdx = 0, origIdx = 0;
std::map<unsigned int, unsigned int> idxMap;
for (ROMol::AtomIterator atIt = mol.beginAtoms(); atIt != mol.endAtoms();
++atIt) {
if ((*atIt)->getAtomicNum() == 1) continue;
(*atIt)->updatePropertyCache(false);
}
while (currIdx < mol.getNumAtoms()) {
Atom *atom = mol.getAtomWithIdx(currIdx);
idxMap[origIdx] = currIdx;
++origIdx;
if (atom->getAtomicNum() == 1) {
bool removeIt = false;
if (!atom->getDegree()) {
BOOST_LOG(rdWarningLog)
<< "WARNING: not removing hydrogen atom without neighbors"
<< std::endl;
} else if (!atom->hasQuery()) {
if (atom->hasProp(common_properties::isImplicit)) {
removeIt = true;
if (atom->getDegree() == 1) {
// by default we remove implicit Hs, but not if they are
// attached to dummy atoms. This was Github #1439
ROMol::ADJ_ITER begin, end;
boost::tie(begin, end) = mol.getAtomNeighbors(atom);
if (mol.getAtomWithIdx(*begin)->getAtomicNum() < 1) {
removeIt = false;
BOOST_LOG(rdWarningLog) << "WARNING: not removing hydrogen atom "
"with only dummy atom neighbors"
<< std::endl;
}
}
} else if (!implicitOnly && !atom->getIsotope() &&
atom->getDegree() == 1) {
ROMol::ADJ_ITER begin, end;
boost::tie(begin, end) = mol.getAtomNeighbors(atom);
auto nbr = mol.getAtomWithIdx(*begin);
if (nbr->getAtomicNum() > 1) {
removeIt = true;
// we're connected to a non-dummy, non H atom. Check to see
// if the neighbor has a double bond and we're the only neighbor
// at this end. This was part of github #1810
if (nbr->getDegree() == 2) {
for (const auto &nbri :
boost::make_iterator_range(mol.getAtomBonds(nbr))) {
const Bond *bnd = mol[nbri];
if (bnd->getBondType() == Bond::DOUBLE &&
(bnd->getStereo() > Bond::STEREOANY ||
mol.getBondBetweenAtoms(atom->getIdx(), nbr->getIdx())
->getBondDir() > Bond::NONE)) {
removeIt = false;
break;
}
}
}
}
}
}
if (removeIt) {
ROMol::OEDGE_ITER beg, end;
boost::tie(beg, end) = mol.getAtomBonds(atom);
// part of the fix for github #2086:
CHECK_INVARIANT(beg != end, "H has no neighbors!");
// note the assumption that the H only has one neighbor... I
// feel no need to handle the case of hypervalent hydrogen!
// :-)
const Bond *bond = mol[*beg];
Atom *heavyAtom = bond->getOtherAtom(atom);
int heavyAtomNum = heavyAtom->getAtomicNum();
const INT_VECT &defaultVs =
PeriodicTable::getTable()->getValenceList(heavyAtomNum);
// we'll update the atom's explicit H count if we were told to
// *or* if the atom is chiral, in which case the H is needed
// in order to complete the coordination
// *or* if the atom has the noImplicit flag set:
if (updateExplicitCount || heavyAtom->getNoImplicit() ||
heavyAtom->getChiralTag() != Atom::CHI_UNSPECIFIED) {
heavyAtom->setNumExplicitHs(heavyAtom->getNumExplicitHs() + 1);
} else {
// this is a special case related to Issue 228 and the
// "disappearing Hydrogen" problem discussed in MolOps::adjustHs
//
// If we remove a hydrogen from an aromatic N or P, or if
// the heavy atom it is connected to is not in its default
// valence state, we need to be *sure* to increment the
// explicit count, even if the H itself isn't marked as explicit
if (((heavyAtomNum == 7 || heavyAtomNum == 15) &&
heavyAtom->getIsAromatic()) ||
(std::find(defaultVs.begin() + 1, defaultVs.end(),
heavyAtom->getTotalValence()) != defaultVs.end())) {
heavyAtom->setNumExplicitHs(heavyAtom->getNumExplicitHs() + 1);
}
}
// One other consequence of removing the H from the graph is
// that we may change the ordering of the bonds about a
// chiral center. This may change the chiral label at that
// atom. We deal with that by explicitly checking here:
if (heavyAtom->getChiralTag() != Atom::CHI_UNSPECIFIED) {
INT_LIST neighborIndices;
boost::tie(beg, end) = mol.getAtomBonds(heavyAtom);
while (beg != end) {
if (mol[*beg]->getIdx() != bond->getIdx()) {
neighborIndices.push_back(mol[*beg]->getIdx());
}
++beg;
}
neighborIndices.push_back(bond->getIdx());
int nSwaps = heavyAtom->getPerturbationOrder(neighborIndices);
// std::cerr << "H: "<<atom->getIdx()<<" hvy:
// "<<heavyAtom->getIdx()<<" swaps: " << nSwaps<<std::endl;
if (nSwaps % 2) {
heavyAtom->invertChirality();
}
}
// if it's a wavy bond, then we need to
        // mark the beginning atom with the _UnknownStereo tag
// so that we know later that something was affecting its
// stereochem
if (bond->getBondDir() == Bond::UNKNOWN &&
bond->getBeginAtomIdx() == heavyAtom->getIdx()) {
heavyAtom->setProp(common_properties::_UnknownStereo, 1);
} else if (bond->getBondDir() == Bond::ENDDOWNRIGHT ||
bond->getBondDir() == Bond::ENDUPRIGHT) {
// if the direction is set on this bond and the atom it's connected to
// has no other single bonds with directions set, then we need to set
// direction on one of the other neighbors in order to avoid double
// bond stereochemistry possibly being lost. This was github #754
bool foundADir = false;
Bond *oBond = nullptr;
boost::tie(beg, end) = mol.getAtomBonds(heavyAtom);
while (beg != end) {
if (mol[*beg]->getIdx() != bond->getIdx() &&
mol[*beg]->getBondType() == Bond::SINGLE) {
if (mol[*beg]->getBondDir() == Bond::NONE) {
oBond = mol[*beg];
} else {
foundADir = true;
}
}
++beg;
}
if (!foundADir && oBond != nullptr) {
bool flipIt = (oBond->getBeginAtom() == heavyAtom) &&
(bond->getBeginAtom() == heavyAtom);
if (flipIt) {
oBond->setBondDir(bond->getBondDir() == Bond::ENDDOWNRIGHT
? Bond::ENDUPRIGHT
: Bond::ENDDOWNRIGHT);
} else {
oBond->setBondDir(bond->getBondDir());
}
}
} else {
// if this atom is one of the stereoatoms for a double bond we need
// to switch the stereo atom on this end to be the other neighbor
// This was part of github #1810
adjustStereoAtomsIfRequired(mol, atom, heavyAtom);
}
mol.removeAtom(atom);
} else {
// only increment the atom idx if we don't remove the atom
currIdx++;
}
} else {
// only increment the atom idx if we don't remove the atom
currIdx++;
bool origNoImplicit;
if (atom->getPropIfPresent(common_properties::origNoImplicit,
origNoImplicit)) {
        // we'll get in here if we haven't already processed the atom's implicit
        // hydrogens. (this is protection for the case that removeHs() is called
        // multiple times on a single molecule without intervening addHs() calls)
atom->setNoImplicit(origNoImplicit);
atom->clearProp(common_properties::origNoImplicit);
}
}
}
//
// If we didn't only remove implicit Hs, which are guaranteed to
// be the highest numbered atoms, we may have altered atom indices.
// This can screw up derived properties (such as ring members), so
// do some checks:
//
if (!implicitOnly) {
if (sanitize) {
sanitizeMol(mol);
}
}
};
ROMol *removeHs(const ROMol &mol, bool implicitOnly, bool updateExplicitCount,
bool sanitize) {
auto *res = new RWMol(mol);
try {
removeHs(*res, implicitOnly, updateExplicitCount, sanitize);
} catch (MolSanitizeException &se) {
delete res;
throw se;
}
return static_cast<ROMol *>(res);
}
namespace {
bool isQueryH(const Atom *atom) {
PRECONDITION(atom, "bogus atom");
if (atom->getAtomicNum() == 1) {
// the simple case: the atom is flagged as being an H and
// has no query
if (!atom->hasQuery() ||
(!atom->getQuery()->getNegation() &&
atom->getQuery()->getDescription() == "AtomAtomicNum")) {
return true;
}
}
if (atom->getDegree() != 1) {
// only degree 1
return false;
}
if (atom->hasQuery() && atom->getQuery()->getNegation()) {
// we will not merge negated queries
return false;
}
bool hasHQuery = false, hasOr = false;
if (atom->hasQuery()) {
if (atom->getQuery()->getDescription() == "AtomOr") {
hasOr = true;
}
std::list<QueryAtom::QUERYATOM_QUERY::CHILD_TYPE> childStack(
atom->getQuery()->beginChildren(), atom->getQuery()->endChildren());
// the logic gets too complicated if there's an OR in the children, so just
// punt on those (with a warning)
while (!(hasHQuery && hasOr) && childStack.size()) {
QueryAtom::QUERYATOM_QUERY::CHILD_TYPE query = childStack.front();
childStack.pop_front();
if (query->getDescription() == "AtomOr") {
hasOr = true;
} else if (query->getDescription() == "AtomAtomicNum") {
if (static_cast<ATOM_EQUALS_QUERY *>(query.get())->getVal() == 1 &&
!query->getNegation()) {
hasHQuery = true;
}
} else {
QueryAtom::QUERYATOM_QUERY::CHILD_VECT_CI child1;
for (child1 = query->beginChildren(); child1 != query->endChildren();
++child1) {
childStack.push_back(*child1);
}
}
}
// std::cerr<<" !!!1 "<<atom->getIdx()<<" "<<hasHQuery<<"
// "<<hasOr<<std::endl;
if (hasHQuery && hasOr) {
BOOST_LOG(rdWarningLog) << "WARNING: merging explicit H queries involved "
"in ORs is not supported. This query will not "
"be merged"
<< std::endl;
return false;
}
}
return hasHQuery;
}
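// Illustrative behaviour of isQueryH() (the inputs are assumed): a plain
// explicit hydrogen (atomic number 1, no query) can be merged; a negated query
// atom cannot; and an H-atomic-number query sitting inside an OR is skipped
// with the warning above.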
} // namespace
//
// This routine removes explicit hydrogens (and bonds to them) from
// the molecular graph and adds them as queries to the heavy atoms
// to which they are bound. If the heavy atoms (or atom queries)
// already have hydrogen-count queries, they will be updated.
//
// NOTE:
// - Hydrogens which aren't connected to a heavy atom will not be
// removed. This prevents molecules like "[H][H]" from having
// all atoms removed.
//
// - By default all hydrogens are removed; however, if
// merge_unmapped_only is true, any hydrogen participating
// in an atom map will be retained
void mergeQueryHs(RWMol &mol, bool mergeUnmappedOnly) {
std::vector<unsigned int> atomsToRemove;
boost::dynamic_bitset<> hatoms(mol.getNumAtoms());
for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) {
hatoms[i] = isQueryH(mol.getAtomWithIdx(i));
}
unsigned int currIdx = 0, stopIdx = mol.getNumAtoms();
while (currIdx < stopIdx) {
Atom *atom = mol.getAtomWithIdx(currIdx);
if (!hatoms[currIdx]) {
unsigned int numHsToRemove = 0;
ROMol::ADJ_ITER begin, end;
boost::tie(begin, end) = mol.getAtomNeighbors(atom);
while (begin != end) {
if (hatoms[*begin]) {
Atom &bgn = *mol.getAtomWithIdx(*begin);
if (!mergeUnmappedOnly ||
!bgn.hasProp(common_properties::molAtomMapNumber)) {
atomsToRemove.push_back(rdcast<unsigned int>(*begin));
++numHsToRemove;
}
}
++begin;
}
if (numHsToRemove) {
//
// We have H neighbors:
// If we have no H query already:
// - add a generic H query
// else:
// - do nothing
//
// Examples:
// C[H] -> [C;!H0]
// [C;H1][H] -> [C;H1]
// [C;H2][H] -> [C;H2]
//
// FIX: this is going to behave oddly in the case of a contradictory
// SMARTS like: [C;H0][H], where it will give the equivalent of:
      // [C;H0]. I think this is actually correct, but I can be persuaded
// otherwise.
//
// First we'll search for an H query:
bool hasHQuery = false;
if (!atom->hasQuery()) {
// it wasn't a query atom, we need to replace it so that we can add a
// query:
ATOM_EQUALS_QUERY *tmp = makeAtomNumQuery(atom->getAtomicNum());
auto *newAt = new QueryAtom;
newAt->setQuery(tmp);
newAt->updateProps(*atom);
mol.replaceAtom(atom->getIdx(), newAt);
delete newAt;
atom = mol.getAtomWithIdx(currIdx);
}
if (!hasHQuery) {
for (unsigned int i = 0; i < numHsToRemove; ++i) {
ATOM_EQUALS_QUERY *tmp = makeAtomHCountQuery(i);
tmp->setNegation(true);
atom->expandQuery(tmp);
}
}
} // end of numHsToRemove test
      // recurse if needed (was github issue 544)
if (atom->hasQuery()) {
// std::cerr<<" q: "<<atom->getQuery()->getDescription()<<std::endl;
if (atom->getQuery()->getDescription() == "RecursiveStructure") {
RWMol *rqm = static_cast<RWMol *>(const_cast<ROMol *>(
static_cast<RecursiveStructureQuery *>(atom->getQuery())
->getQueryMol()));
mergeQueryHs(*rqm, mergeUnmappedOnly);
}
// FIX: shouldn't be repeating this code here
std::list<QueryAtom::QUERYATOM_QUERY::CHILD_TYPE> childStack(
atom->getQuery()->beginChildren(), atom->getQuery()->endChildren());
while (childStack.size()) {
QueryAtom::QUERYATOM_QUERY::CHILD_TYPE qry = childStack.front();
childStack.pop_front();
// std::cerr<<" child: "<<qry->getDescription()<<std::endl;
if (qry->getDescription() == "RecursiveStructure") {
// std::cerr<<" recurse"<<std::endl;
RWMol *rqm = static_cast<RWMol *>(const_cast<ROMol *>(
static_cast<RecursiveStructureQuery *>(qry.get())
->getQueryMol()));
mergeQueryHs(*rqm, mergeUnmappedOnly);
// std::cerr<<" back"<<std::endl;
} else if (qry->beginChildren() != qry->endChildren()) {
childStack.insert(childStack.end(), qry->beginChildren(),
qry->endChildren());
}
}
} // end of recursion loop
}
++currIdx;
}
std::sort(atomsToRemove.begin(), atomsToRemove.end());
for (std::vector<unsigned int>::const_reverse_iterator aiter =
atomsToRemove.rbegin();
aiter != atomsToRemove.rend(); ++aiter) {
Atom *atom = mol.getAtomWithIdx(*aiter);
mol.removeAtom(atom);
}
};
ROMol *mergeQueryHs(const ROMol &mol, bool mergeUnmappedOnly) {
auto *res = new RWMol(mol);
mergeQueryHs(*res, mergeUnmappedOnly);
return static_cast<ROMol *>(res);
};
}; // end of namespace MolOps
}; // end of namespace RDKit
| 1 | 19,423 | This was a bug. | rdkit-rdkit | cpp |
@@ -210,6 +210,19 @@ class ImageExtension extends Twig_Extension
$htmlAttributes = $attributes;
unset($htmlAttributes['type'], $htmlAttributes['size']);
+ $useLazyLoading = array_key_exists('lazy', $attributes) ? (bool)$attributes['lazy'] : true;
+ $isAttributeClassExistsAndNotEmpty = array_key_exists('class', $attributes) && $attributes['class'] !== '';
+ $htmlAttributes['class'] = sprintf(
+ '%s%s',
+ $useLazyLoading ? 'lazy' : '',
+ $isAttributeClassExistsAndNotEmpty ? ' ' . $attributes['class'] : ''
+ );
+
+ if ($useLazyLoading) {
+ $htmlAttributes['data-src'] = $htmlAttributes['src'];
+ $htmlAttributes['src'] = '';
+ }
+
return $this->templating->render('@ShopsysFramework/Common/image.html.twig', [
'attr' => $htmlAttributes,
'additionalImagesData' => $additionalImagesData, | 1 | <?php
declare(strict_types=1);
namespace Shopsys\FrameworkBundle\Twig;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Component\Image\ImageFacade;
use Shopsys\FrameworkBundle\Component\Image\ImageLocator;
use Shopsys\FrameworkBundle\Component\Utils\Utils;
use Symfony\Bundle\FrameworkBundle\Templating\EngineInterface;
use Twig_Extension;
use Twig_SimpleFunction;
class ImageExtension extends Twig_Extension
{
protected const NOIMAGE_FILENAME = 'noimage.png';
/**
* @var string
*/
protected $frontDesignImageUrlPrefix;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain
*/
protected $domain;
/**
* @var \Shopsys\FrameworkBundle\Component\Image\ImageLocator
*/
protected $imageLocator;
/**
* @var \Shopsys\FrameworkBundle\Component\Image\ImageFacade
*/
protected $imageFacade;
/**
* @var \Symfony\Component\Templating\EngineInterface
*/
protected $templating;
/**
* @param string $frontDesignImageUrlPrefix
* @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
* @param \Shopsys\FrameworkBundle\Component\Image\ImageLocator $imageLocator
* @param \Shopsys\FrameworkBundle\Component\Image\ImageFacade $imageFacade
* @param \Symfony\Bundle\FrameworkBundle\Templating\EngineInterface $templating
*/
public function __construct(
$frontDesignImageUrlPrefix,
Domain $domain,
ImageLocator $imageLocator,
ImageFacade $imageFacade,
EngineInterface $templating
) {
$this->frontDesignImageUrlPrefix = rtrim($frontDesignImageUrlPrefix, '/');
$this->domain = $domain;
$this->imageLocator = $imageLocator;
$this->imageFacade = $imageFacade;
$this->templating = $templating;
}
/**
* @return array
*/
public function getFunctions()
{
return [
new Twig_SimpleFunction('imageExists', [$this, 'imageExists']),
new Twig_SimpleFunction('imageUrl', [$this, 'getImageUrl']),
new Twig_SimpleFunction('image', [$this, 'getImageHtml'], ['is_safe' => ['html']]),
new Twig_SimpleFunction('noimage', [$this, 'getNoimageHtml'], ['is_safe' => ['html']]),
new Twig_SimpleFunction('getImages', [$this, 'getImages']),
];
}
/**
* @param \Shopsys\FrameworkBundle\Component\Image\Image|object $imageOrEntity
* @param string|null $type
* @return bool
*/
public function imageExists($imageOrEntity, $type = null)
{
try {
$image = $this->imageFacade->getImageByObject($imageOrEntity, $type);
} catch (\Shopsys\FrameworkBundle\Component\Image\Exception\ImageNotFoundException $e) {
return false;
}
return $this->imageLocator->imageExists($image);
}
/**
* @param \Shopsys\FrameworkBundle\Component\Image\Image|Object $imageOrEntity
* @param string|null $sizeName
* @param string|null $type
* @return string
*/
public function getImageUrl($imageOrEntity, $sizeName = null, $type = null)
{
try {
return $this->imageFacade->getImageUrl($this->domain->getCurrentDomainConfig(), $imageOrEntity, $sizeName, $type);
} catch (\Shopsys\FrameworkBundle\Component\Image\Exception\ImageNotFoundException $e) {
return $this->getEmptyImageUrl();
}
}
/**
* @param Object $entity
* @param string|null $type
* @return \Shopsys\FrameworkBundle\Component\Image\Image[]
*/
public function getImages($entity, $type = null)
{
return $this->imageFacade->getImagesByEntityIndexedById($entity, $type);
}
/**
* @param \Shopsys\FrameworkBundle\Component\Image\Image|Object $imageOrEntity
* @param array $attributes
* @return string
*/
public function getImageHtml($imageOrEntity, array $attributes = [])
{
$this->preventDefault($attributes);
try {
$image = $this->imageFacade->getImageByObject($imageOrEntity, $attributes['type']);
$entityName = $image->getEntityName();
$attributes['src'] = $this->getImageUrl($image, $attributes['size'], $attributes['type']);
$additionalImagesData = $this->imageFacade->getAdditionalImagesData($this->domain->getCurrentDomainConfig(), $image, $attributes['size'], $attributes['type']);
return $this->getImageHtmlByEntityName($attributes, $entityName, $additionalImagesData);
} catch (\Shopsys\FrameworkBundle\Component\Image\Exception\ImageNotFoundException $e) {
return $this->getNoimageHtml($attributes);
}
}
/**
* @param array $attributes
* @return string
*/
public function getNoimageHtml(array $attributes = [])
{
$this->preventDefault($attributes);
$entityName = 'noimage';
$attributes['src'] = $this->getEmptyImageUrl();
$additionalImagesData = [];
return $this->getImageHtmlByEntityName($attributes, $entityName, $additionalImagesData);
}
/**
* @return string
*/
protected function getEmptyImageUrl(): string
{
return $this->domain->getUrl() . $this->frontDesignImageUrlPrefix . '/' . static::NOIMAGE_FILENAME;
}
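    // Illustrative result only (the domain URL and prefix below are assumed
    // values): with a domain URL of "http://example.com" and a prefix of
    // "/assets/frontend/images", the method above returns
    // "http://example.com/assets/frontend/images/noimage.png".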
/**
* @param string $entityName
* @param string|null $type
* @param string|null $sizeName
* @return string
*/
protected function getImageCssClass($entityName, $type, $sizeName)
{
$allClassParts = [
'image',
$entityName,
$type,
$sizeName,
];
$classParts = array_filter($allClassParts);
return implode('-', $classParts);
}
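    // For example (the argument values are assumed): getImageCssClass('product', 'list', 'thumbnail')
    // yields "image-product-list-thumbnail", while null or empty parts are dropped by
    // array_filter(), so getImageCssClass('noimage', null, null) yields "image-noimage".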
/**
* @return string
*/
public function getName()
{
return 'image_extension';
}
/**
* @param array $attributes
*/
protected function preventDefault(array &$attributes): void
{
Utils::setArrayDefaultValue($attributes, 'type');
Utils::setArrayDefaultValue($attributes, 'size');
Utils::setArrayDefaultValue($attributes, 'alt', '');
Utils::setArrayDefaultValue($attributes, 'title', $attributes['alt']);
}
/**
* @param array $attributes
* @param string $entityName
* @param \Shopsys\FrameworkBundle\Component\Image\AdditionalImageData[] $additionalImagesData
* @return string
*/
protected function getImageHtmlByEntityName(array $attributes, $entityName, $additionalImagesData = []): string
{
$htmlAttributes = $attributes;
unset($htmlAttributes['type'], $htmlAttributes['size']);
return $this->templating->render('@ShopsysFramework/Common/image.html.twig', [
'attr' => $htmlAttributes,
'additionalImagesData' => $additionalImagesData,
'imageCssClass' => $this->getImageCssClass($entityName, $attributes['type'], $attributes['size']),
]);
}
}
| 1 | 19,661 | Is OK that we don't have some placeholder? | shopsys-shopsys | php |
@@ -447,11 +447,11 @@ func (node *Node) setupStorageMining(ctx context.Context) error {
PoStProofType: postProofType,
SealProofType: sealProofType,
Miner: minerAddr,
- WorkerThreads: 1,
+ WorkerThreads: 2,
Paths: []fs.PathConfig{
{
Path: sectorDir,
- Cache: false,
+ Cache: true,
Weight: 1,
},
}, | 1 | package node
import (
"context"
"fmt"
"os"
"reflect"
"runtime"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-sectorbuilder"
"github.com/filecoin-project/go-sectorbuilder/fs"
"github.com/filecoin-project/specs-actors/actors/abi"
fbig "github.com/filecoin-project/specs-actors/actors/abi/big"
bserv "github.com/ipfs/go-blockservice"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/namespace"
logging "github.com/ipfs/go-log"
"github.com/libp2p/go-libp2p-core/host"
"github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/internal/submodule"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg"
"github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain"
"github.com/filecoin-project/go-filecoin/internal/pkg/block"
"github.com/filecoin-project/go-filecoin/internal/pkg/cborutil"
"github.com/filecoin-project/go-filecoin/internal/pkg/chain"
"github.com/filecoin-project/go-filecoin/internal/pkg/clock"
"github.com/filecoin-project/go-filecoin/internal/pkg/config"
"github.com/filecoin-project/go-filecoin/internal/pkg/consensus"
"github.com/filecoin-project/go-filecoin/internal/pkg/constants"
"github.com/filecoin-project/go-filecoin/internal/pkg/message"
"github.com/filecoin-project/go-filecoin/internal/pkg/metrics"
"github.com/filecoin-project/go-filecoin/internal/pkg/mining"
"github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub"
"github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager"
mining_protocol "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/mining"
"github.com/filecoin-project/go-filecoin/internal/pkg/repo"
"github.com/filecoin-project/go-filecoin/internal/pkg/state"
"github.com/filecoin-project/go-filecoin/internal/pkg/version"
)
var log = logging.Logger("node") // nolint: deadcode
var (
// ErrNoMinerAddress is returned when the node is not configured to have any miner addresses.
ErrNoMinerAddress = errors.New("no miner addresses configured")
)
// Node represents a full Filecoin node.
type Node struct {
// OfflineMode, when true, disables libp2p.
OfflineMode bool
// ChainClock is a chainClock used by the node for chain epoch.
ChainClock clock.ChainEpochClock
// Repo is the repo this node was created with.
//
// It contains all persistent artifacts of the filecoin node.
Repo repo.Repo
PorcelainAPI *porcelain.API
//
// Core services
//
Blockstore submodule.BlockstoreSubmodule
network submodule.NetworkSubmodule
Blockservice submodule.BlockServiceSubmodule
Discovery submodule.DiscoverySubmodule
//
// Subsystems
//
chain submodule.ChainSubmodule
syncer submodule.SyncerSubmodule
BlockMining submodule.BlockMiningSubmodule
StorageMining *submodule.StorageMiningSubmodule
//
// Supporting services
//
Wallet submodule.WalletSubmodule
Messaging submodule.MessagingSubmodule
StorageNetworking submodule.StorageNetworkingSubmodule
ProofVerification submodule.ProofVerificationSubmodule
//
// Protocols
//
VersionTable *version.ProtocolVersionTable
StorageProtocol *submodule.StorageProtocolSubmodule
RetrievalProtocol *submodule.RetrievalProtocolSubmodule
}
// Start boots up the node.
func (node *Node) Start(ctx context.Context) error {
if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Observability.Metrics); err != nil {
return errors.Wrap(err, "failed to setup metrics")
}
if err := metrics.RegisterJaeger(node.network.Host.ID().Pretty(), node.Repo.Config().Observability.Tracing); err != nil {
return errors.Wrap(err, "failed to setup tracing")
}
err := node.chain.Start(ctx, node)
if err != nil {
return err
}
// Only set these up if there is a miner configured.
if _, err := node.MiningAddress(); err == nil {
if err := node.setupStorageMining(ctx); err != nil {
log.Errorf("setup mining failed: %v", err)
return err
}
}
// TODO: defer establishing these API endpoints until the chain is synced when the commands
// can handle their absence: https://github.com/filecoin-project/go-filecoin/issues/3137
err = node.setupProtocols()
if err != nil {
return errors.Wrap(err, "failed to set up protocols:")
}
// DRAGONS: uncomment when we have retrieval market integration
//node.RetrievalProtocol.RetrievalProvider = retrieval.NewMiner()
var syncCtx context.Context
syncCtx, node.syncer.CancelChainSync = context.WithCancel(context.Background())
// Wire up propagation of new chain heads from the chain store to other components.
head, err := node.PorcelainAPI.ChainHead()
if err != nil {
return errors.Wrap(err, "failed to get chain head")
}
go node.handleNewChainHeads(syncCtx, head)
if !node.OfflineMode {
// Subscribe to block pubsub topic to learn about new chain heads.
node.syncer.BlockSub, err = node.pubsubscribe(syncCtx, node.syncer.BlockTopic, node.handleBlockSub)
if err != nil {
log.Error(err)
}
// Subscribe to the message pubsub topic to learn about messages to mine into blocks.
// TODO: defer this subscription until after mining (block production) is started:
// https://github.com/filecoin-project/go-filecoin/issues/2145.
// This is blocked by https://github.com/filecoin-project/go-filecoin/issues/2959, which
// is necessary for message_propagate_test to start mining before testing this behaviour.
node.Messaging.MessageSub, err = node.pubsubscribe(syncCtx, node.Messaging.MessageTopic, node.processMessage)
if err != nil {
return err
}
if err := node.setupHeartbeatServices(ctx); err != nil {
return errors.Wrap(err, "failed to start heartbeat services")
}
// Start node discovery
if err := node.Discovery.Start(node); err != nil {
return err
}
if err := node.syncer.Start(syncCtx, node); err != nil {
return err
}
// Wire up syncing and possible mining
go node.doMiningPause(syncCtx)
}
return nil
}
// Subscribes a handler function to a pubsub topic.
func (node *Node) pubsubscribe(ctx context.Context, topic *pubsub.Topic, handler pubSubHandler) (pubsub.Subscription, error) {
sub, err := topic.Subscribe()
if err != nil {
return nil, errors.Wrapf(err, "failed to subscribe")
}
go node.handleSubscription(ctx, sub, handler)
return sub, nil
}
func (node *Node) setupHeartbeatServices(ctx context.Context) error {
mag := func() address.Address {
addr, err := node.MiningAddress()
// the only error MiningAddress() returns is ErrNoMinerAddress.
// if there is no configured miner address, simply send a zero
// address across the wire.
if err != nil {
return address.Undef
}
return addr
}
// start the primary heartbeat service
if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 {
hbs := metrics.NewHeartbeatService(node.Host(), node.chain.ChainReader.GenesisCid(), node.Repo.Config().Heartbeat, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go hbs.Start(ctx)
}
// check if we want to connect to an alert service. An alerting service is a heartbeat
	// service that can trigger alerts based on the contents of heartbeats.
if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 {
ahbs := metrics.NewHeartbeatService(node.Host(), node.chain.ChainReader.GenesisCid(), &config.HeartbeatConfig{
BeatTarget: alertTarget,
BeatPeriod: "10s",
ReconnectPeriod: "10s",
Nickname: node.Repo.Config().Heartbeat.Nickname,
}, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag))
go ahbs.Start(ctx)
}
return nil
}
func (node *Node) setIsMining(isMining bool) {
node.BlockMining.Mining.Lock()
defer node.BlockMining.Mining.Unlock()
node.BlockMining.Mining.IsMining = isMining
}
func (node *Node) handleNewMiningOutput(ctx context.Context, miningOutCh <-chan mining.Output) {
defer func() {
node.BlockMining.MiningDoneWg.Done()
}()
for {
select {
case <-ctx.Done():
return
case output, ok := <-miningOutCh:
if !ok {
return
}
if output.Err != nil {
log.Errorf("stopping mining. error: %s", output.Err.Error())
node.StopMining(context.Background())
} else {
node.BlockMining.MiningDoneWg.Add(1)
go func() {
if node.IsMining() {
node.BlockMining.AddNewlyMinedBlock(ctx, output)
}
node.BlockMining.MiningDoneWg.Done()
}()
}
}
}
}
func (node *Node) handleNewChainHeads(ctx context.Context, prevHead block.TipSet) {
node.chain.HeaviestTipSetCh = node.chain.ChainReader.HeadEvents().Sub(chain.NewHeadTopic)
handler := message.NewHeadHandler(node.Messaging.Inbox, node.Messaging.Outbox, node.chain.ChainReader, prevHead)
for {
select {
case ts, ok := <-node.chain.HeaviestTipSetCh:
if !ok {
return
}
newHead, ok := ts.(block.TipSet)
if !ok {
log.Warn("non-tipset published on heaviest tipset channel")
continue
}
if node.StorageMining != nil {
if err := node.StorageMining.HandleNewHead(ctx, newHead); err != nil {
log.Error(err)
}
}
if err := handler.HandleNewHead(ctx, newHead); err != nil {
log.Error(err)
}
case <-ctx.Done():
return
}
}
}
func (node *Node) cancelSubscriptions() {
if node.syncer.CancelChainSync != nil {
node.syncer.CancelChainSync()
}
if node.syncer.BlockSub != nil {
node.syncer.BlockSub.Cancel()
node.syncer.BlockSub = nil
}
if node.Messaging.MessageSub != nil {
node.Messaging.MessageSub.Cancel()
node.Messaging.MessageSub = nil
}
}
// Stop initiates the shutdown of the node.
func (node *Node) Stop(ctx context.Context) {
node.chain.ChainReader.HeadEvents().Unsub(node.chain.HeaviestTipSetCh)
node.StopMining(ctx)
node.cancelSubscriptions()
node.chain.ChainReader.Stop()
if node.StorageMining != nil {
if err := node.StorageMining.Stop(ctx); err != nil {
fmt.Printf("error stopping storage miner: %s\n", err)
}
node.StorageMining = nil
}
if err := node.Host().Close(); err != nil {
fmt.Printf("error closing host: %s\n", err)
}
if err := node.Repo.Close(); err != nil {
fmt.Printf("error closing repo: %s\n", err)
}
node.Discovery.Stop()
fmt.Println("stopping filecoin :(")
}
func (node *Node) addNewlyMinedBlock(ctx context.Context, o mining.Output) {
log.Debugf("Got a newly mined block from the mining worker: %s", o.Header)
if err := node.AddNewBlock(ctx, o); err != nil {
log.Warnf("error adding new mined block: %s. err: %s", o.Header.Cid().String(), err.Error())
}
}
func (node *Node) addMinedBlockSynchronous(ctx context.Context, o mining.Output) error {
wait := node.syncer.ChainSyncManager.BlockProposer().WaiterForTarget(block.NewTipSetKey(o.Header.Cid()))
err := node.AddNewBlock(ctx, o)
if err != nil {
return err
}
err = wait()
return err
}
// MiningAddress returns the address of the mining actor mining on behalf of
// the node.
func (node *Node) MiningAddress() (address.Address, error) {
addr := node.Repo.Config().Mining.MinerAddress
if addr.Empty() {
return address.Undef, ErrNoMinerAddress
}
return addr, nil
}
// SetupMining initializes all the functionality the node needs to start mining.
// This method is idempotent.
func (node *Node) SetupMining(ctx context.Context) error {
// ensure we have a miner actor before we even consider mining
minerAddr, err := node.MiningAddress()
if err != nil {
return errors.Wrap(err, "failed to get mining address")
}
head := node.PorcelainAPI.ChainHeadKey()
_, err = node.PorcelainAPI.MinerGetStatus(ctx, minerAddr, head)
if err != nil {
return errors.Wrap(err, "failed to get miner actor")
}
// ensure we've got our storage mining submodule configured
if node.StorageMining == nil {
if err := node.setupStorageMining(ctx); err != nil {
return err
}
}
if node.RetrievalProtocol == nil {
if err := node.setupRetrievalMining(ctx); err != nil {
return err
}
}
// ensure we have a mining worker
if node.BlockMining.MiningWorker == nil {
if node.BlockMining.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil {
return err
}
}
return nil
}
func registeredProofsFromSectorSize(ss abi.SectorSize) (registeredPoStProof abi.RegisteredProof, registeredSealProof abi.RegisteredProof, err error) {
switch ss {
case constants.DevSectorSize:
return constants.DevRegisteredPoStProof, constants.DevRegisteredSealProof, nil
case constants.ThirtyTwoGiBSectorSize:
return abi.RegisteredProof_StackedDRG32GiBPoSt, abi.RegisteredProof_StackedDRG32GiBSeal, nil
case constants.EightMiBSectorSize:
return abi.RegisteredProof_StackedDRG8MiBPoSt, abi.RegisteredProof_StackedDRG8MiBSeal, nil
case constants.FiveHundredTwelveMiBSectorSize:
return abi.RegisteredProof_StackedDRG512MiBPoSt, abi.RegisteredProof_StackedDRG512MiBSeal, nil
default:
return 0, 0, errors.Errorf("unsupported sector size %d", ss)
}
}
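// Illustrative mapping only (the sector size is assumed): calling
// registeredProofsFromSectorSize(constants.EightMiBSectorSize) yields the 8MiB
// PoSt and seal proof types, while any size not listed above results in the
// "unsupported sector size" error.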
func (node *Node) setupStorageMining(ctx context.Context) error {
if node.StorageMining != nil {
return errors.New("storage mining submodule has already been initialized")
}
minerAddr, err := node.MiningAddress()
if err != nil {
return err
}
head := node.Chain().ChainReader.GetHead()
status, err := node.PorcelainAPI.MinerGetStatus(ctx, minerAddr, head)
if err != nil {
return err
}
repoPath, err := node.Repo.Path()
if err != nil {
return err
}
sectorDir, err := paths.GetSectorPath(node.Repo.Config().SectorBase.RootDir, repoPath)
if err != nil {
return err
}
postProofType, sealProofType, err := registeredProofsFromSectorSize(status.SectorSize)
if err != nil {
return err
}
sectorBuilder, err := sectorbuilder.New(§orbuilder.Config{
PoStProofType: postProofType,
SealProofType: sealProofType,
Miner: minerAddr,
WorkerThreads: 1,
Paths: []fs.PathConfig{
{
Path: sectorDir,
Cache: false,
Weight: 1,
},
},
}, namespace.Wrap(node.Repo.Datastore(), ds.NewKey("/sectorbuilder")))
if err != nil {
return err
}
cborStore := node.Blockstore.CborStore
waiter := msg.NewWaiter(node.chain.ChainReader, node.chain.MessageStore, node.Blockstore.Blockstore, cborStore)
// TODO: rework these modules so they can be at least partially constructed during the building phase #3738
stateViewer := state.NewViewer(cborStore)
node.StorageMining, err = submodule.NewStorageMiningSubmodule(minerAddr, node.Repo.Datastore(),
sectorBuilder, &node.chain, &node.Messaging, waiter, &node.Wallet, stateViewer, node.BlockMining.PoStGenerator)
if err != nil {
return err
}
node.StorageProtocol, err = submodule.NewStorageProtocolSubmodule(
ctx,
minerAddr,
		address.Undef, // TODO: This is for setting up mining; we need to pass the client address in if this node is also going to be a storage client
&node.chain,
&node.Messaging,
waiter,
node.StorageMining.PieceManager,
node.Wallet.Signer,
node.Host(),
node.Repo.Datastore(),
node.Blockstore.Blockstore,
node.network.GraphExchange,
repoPath,
sectorBuilder.SealProofType(),
stateViewer,
)
if err != nil {
return errors.Wrap(err, "error initializing storage protocol")
}
return nil
}
func (node *Node) setupRetrievalMining(ctx context.Context) error {
providerAddr, err := node.MiningAddress()
if err != nil {
return errors.Wrap(err, "failed to get mining address")
}
rp, err := submodule.NewRetrievalProtocolSubmodule(
node.Blockstore.Blockstore,
node.Repo.Datastore(),
node.chain.State,
node.Host(),
providerAddr,
node.Wallet.Signer,
nil, // TODO: payment channel manager API, in follow-up
node.PieceManager(),
)
if err != nil {
return errors.Wrap(err, "failed to build node.RetrievalProtocol")
}
node.RetrievalProtocol = rp
return nil
}
func (node *Node) doMiningPause(ctx context.Context) {
// doMiningPause receives state transition signals from the syncer
// dispatcher allowing syncing to make progress.
//
// When mining, the node passes these signals along to the scheduler
// pausing and continuing mining based on syncer state.
catchupCh := node.Syncer().ChainSyncManager.TransitionChannel()
for {
select {
case <-ctx.Done():
return
case toCatchup, ok := <-catchupCh:
if !ok {
return
}
if node.BlockMining.MiningScheduler == nil {
// drop syncer transition signals if not mining
continue
}
if toCatchup {
node.BlockMining.MiningScheduler.Pause()
} else {
node.BlockMining.MiningScheduler.Continue()
}
}
}
}
// StartMining causes the node to start feeding blocks to the mining worker and initializes
// the StorageMining for the mining address.
func (node *Node) StartMining(ctx context.Context) error {
if node.IsMining() {
return errors.New("Node is already mining")
}
err := node.SetupMining(ctx)
if err != nil {
return errors.Wrap(err, "failed to setup mining")
}
if node.BlockMining.MiningScheduler == nil {
node.BlockMining.MiningScheduler = mining.NewScheduler(node.BlockMining.MiningWorker, node.PorcelainAPI.ChainHead, node.ChainClock)
} else if node.BlockMining.MiningScheduler.IsStarted() {
return fmt.Errorf("miner scheduler already started")
}
var miningCtx context.Context
miningCtx, node.BlockMining.CancelMining = context.WithCancel(context.Background())
outCh, doneWg := node.BlockMining.MiningScheduler.Start(miningCtx)
node.BlockMining.MiningDoneWg = doneWg
node.BlockMining.AddNewlyMinedBlock = node.addNewlyMinedBlock
node.BlockMining.MiningDoneWg.Add(1)
go node.handleNewMiningOutput(miningCtx, outCh)
if err := node.StorageMining.Start(ctx); err != nil {
fmt.Printf("error starting storage miner: %s\n", err)
}
if err := node.StorageProtocol.StorageProvider.Start(ctx); err != nil {
fmt.Printf("error starting storage provider: %s\n", err)
}
// TODO: Retrieval Market Integration
//if err := node.RetrievalProtocol.RetrievalProvider.Start(); err != nil {
// fmt.Printf("error starting retrieval provider: %s\n", err)
//}
node.setIsMining(true)
return nil
}
// StopMining stops mining on new blocks.
func (node *Node) StopMining(ctx context.Context) {
node.setIsMining(false)
if node.BlockMining.CancelMining != nil {
node.BlockMining.CancelMining()
}
if node.BlockMining.MiningDoneWg != nil {
node.BlockMining.MiningDoneWg.Wait()
}
if node.StorageMining != nil {
err := node.StorageMining.Stop(ctx)
if err != nil {
log.Warn("Error stopping storage miner", err)
}
}
}
func (node *Node) handleSubscription(ctx context.Context, sub pubsub.Subscription, handler pubSubHandler) {
for {
received, err := sub.Next(ctx)
if err != nil {
if ctx.Err() != context.Canceled {
log.Errorf("error reading message from topic %s: %s", sub.Topic(), err)
}
return
}
if err := handler(ctx, received); err != nil {
handlerName := runtime.FuncForPC(reflect.ValueOf(handler).Pointer()).Name()
if err != context.Canceled {
log.Errorf("error in handler %s for topic %s: %s", handlerName, sub.Topic(), err)
}
}
}
}
// setupProtocols creates protocol clients and miners, then sets the node's APIs
// for each
func (node *Node) setupProtocols() error {
blockMiningAPI := mining_protocol.New(
node.MiningAddress,
node.addMinedBlockSynchronous,
node.chain.ChainReader,
node.IsMining,
node.SetupMining,
node.StartMining,
node.StopMining,
node.GetMiningWorker,
node.ChainClock,
)
node.BlockMining.BlockMiningAPI = &blockMiningAPI
return nil
}
// GetMiningWorker ensures mining is set up and then returns the worker
func (node *Node) GetMiningWorker(ctx context.Context) (*mining.DefaultWorker, error) {
if err := node.SetupMining(ctx); err != nil {
return nil, err
}
return node.BlockMining.MiningWorker, nil
}
// CreateMiningWorker creates a mining.Worker for the node using the configured
// getStateTree, getWeight, and getAncestors functions for the node
func (node *Node) CreateMiningWorker(ctx context.Context) (*mining.DefaultWorker, error) {
minerAddr, err := node.MiningAddress()
if err != nil {
return nil, errors.Wrap(err, "failed to get mining address")
}
head := node.PorcelainAPI.ChainHeadKey()
minerStatus, err := node.PorcelainAPI.MinerGetStatus(ctx, minerAddr, head)
if err != nil {
log.Errorf("could not get owner address of miner actor")
return nil, err
}
return mining.NewDefaultWorker(mining.WorkerParameters{
API: node.PorcelainAPI,
MinerAddr: minerAddr,
MinerOwnerAddr: minerStatus.OwnerAddress,
WorkerSigner: node.Wallet.Signer,
GetStateTree: node.chain.ChainReader.GetTipSetState,
GetWeight: node.getWeight,
Election: consensus.NewElectionMachine(node.PorcelainAPI),
TicketGen: consensus.NewTicketMachine(node.PorcelainAPI),
TipSetMetadata: node.chain.ChainReader,
MessageSource: node.Messaging.Inbox.Pool(),
MessageStore: node.chain.MessageStore,
MessageQualifier: consensus.NewMessagePenaltyChecker(node.Chain().State),
Blockstore: node.Blockstore.Blockstore,
Clock: node.ChainClock,
Poster: node.StorageMining.PoStGenerator,
}), nil
}
// getWeight is the default GetWeight function for the mining worker.
func (node *Node) getWeight(ctx context.Context, ts block.TipSet) (fbig.Int, error) {
parent, err := ts.Parents()
if err != nil {
return fbig.Zero(), err
}
var baseStRoot cid.Cid
if parent.Empty() {
// use genesis state as parent state of genesis block
baseStRoot, err = node.chain.ChainReader.GetTipSetStateRoot(ts.Key())
} else {
baseStRoot, err = node.chain.ChainReader.GetTipSetStateRoot(parent)
}
if err != nil {
return fbig.Zero(), err
}
return node.syncer.ChainSelector.Weight(ctx, ts, baseStRoot)
}
// -- Accessors
// Host returns the node's host.
func (node *Node) Host() host.Host {
return node.network.Host
}
// PieceManager returns the node's PieceManager.
func (node *Node) PieceManager() piecemanager.PieceManager {
return node.StorageMining.PieceManager
}
// BlockService returns the node's blockservice.
func (node *Node) BlockService() bserv.BlockService {
return node.Blockservice.Blockservice
}
// CborStore returns the node's cborStore.
func (node *Node) CborStore() *cborutil.IpldStore {
return node.Blockstore.CborStore
}
// IsMining returns a boolean indicating whether the node is mining blocks.
func (node *Node) IsMining() bool {
node.BlockMining.Mining.Lock()
defer node.BlockMining.Mining.Unlock()
return node.BlockMining.Mining.IsMining
}
// Chain returns the chain submodule.
func (node *Node) Chain() submodule.ChainSubmodule {
return node.chain
}
// Syncer returns the syncer submodule.
func (node *Node) Syncer() submodule.SyncerSubmodule {
return node.syncer
}
// Network returns the network submodule.
func (node *Node) Network() submodule.NetworkSubmodule {
return node.network
}
| 1 | 23,422 | Sectorbuilder behaves differently depending on whether it's given 1 or more threads. It won't seal if only given 1. | filecoin-project-venus | go |
@@ -16,8 +16,11 @@
package azkaban.metrics;
+import static azkaban.ServiceProvider.SERVICE_PROVIDER;
+
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
+import com.google.inject.Inject;
import java.util.concurrent.atomic.AtomicLong;
/** | 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.metrics;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import java.util.concurrent.atomic.AtomicLong;
/**
* This singleton class CommonMetrics is in charge of collecting varieties of metrics
* which are accessed in both web and exec modules. That said, these metrics will be
* exposed in both Web server and executor.
*/
public enum CommonMetrics {
INSTANCE;
private final AtomicLong dbConnectionTime = new AtomicLong(0L);
private final AtomicLong OOMWaitingJobCount = new AtomicLong(0L);
private final MetricRegistry registry;
private Meter dbConnectionMeter;
private Meter flowFailMeter;
CommonMetrics() {
this.registry = MetricsManager.INSTANCE.getRegistry();
setupAllMetrics();
}
private void setupAllMetrics() {
this.dbConnectionMeter = MetricsUtility.addMeter("DB-Connection-meter", this.registry);
this.flowFailMeter = MetricsUtility.addMeter("flow-fail-meter", this.registry);
MetricsUtility.addGauge("OOM-waiting-job-count", this.registry, this.OOMWaitingJobCount::get);
MetricsUtility.addGauge("dbConnectionTime", this.registry, this.dbConnectionTime::get);
}
/**
   * Mark the occurrence of a DB query event.
*/
public void markDBConnection() {
/*
* This method should be Thread Safe.
* Two reasons that we don't make this function call synchronized:
* 1). drop wizard metrics deals with concurrency internally;
* 2). mark is basically a math addition operation, which should not cause race condition issue.
*/
this.dbConnectionMeter.mark();
}
/**
* Mark flowFailMeter when a flow is considered as FAILED.
* This method could be called by Web Server or Executor, as they both detect flow failure.
*/
public void markFlowFail() {
this.flowFailMeter.mark();
}
public void setDBConnectionTime(final long milliseconds) {
this.dbConnectionTime.set(milliseconds);
}
/**
   * Mark the occurrence of a job waiting event due to OOM
*/
public void incrementOOMJobWaitCount() {
this.OOMWaitingJobCount.incrementAndGet();
}
/**
   * Unmark the occurrence of a job waiting event due to OOM
*/
public void decrementOOMJobWaitCount() {
this.OOMWaitingJobCount.decrementAndGet();
}
}
 | 1 | 13,692 | I'm inclined to think we should pass the MetricsManager in as a constructor parameter in this case. | azkaban-azkaban | java |
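The review comment above suggests injecting the metrics manager through the constructor rather than reading the MetricsManager.INSTANCE singleton inside the class. A minimal sketch of that shape, assuming Guice-style injection (in line with the com.google.inject.Inject import added by the patch); the class name and wiring below are illustrative, not the actual Azkaban change:

import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.google.inject.Inject;
import com.google.inject.Singleton;

@Singleton
public class InjectedCommonMetrics {

  private final MetricRegistry registry;
  private final Meter dbConnectionMeter;

  // The MetricsManager arrives as a constructor parameter, so tests can hand in
  // a fake registry and the class no longer reaches for a global singleton.
  @Inject
  public InjectedCommonMetrics(final MetricsManager metricsManager) {
    this.registry = metricsManager.getRegistry();
    this.dbConnectionMeter = this.registry.meter("DB-Connection-meter");
  }

  // Same thread-safety argument as the original markDBConnection():
  // Meter.mark() is safe to call concurrently without extra locking.
  public void markDBConnection() {
    this.dbConnectionMeter.mark();
  }
}

With that wiring, the choice of registry lives in the injector configuration instead of in the enum's static initializer, which is the trade-off the reviewer is pointing at.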
@@ -28,11 +28,13 @@ import org.apache.iceberg.transforms.Transform;
*/
public class PartitionField implements Serializable {
private final int sourceId;
+ private final int fieldId;
private final String name;
private final Transform<?, ?> transform;
- PartitionField(int sourceId, String name, Transform<?, ?> transform) {
+ PartitionField(int sourceId, int fieldId, String name, Transform<?, ?> transform) {
this.sourceId = sourceId;
+ this.fieldId = fieldId;
this.name = name;
this.transform = transform;
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import com.google.common.base.Objects;
import java.io.Serializable;
import org.apache.iceberg.transforms.Transform;
/**
* Represents a single field in a {@link PartitionSpec}.
*/
public class PartitionField implements Serializable {
private final int sourceId;
private final String name;
private final Transform<?, ?> transform;
PartitionField(int sourceId, String name, Transform<?, ?> transform) {
this.sourceId = sourceId;
this.name = name;
this.transform = transform;
}
/**
* @return the field id of the source field in the {@link PartitionSpec spec's} table schema
*/
public int sourceId() {
return sourceId;
}
/**
* @return the name of this partition field
*/
public String name() {
return name;
}
/**
* @return the transform used to produce partition values from source values
*/
public Transform<?, ?> transform() {
return transform;
}
@Override
public String toString() {
return name + ": " + transform + "(" + sourceId + ")";
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
} else if (!(other instanceof PartitionField)) {
return false;
}
PartitionField that = (PartitionField) other;
return sourceId == that.sourceId &&
name.equals(that.name) &&
transform.equals(that.transform);
}
@Override
public int hashCode() {
return Objects.hashCode(sourceId, name, transform);
}
}
| 1 | 18,692 | Do we need to check `fieldId` is larger than 1000? | apache-iceberg | java |
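The question above is whether the new fieldId should be validated when a PartitionField is built. If such a guard were wanted, a hedged sketch is below; the constant and helper are hypothetical (they only mirror the convention that partition field ids start at 1000 so they do not collide with schema field ids) and are not part of the actual patch:

import com.google.common.base.Preconditions;

final class PartitionFieldIds {
  // Hypothetical baseline: partition field ids conventionally start at 1000.
  static final int PARTITION_DATA_ID_START = 1000;

  private PartitionFieldIds() {
  }

  // Hypothetical guard that a package-private constructor could call.
  static int checkFieldId(int fieldId) {
    Preconditions.checkArgument(
        fieldId >= PARTITION_DATA_ID_START,
        "Invalid partition field id %s, must be >= %s", fieldId, PARTITION_DATA_ID_START);
    return fieldId;
  }
}

Whether a check like this belongs in the constructor or in the spec builder that assigns the ids is exactly the decision the review question is raising.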
@@ -356,10 +356,7 @@ void StatefulWriter::unsent_change_added_to_history(
periodic_hb_event_->restart_timer(max_blocking_time);
}
- if ( (mp_listener != nullptr) && this->is_acked_by_all(change) )
- {
- mp_listener->onWriterChangeReceivedByAll(this, change);
- }
+ check_acked_status();
if (disable_positive_acks_ && last_sequence_number_ == SequenceNumber_t())
{ | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file StatefulWriter.cpp
*
*/
#include <fastdds/rtps/writer/StatefulWriter.h>
#include <fastdds/rtps/writer/WriterListener.h>
#include <fastdds/rtps/writer/ReaderProxy.h>
#include <fastdds/rtps/resources/AsyncWriterThread.h>
#include <rtps/participant/RTPSParticipantImpl.h>
#include <rtps/flowcontrol/FlowController.h>
#include <fastdds/rtps/messages/RTPSMessageCreator.h>
#include <fastdds/rtps/messages/RTPSMessageGroup.h>
#include <fastdds/rtps/participant/RTPSParticipant.h>
#include <fastdds/rtps/resources/ResourceEvent.h>
#include <fastdds/rtps/resources/TimedEvent.h>
#include <fastdds/rtps/history/WriterHistory.h>
#include <fastdds/dds/log/Log.hpp>
#include <fastrtps/utils/TimeConversion.h>
#include <fastdds/rtps/builtin/BuiltinProtocols.h>
#include <fastdds/rtps/builtin/liveliness/WLP.h>
#include <rtps/writer/RTPSWriterCollector.h>
#include "rtps/RTPSDomainImpl.hpp"
#include "rtps/messages/RTPSGapBuilder.hpp"
#include <mutex>
#include <vector>
#include <stdexcept>
namespace eprosima {
namespace fastrtps {
namespace rtps {
template<typename UnaryFun>
bool send_data_or_fragments(
RTPSMessageGroup& group,
CacheChange_t* change,
bool inline_qos,
UnaryFun sent_fun)
{
bool sent_ok = true;
if (change->getFragmentSize() > 0)
{
for (FragmentNumber_t frag = 1; frag <= change->getFragmentCount(); frag++)
{
sent_ok &= group.add_data_frag(*change, frag, inline_qos);
if (sent_ok)
{
sent_fun(frag);
}
else
{
logError(RTPS_WRITER, "Error sending fragment ("
<< change->sequenceNumber << ", " << frag << ")");
break;
}
}
}
else
{
sent_ok = group.add_data(*change, inline_qos);
if (sent_ok)
{
sent_fun(0);
}
else
{
logError(RTPS_WRITER, "Error sending change " << change->sequenceNumber);
}
}
return sent_ok;
}
static void null_sent_fun(
FragmentNumber_t /*frag*/)
{
}
using namespace std::chrono;
StatefulWriter::StatefulWriter(
RTPSParticipantImpl* pimpl,
const GUID_t& guid,
const WriterAttributes& att,
WriterHistory* hist,
WriterListener* listen)
: RTPSWriter(pimpl, guid, att, hist, listen)
, periodic_hb_event_(nullptr)
, nack_response_event_(nullptr)
, ack_event_(nullptr)
, m_heartbeatCount(0)
, m_times(att.times)
, matched_readers_(att.matched_readers_allocation)
, matched_readers_pool_(att.matched_readers_allocation)
, next_all_acked_notify_sequence_(0, 1)
, all_acked_(false)
, may_remove_change_cond_()
, may_remove_change_(0)
, disable_heartbeat_piggyback_(att.disable_heartbeat_piggyback)
, disable_positive_acks_(att.disable_positive_acks)
, keep_duration_us_(att.keep_duration.to_ns() * 1e-3)
, last_sequence_number_()
, biggest_removed_sequence_number_()
, sendBufferSize_(pimpl->get_min_network_send_buffer_size())
, currentUsageSendBufferSize_(static_cast<int32_t>(pimpl->get_min_network_send_buffer_size()))
, m_controllers()
{
m_heartbeatCount = 0;
const RTPSParticipantAttributes& part_att = pimpl->getRTPSParticipantAttributes();
periodic_hb_event_ = new TimedEvent(pimpl->getEventResource(), [&]() -> bool
{
return send_periodic_heartbeat();
},
TimeConv::Time_t2MilliSecondsDouble(m_times.heartbeatPeriod));
nack_response_event_ = new TimedEvent(pimpl->getEventResource(), [&]() -> bool
{
perform_nack_response();
return false;
},
TimeConv::Time_t2MilliSecondsDouble(m_times.nackResponseDelay));
if (disable_positive_acks_)
{
ack_event_ = new TimedEvent(pimpl->getEventResource(), [&]() -> bool
{
return ack_timer_expired();
},
att.keep_duration.to_ns() * 1e-6); // in milliseconds
}
for (size_t n = 0; n < att.matched_readers_allocation.initial; ++n)
{
matched_readers_pool_.push_back(new ReaderProxy(m_times, part_att.allocation.locators, this));
}
}
StatefulWriter::~StatefulWriter()
{
logInfo(RTPS_WRITER, "StatefulWriter destructor");
for (std::unique_ptr<FlowController>& controller : m_controllers)
{
controller->disable();
}
if (disable_positive_acks_)
{
delete(ack_event_);
ack_event_ = nullptr;
}
if (nack_response_event_ != nullptr)
{
delete(nack_response_event_);
nack_response_event_ = nullptr;
}
mp_RTPSParticipant->async_thread().unregister_writer(this);
// After unregistering writer from AsyncWriterThread, delete all flow_controllers because they register the writer in
// the AsyncWriterThread.
m_controllers.clear();
// Stop all active proxies and pass them to the pool
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
while (!matched_readers_.empty())
{
ReaderProxy* remote_reader = matched_readers_.back();
matched_readers_.pop_back();
remote_reader->stop();
matched_readers_pool_.push_back(remote_reader);
}
}
// Destroy heartbeat event
if (periodic_hb_event_ != nullptr)
{
delete(periodic_hb_event_);
periodic_hb_event_ = nullptr;
}
// Delete all proxies in the pool
for (ReaderProxy* remote_reader : matched_readers_pool_)
{
delete(remote_reader);
}
}
/*
* CHANGE-RELATED METHODS
*/
void StatefulWriter::unsent_change_added_to_history(
CacheChange_t* change,
const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
if (liveliness_lease_duration_ < c_TimeInfinite)
{
mp_RTPSParticipant->wlp()->assert_liveliness(
getGuid(),
liveliness_kind_,
liveliness_lease_duration_);
}
#if HAVE_SECURITY
encrypt_cachechange(change);
#endif // if HAVE_SECURITY
if (!matched_readers_.empty())
{
if (!isAsync())
{
//TODO(Ricardo) Temporal.
bool expectsInlineQos = false;
// First step is to add the new CacheChange_t to all reader proxies.
// It has to be done before sending, because if a timeout is catched, we will not include the
// CacheChange_t in some reader proxies.
for (ReaderProxy* it : matched_readers_)
{
ChangeForReader_t changeForReader(change);
if (m_pushMode)
{
if (it->is_reliable())
{
changeForReader.setStatus(UNDERWAY);
}
else
{
changeForReader.setStatus(ACKNOWLEDGED);
}
}
else
{
changeForReader.setStatus(UNACKNOWLEDGED);
}
changeForReader.setRelevance(it->rtps_is_relevant(change));
it->add_change(changeForReader, true, max_blocking_time);
expectsInlineQos |= it->expects_inline_qos();
}
try
{
//At this point we are sure all information was stored. We now can send data.
if (!m_separateSendingEnabled)
{
if (locator_selector_.selected_size() > 0)
{
RTPSMessageGroup group(mp_RTPSParticipant, this, *this, max_blocking_time);
auto sent_fun = [this, change](
FragmentNumber_t frag)
{
if (frag > 0)
{
for (ReaderProxy* it : matched_readers_)
{
if (!it->is_local_reader())
{
bool allFragmentsSent = false;
it->mark_fragment_as_sent_for_change(
change->sequenceNumber,
frag,
allFragmentsSent);
}
}
}
};
send_data_or_fragments(group, change, expectsInlineQos, sent_fun);
send_heartbeat_nts_(all_remote_readers_.size(), group, disable_positive_acks_);
}
for (ReaderProxy* it : matched_readers_)
{
if (it->is_local_reader())
{
intraprocess_heartbeat(it, false);
bool delivered = intraprocess_delivery(change, it);
it->set_change_to_status(
change->sequenceNumber,
delivered ? ACKNOWLEDGED : UNDERWAY,
false);
}
}
}
else
{
for (ReaderProxy* it : matched_readers_)
{
if (it->is_local_reader())
{
intraprocess_heartbeat(it, false);
bool delivered = intraprocess_delivery(change, it);
it->set_change_to_status(
change->sequenceNumber,
delivered ? ACKNOWLEDGED : UNDERWAY,
false);
}
else
{
RTPSMessageGroup group(mp_RTPSParticipant, this, it->message_sender(),
max_blocking_time);
if (change->getFragmentCount() > 0)
{
logError(RTPS_WRITER, "Cannot send large messages on separate sending mode");
}
else
{
if (!group.add_data(*change, it->expects_inline_qos()))
{
logError(RTPS_WRITER, "Error sending change " << change->sequenceNumber);
}
}
uint32_t last_processed = 0;
send_heartbeat_piggyback_nts_(it, group, last_processed);
}
}
}
if (there_are_remote_readers_)
{
periodic_hb_event_->restart_timer(max_blocking_time);
}
if ( (mp_listener != nullptr) && this->is_acked_by_all(change) )
{
mp_listener->onWriterChangeReceivedByAll(this, change);
}
if (disable_positive_acks_ && last_sequence_number_ == SequenceNumber_t())
{
last_sequence_number_ = change->sequenceNumber;
}
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
else
{
for (ReaderProxy* it : matched_readers_)
{
ChangeForReader_t changeForReader(change);
if (m_pushMode)
{
changeForReader.setStatus(UNSENT);
}
else
{
changeForReader.setStatus(UNACKNOWLEDGED);
}
changeForReader.setRelevance(it->rtps_is_relevant(change));
it->add_change(changeForReader, false, max_blocking_time);
}
if (m_pushMode)
{
mp_RTPSParticipant->async_thread().wake_up(this, max_blocking_time);
}
}
if (disable_positive_acks_)
{
auto source_timestamp = system_clock::time_point() + nanoseconds(change->sourceTimestamp.to_ns());
auto now = system_clock::now();
auto interval = source_timestamp - now + keep_duration_us_;
assert(interval.count() >= 0);
ack_event_->update_interval_millisec((double)duration_cast<milliseconds>(interval).count());
ack_event_->restart_timer(max_blocking_time);
}
}
else
{
logInfo(RTPS_WRITER, "No reader proxy to add change.");
if (mp_listener != nullptr)
{
mp_listener->onWriterChangeReceivedByAll(this, change);
}
}
}
bool StatefulWriter::intraprocess_delivery(
CacheChange_t* change,
ReaderProxy* reader_proxy)
{
RTPSReader* reader = reader_proxy->local_reader();
if (reader)
{
if (change->write_params.related_sample_identity() != SampleIdentity::unknown())
{
change->write_params.sample_identity(change->write_params.related_sample_identity());
}
return reader->processDataMsg(change);
}
return false;
}
bool StatefulWriter::intraprocess_gap(
ReaderProxy* reader_proxy,
const SequenceNumber_t& seq_num)
{
RTPSReader* reader = reader_proxy->local_reader();
if (reader)
{
return reader->processGapMsg(m_guid, seq_num, SequenceNumberSet_t(seq_num + 1));
}
return false;
}
bool StatefulWriter::intraprocess_heartbeat(
ReaderProxy* reader_proxy,
bool liveliness)
{
bool returned_value = false;
std::lock_guard<RecursiveTimedMutex> guardW(mp_mutex);
RTPSReader* reader = RTPSDomainImpl::find_local_reader(reader_proxy->guid());
if (reader)
{
SequenceNumber_t first_seq = get_seq_num_min();
SequenceNumber_t last_seq = get_seq_num_max();
if (first_seq == c_SequenceNumber_Unknown || last_seq == c_SequenceNumber_Unknown)
{
if (liveliness)
{
first_seq = next_sequence_number();
last_seq = first_seq - 1;
}
}
if (first_seq != c_SequenceNumber_Unknown && last_seq != c_SequenceNumber_Unknown)
{
incrementHBCount();
if (true == (returned_value =
reader->processHeartbeatMsg(m_guid, m_heartbeatCount, first_seq, last_seq, true, liveliness)))
{
if (reader_proxy->durability_kind() < TRANSIENT_LOCAL ||
this->getAttributes().durabilityKind < TRANSIENT_LOCAL)
{
SequenceNumber_t last_irrelevance = reader_proxy->changes_low_mark();
if (first_seq <= last_irrelevance)
{
reader->processGapMsg(m_guid, first_seq, SequenceNumberSet_t(last_irrelevance + 1));
}
}
}
}
}
return returned_value;
}
bool StatefulWriter::change_removed_by_history(
CacheChange_t* a_change)
{
SequenceNumber_t sequence_number = a_change->sequenceNumber;
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
logInfo(RTPS_WRITER, "Change " << sequence_number << " to be removed.");
// Take note of biggest removed sequence number to improve sending of gaps
if (sequence_number > biggest_removed_sequence_number_)
{
biggest_removed_sequence_number_ = sequence_number;
}
// Invalidate CacheChange pointer in ReaderProxies.
for (ReaderProxy* it : matched_readers_)
{
it->change_has_been_removed(sequence_number);
}
may_remove_change_ = 2;
may_remove_change_cond_.notify_one();
return true;
}
void StatefulWriter::send_any_unsent_changes()
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
bool activateHeartbeatPeriod = false;
SequenceNumber_t max_sequence = mp_history->next_sequence_number();
if (!m_pushMode || mp_history->getHistorySize() == 0 || matched_readers_.empty())
{
send_heartbeat_to_all_readers();
}
else if (m_separateSendingEnabled)
{
send_changes_separatedly(max_sequence, activateHeartbeatPeriod);
}
else
{
bool no_flow_controllers = m_controllers.empty() && mp_RTPSParticipant->getFlowControllers().empty();
if (no_flow_controllers || !there_are_remote_readers_)
{
send_all_unsent_changes(max_sequence, activateHeartbeatPeriod);
}
else
{
send_unsent_changes_with_flow_control(max_sequence, activateHeartbeatPeriod);
}
}
if (activateHeartbeatPeriod)
{
periodic_hb_event_->restart_timer();
}
// On VOLATILE writers, remove auto-acked (best effort readers) changes
check_acked_status();
logInfo(RTPS_WRITER, "Finish sending unsent changes");
}
void StatefulWriter::send_heartbeat_to_all_readers()
{
// This version is called when any of the following conditions is satisfied:
// a) push mode is false
// b) history is empty
// c) there are no matched readers
if (m_separateSendingEnabled)
{
for (ReaderProxy* reader : matched_readers_)
{
if (reader->is_local_reader())
{
intraprocess_heartbeat(reader);
}
else
{
send_heartbeat_to_nts(*reader);
}
}
}
else
{
for (ReaderProxy* reader : matched_readers_)
{
if (reader->is_local_reader())
{
intraprocess_heartbeat(reader);
}
}
if (there_are_remote_readers_)
{
RTPSMessageGroup group(mp_RTPSParticipant, this, *this);
send_heartbeat_nts_(all_remote_readers_.size(), group, disable_positive_acks_);
}
}
}
void StatefulWriter::send_changes_separatedly(
SequenceNumber_t max_sequence,
bool& activateHeartbeatPeriod)
{
// This version is called when all of the following conditions are satisfied:
// a) push mode is true
// b) history is not empty
// c) there is at least one matched reader
// d) separate sending is enabled
for (ReaderProxy* remoteReader : matched_readers_)
{
if (remoteReader->is_local_reader())
{
SequenceNumber_t max_ack_seq = SequenceNumber_t::unknown();
auto unsent_change_process =
[&](const SequenceNumber_t& seqNum, const ChangeForReader_t* unsentChange)
{
if (unsentChange != nullptr && unsentChange->isRelevant() && unsentChange->isValid())
{
if (intraprocess_delivery(unsentChange->getChange(), remoteReader))
{
max_ack_seq = seqNum;
}
else
{
remoteReader->set_change_to_status(seqNum, UNDERWAY, false);
}
}
else
{
if (intraprocess_gap(remoteReader, seqNum))
{
max_ack_seq = seqNum;
}
else
{
remoteReader->set_change_to_status(seqNum, UNDERWAY, true);
}
}
};
remoteReader->for_each_unsent_change(max_sequence, unsent_change_process);
if (max_ack_seq != SequenceNumber_t::unknown())
{
remoteReader->acked_changes_set(max_ack_seq + 1);
}
}
else
{
// Specific destination message group
RTPSMessageGroup group(mp_RTPSParticipant, this, remoteReader->message_sender());
SequenceNumber_t min_history_seq = get_seq_num_min();
if (remoteReader->is_reliable())
{
if (remoteReader->are_there_gaps())
{
send_heartbeat_nts_(1u, group, true);
}
RTPSGapBuilder gaps(group);
uint32_t lastBytesProcessed = 0;
auto sent_fun = [this, remoteReader, &lastBytesProcessed, &group](
FragmentNumber_t /*frag*/)
{
// Heartbeat piggyback.
send_heartbeat_piggyback_nts_(remoteReader, group, lastBytesProcessed);
};
auto unsent_change_process =
[&](const SequenceNumber_t& seqNum, const ChangeForReader_t* unsentChange)
{
if (unsentChange != nullptr && unsentChange->isRelevant() && unsentChange->isValid())
{
bool sent_ok = send_data_or_fragments(
group,
unsentChange->getChange(),
remoteReader->expects_inline_qos(),
sent_fun);
if (sent_ok)
{
remoteReader->set_change_to_status(seqNum, UNDERWAY, true);
activateHeartbeatPeriod = true;
}
}
else
{
if (seqNum >= min_history_seq)
{
gaps.add(seqNum);
}
remoteReader->set_change_to_status(seqNum, UNDERWAY, true);
}
};
remoteReader->for_each_unsent_change(max_sequence, unsent_change_process);
}
else
{
SequenceNumber_t max_ack_seq = SequenceNumber_t::unknown();
auto unsent_change_process =
[&](const SequenceNumber_t& seqNum, const ChangeForReader_t* unsentChange)
{
if (unsentChange != nullptr && unsentChange->isRelevant() && unsentChange->isValid())
{
bool sent_ok = send_data_or_fragments(
group,
unsentChange->getChange(),
remoteReader->expects_inline_qos(),
null_sent_fun);
if (sent_ok)
{
max_ack_seq = seqNum;
}
}
else
{
max_ack_seq = seqNum;
}
};
remoteReader->for_each_unsent_change(max_sequence, unsent_change_process);
if (max_ack_seq != SequenceNumber_t::unknown())
{
remoteReader->acked_changes_set(max_ack_seq + 1);
}
}
}
} // Readers loop
}
void StatefulWriter::send_all_intraprocess_changes(
SequenceNumber_t max_sequence)
{
for (ReaderProxy* remoteReader : matched_readers_)
{
if (remoteReader->is_local_reader())
{
intraprocess_heartbeat(remoteReader, false);
SequenceNumber_t max_ack_seq = SequenceNumber_t::unknown();
auto unsent_change_process = [&](const SequenceNumber_t& seq_num, const ChangeForReader_t* unsentChange)
{
if (unsentChange != nullptr && unsentChange->isRelevant() && unsentChange->isValid())
{
if (intraprocess_delivery(unsentChange->getChange(), remoteReader))
{
max_ack_seq = seq_num;
}
else
{
remoteReader->set_change_to_status(seq_num, UNDERWAY, false);
}
}
else
{
if (intraprocess_gap(remoteReader, seq_num))
{
max_ack_seq = seq_num;
}
else
{
remoteReader->set_change_to_status(seq_num, UNDERWAY, true);
}
}
};
remoteReader->for_each_unsent_change(max_sequence, unsent_change_process);
if (max_ack_seq != SequenceNumber_t::unknown())
{
remoteReader->acked_changes_set(max_ack_seq + 1);
}
}
}
}
void StatefulWriter::send_all_unsent_changes(
SequenceNumber_t max_sequence,
bool& activateHeartbeatPeriod)
{
// This version is called when all of the following conditions are satisfied:
// a) push mode is true
// b) history is not empty
// c) there is at least one matched reader
// d) separate sending is disabled
// e) either all matched readers are local or no flow controllers are configured
// Process intraprocess first
if (there_are_local_readers_)
{
send_all_intraprocess_changes(max_sequence);
}
if (there_are_remote_readers_)
{
static constexpr uint32_t implicit_flow_controller_size = RTPSMessageGroup::get_max_fragment_payload_size();
NetworkFactory& network = mp_RTPSParticipant->network_factory();
locator_selector_.reset(true);
network.select_locators(locator_selector_);
compute_selected_guids();
bool acknack_required = next_all_acked_notify_sequence_ < get_seq_num_min();
RTPSMessageGroup group(mp_RTPSParticipant, this, *this);
acknack_required |= send_hole_gaps_to_group(group);
uint32_t lastBytesProcessed = 0;
auto sent_fun = [this, &lastBytesProcessed, &group](
FragmentNumber_t /*frag*/)
{
// Heartbeat piggyback.
send_heartbeat_piggyback_nts_(nullptr, group, lastBytesProcessed);
};
RTPSGapBuilder gap_builder(group);
uint32_t total_sent_size = 0;
History::iterator cit;
for (cit = mp_history->changesBegin();
cit != mp_history->changesEnd() && (total_sent_size < implicit_flow_controller_size);
cit++)
{
SequenceNumber_t seq = (*cit)->sequenceNumber;
// Deselect all entries on the locator selector (we will only activate the
// readers for which this sequence number is pending)
locator_selector_.reset(false);
bool is_irrelevant = true; // Will turn to false if change is relevant for at least one reader
bool should_be_sent = false;
bool inline_qos = false;
for (ReaderProxy* remoteReader : matched_readers_)
{
if (!remoteReader->is_local_reader())
{
if (remoteReader->change_is_unsent(seq, is_irrelevant))
{
should_be_sent = true;
locator_selector_.enable(remoteReader->guid());
inline_qos |= remoteReader->expects_inline_qos();
if (is_irrelevant)
{
remoteReader->set_change_to_status(seq, UNDERWAY, true);
}
}
}
}
if (locator_selector_.state_has_changed())
{
gap_builder.flush();
group.flush_and_reset();
network.select_locators(locator_selector_);
compute_selected_guids();
}
if (should_be_sent)
{
if (is_irrelevant)
{
gap_builder.add(seq);
}
else
{
bool sent_ok = send_data_or_fragments(group, *cit, inline_qos, sent_fun);
if (sent_ok)
{
total_sent_size += (*cit)->serializedPayload.length;
bool tmp_bool = false;
for (ReaderProxy* remoteReader : matched_readers_)
{
if (!remoteReader->is_local_reader())
{
if (remoteReader->change_is_unsent(seq, tmp_bool))
{
remoteReader->set_change_to_status(seq, UNDERWAY, true);
if (remoteReader->is_reliable())
{
activateHeartbeatPeriod = true;
}
}
}
}
}
}
}
}
// Heartbeat piggyback.
if (acknack_required)
{
send_heartbeat_nts_(all_remote_readers_.size(), group, disable_positive_acks_);
}
group.flush_and_reset();
locator_selector_.reset(true);
network.select_locators(locator_selector_);
compute_selected_guids();
if (cit != mp_history->changesEnd())
{
mp_RTPSParticipant->async_thread().wake_up(this);
}
}
}
void StatefulWriter::send_unsent_changes_with_flow_control(
SequenceNumber_t max_sequence,
bool& activateHeartbeatPeriod)
{
// This version is called when all of the following conditions are satisfied:
// a) push mode is true
// b) history is not empty
// c) there is at least one matched reader
// d) separate sending is disabled
// e) there is at least one remote matched reader and flow controllers are configured
// Process intraprocess first
if (there_are_local_readers_)
{
send_all_intraprocess_changes(max_sequence);
}
// From here onwards, only remote readers should be accessed
RTPSWriterCollector<ReaderProxy*> relevantChanges;
bool heartbeat_has_been_sent = false;
NetworkFactory& network = mp_RTPSParticipant->network_factory();
locator_selector_.reset(true);
network.select_locators(locator_selector_);
compute_selected_guids();
RTPSMessageGroup group(mp_RTPSParticipant, this, *this);
// GAP for holes in history sent to the readers that need it
send_hole_gaps_to_group(group);
// Reset the state of locator_selector to select all readers
group.flush_and_reset();
locator_selector_.reset(true);
network.select_locators(locator_selector_);
compute_selected_guids();
for (ReaderProxy* remoteReader : matched_readers_)
{
// Skip local readers (were processed before)
if (remoteReader->is_local_reader())
{
continue;
}
if (!heartbeat_has_been_sent && remoteReader->are_there_gaps())
{
send_heartbeat_nts_(all_remote_readers_.size(), group, true);
heartbeat_has_been_sent = true;
}
RTPSGapBuilder gaps(group, remoteReader->guid());
auto unsent_change_process = [&](const SequenceNumber_t& seq_num, const ChangeForReader_t* unsentChange)
{
if (unsentChange != nullptr && unsentChange->isRelevant() && unsentChange->isValid())
{
relevantChanges.add_change(
unsentChange->getChange(), remoteReader, unsentChange->getUnsentFragments());
}
else
{
// Skip holes in history, as they were added before
if (unsentChange != nullptr && remoteReader->is_reliable())
{
gaps.add(seq_num);
}
remoteReader->set_change_to_status(seq_num, UNDERWAY, true);
}
};
remoteReader->for_each_unsent_change(max_sequence, unsent_change_process);
}
// Clear all relevant changes through the local controllers first
for (std::unique_ptr<FlowController>& controller : m_controllers)
{
(*controller)(relevantChanges);
}
// Clear all relevant changes through the parent controllers
for (std::unique_ptr<FlowController>& controller : mp_RTPSParticipant->getFlowControllers())
{
(*controller)(relevantChanges);
}
try
{
uint32_t lastBytesProcessed = 0;
while (!relevantChanges.empty())
{
RTPSWriterCollector<ReaderProxy*>::Item changeToSend = relevantChanges.pop();
bool expectsInlineQos = false;
locator_selector_.reset(false);
for (const ReaderProxy* remoteReader : changeToSend.remoteReaders)
{
locator_selector_.enable(remoteReader->guid());
expectsInlineQos |= remoteReader->expects_inline_qos();
}
if (locator_selector_.state_has_changed())
{
group.flush_and_reset();
network.select_locators(locator_selector_);
compute_selected_guids();
}
// TODO(Ricardo) Flowcontroller has to be used in RTPSMessageGroup. Study.
// And controllers are notified about the changes being sent
FlowController::NotifyControllersChangeSent(changeToSend.cacheChange);
if (changeToSend.fragmentNumber != 0)
{
if (group.add_data_frag(*changeToSend.cacheChange, changeToSend.fragmentNumber,
expectsInlineQos))
{
bool must_wake_up_async_thread = false;
for (ReaderProxy* remoteReader : changeToSend.remoteReaders)
{
bool allFragmentsSent = false;
if (remoteReader->mark_fragment_as_sent_for_change(
changeToSend.sequenceNumber,
changeToSend.fragmentNumber,
allFragmentsSent))
{
must_wake_up_async_thread |= !allFragmentsSent;
if (remoteReader->is_remote_and_reliable())
{
activateHeartbeatPeriod = true;
if (allFragmentsSent)
{
remoteReader->set_change_to_status(changeToSend.sequenceNumber,
UNDERWAY,
true);
}
}
else
{
if (allFragmentsSent)
{
remoteReader->set_change_to_status(changeToSend.sequenceNumber,
ACKNOWLEDGED, false);
}
}
}
}
if (must_wake_up_async_thread)
{
mp_RTPSParticipant->async_thread().wake_up(this);
}
}
else
{
logError(RTPS_WRITER, "Error sending fragment (" << changeToSend.sequenceNumber <<
", " << changeToSend.fragmentNumber << ")");
}
}
else
{
if (group.add_data(*changeToSend.cacheChange, expectsInlineQos))
{
for (ReaderProxy* remoteReader : changeToSend.remoteReaders)
{
remoteReader->set_change_to_status(changeToSend.sequenceNumber, UNDERWAY, true);
if (remoteReader->is_remote_and_reliable())
{
activateHeartbeatPeriod = true;
}
}
}
else
{
logError(RTPS_WRITER, "Error sending change " << changeToSend.sequenceNumber);
}
}
// Heartbeat piggyback.
send_heartbeat_piggyback_nts_(nullptr, group, lastBytesProcessed);
}
group.flush_and_reset();
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
locator_selector_.reset(true);
network.select_locators(locator_selector_);
compute_selected_guids();
}
bool StatefulWriter::send_hole_gaps_to_group(
RTPSMessageGroup& group)
{
bool ret_val = false;
// Add holes in history and send them to all readers in group
SequenceNumber_t max_removed = biggest_removed_sequence_number_;
SequenceNumber_t last_sequence = mp_history->next_sequence_number();
SequenceNumber_t min_history_seq = get_seq_num_min();
uint32_t history_size = static_cast<uint32_t>(mp_history->getHistorySize());
if ( (min_readers_low_mark_ < max_removed) && // some holes pending acknowledgement
(min_history_seq + history_size != last_sequence)) // There is a hole in the history
{
try
{
// Only send gaps to readers requiring it
select_all_readers_with_lowmark_below(max_removed, group);
send_heartbeat_nts_(all_remote_readers_.size(), group, true);
ret_val = true;
// Find holes in history from min_history_seq to last_sequence - 1
RTPSGapBuilder gap_builder(group);
// Algorithm starts in min_history_seq
SequenceNumber_t seq = min_history_seq;
// Loop all history
for (auto cit = mp_history->changesBegin(); cit != mp_history->changesEnd(); cit++)
{
// Add all sequence numbers until the change's sequence number
while (seq < (*cit)->sequenceNumber)
{
gap_builder.add(seq);
seq++;
}
// Skip change's sequence number
seq++;
}
// Add all sequence numbers above last change
while (seq < last_sequence)
{
gap_builder.add(seq);
seq++;
}
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
return ret_val;
}
void StatefulWriter::select_all_readers_with_lowmark_below(
SequenceNumber_t seq,
RTPSMessageGroup& group)
{
// Deselect all entries on the locator selector (we will only activate the
// readers for which this sequence number is pending)
locator_selector_.reset(false);
for (ReaderProxy* remoteReader : matched_readers_)
{
if (remoteReader->changes_low_mark() < seq)
{
locator_selector_.enable(remoteReader->guid());
}
}
if (locator_selector_.state_has_changed())
{
group.flush_and_reset();
getRTPSParticipant()->network_factory().select_locators(locator_selector_);
compute_selected_guids();
}
}
/*
* MATCHED_READER-RELATED METHODS
*/
void StatefulWriter::update_reader_info(
bool create_sender_resources)
{
update_cached_info_nts();
compute_selected_guids();
if (create_sender_resources)
{
RTPSParticipantImpl* part = getRTPSParticipant();
locator_selector_.for_each([part](const Locator_t& loc)
{
part->createSenderResources(loc);
});
}
// Check if we have local or remote readers
size_t n_readers = matched_readers_.size();
there_are_remote_readers_ = false;
there_are_local_readers_ = false;
size_t i = 0;
for (; i < n_readers && !there_are_remote_readers_; ++i)
{
bool is_local = matched_readers_.at(i)->is_local_reader();
there_are_remote_readers_ |= !is_local;
there_are_local_readers_ |= is_local;
}
for (; i < n_readers && !there_are_local_readers_; ++i)
{
bool is_local = matched_readers_.at(i)->is_local_reader();
there_are_local_readers_ |= is_local;
}
}
bool StatefulWriter::matched_reader_add(
const ReaderProxyData& rdata)
{
if (rdata.guid() == c_Guid_Unknown)
{
logError(RTPS_WRITER, "Reliable Writer need GUID_t of matched readers");
return false;
}
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
// Check if it is already matched.
for (ReaderProxy* it : matched_readers_)
{
if (it->guid() == rdata.guid())
{
logInfo(RTPS_WRITER, "Attempting to add existing reader, updating information.");
if (it->update(rdata))
{
update_reader_info(true);
}
return false;
}
}
// Get a reader proxy from the inactive pool (or create a new one if necessary and allowed)
ReaderProxy* rp = nullptr;
if (matched_readers_pool_.empty())
{
size_t max_readers = matched_readers_pool_.max_size();
if (matched_readers_.size() + matched_readers_pool_.size() < max_readers)
{
const RTPSParticipantAttributes& part_att = mp_RTPSParticipant->getRTPSParticipantAttributes();
rp = new ReaderProxy(m_times, part_att.allocation.locators, this);
}
else
{
logWarning(RTPS_WRITER, "Maximum number of reader proxies (" << max_readers << \
") reached for writer " << m_guid);
return false;
}
}
else
{
rp = matched_readers_pool_.back();
matched_readers_pool_.pop_back();
}
// Add info of new datareader.
rp->start(rdata);
locator_selector_.add_entry(rp->locator_selector_entry());
matched_readers_.push_back(rp);
update_reader_info(true);
RTPSMessageGroup group(mp_RTPSParticipant, this, rp->message_sender());
// Add initial heartbeat to message group
send_heartbeat_nts_(1u, group, disable_positive_acks_);
SequenceNumber_t current_seq = get_seq_num_min();
SequenceNumber_t last_seq = get_seq_num_max();
if (current_seq != SequenceNumber_t::unknown())
{
(void)last_seq;
assert(last_seq != SequenceNumber_t::unknown());
assert(current_seq <= last_seq);
RTPSGapBuilder gap_builder(group);
bool is_reliable = rp->is_reliable();
for (History::iterator cit = mp_history->changesBegin(); cit != mp_history->changesEnd(); ++cit)
{
// This is to cover the case when there are holes in the history
if (is_reliable)
{
while (current_seq != (*cit)->sequenceNumber)
{
if (rp->is_local_reader())
{
intraprocess_gap(rp, current_seq);
}
else
{
try
{
gap_builder.add(current_seq);
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
++current_seq;
}
}
else
{
current_seq = (*cit)->sequenceNumber;
}
ChangeForReader_t changeForReader(*cit);
bool relevance =
rp->durability_kind() >= TRANSIENT_LOCAL &&
m_att.durabilityKind >= TRANSIENT_LOCAL &&
rp->rtps_is_relevant(*cit);
changeForReader.setRelevance(relevance);
if (!relevance && is_reliable)
{
if (rp->is_local_reader())
{
intraprocess_gap(rp, current_seq);
}
else
{
try
{
gap_builder.add(current_seq);
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
}
// The ChangeForReader_t status has to be UNACKNOWLEDGED
if (!rp->is_local_reader() || !changeForReader.isRelevant())
{
changeForReader.setStatus(UNACKNOWLEDGED);
}
rp->add_change(changeForReader, false);
++current_seq;
}
// This is to cover the case where the last changes have been removed from the history
if (is_reliable)
{
while (current_seq < next_sequence_number())
{
if (rp->is_local_reader())
{
intraprocess_gap(rp, current_seq);
}
else
{
try
{
gap_builder.add(current_seq);
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
++current_seq;
}
}
try
{
if (rp->is_local_reader())
{
mp_RTPSParticipant->async_thread().wake_up(this);
}
else if (is_reliable)
{
// Send Gap
gap_builder.flush();
}
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
// Always activate heartbeat period. We need a confirmation of the reader.
// The state has to be updated.
periodic_hb_event_->restart_timer();
}
try
{
// Send all messages
group.flush_and_reset();
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
logInfo(RTPS_WRITER, "Reader Proxy " << rp->guid() << " added to " << this->m_guid.entityId << " with "
<< rdata.remote_locators().unicast.size() << "(u)-"
<< rdata.remote_locators().multicast.size() <<
"(m) locators");
return true;
}
bool StatefulWriter::matched_reader_remove(
const GUID_t& reader_guid)
{
ReaderProxy* rproxy = nullptr;
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
ReaderProxyIterator it = matched_readers_.begin();
while (it != matched_readers_.end())
{
if ((*it)->guid() == reader_guid)
{
logInfo(RTPS_WRITER, "Reader Proxy removed: " << reader_guid);
rproxy = std::move(*it);
it = matched_readers_.erase(it);
continue;
}
++it;
}
locator_selector_.remove_entry(reader_guid);
update_reader_info(false);
if (matched_readers_.size() == 0)
{
periodic_hb_event_->cancel_timer();
}
if (rproxy != nullptr)
{
rproxy->stop();
matched_readers_pool_.push_back(rproxy);
lock.unlock();
check_acked_status();
return true;
}
logInfo(RTPS_HISTORY, "Reader Proxy doesn't exist in this writer");
return false;
}
bool StatefulWriter::matched_reader_is_matched(
const GUID_t& reader_guid)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
for (ReaderProxy* it : matched_readers_)
{
if (it->guid() == reader_guid)
{
return true;
}
}
return false;
}
bool StatefulWriter::matched_reader_lookup(
GUID_t& readerGuid,
ReaderProxy** RP)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
for (ReaderProxy* it : matched_readers_)
{
if (it->guid() == readerGuid)
{
*RP = it;
return true;
}
}
return false;
}
bool StatefulWriter::is_acked_by_all(
const CacheChange_t* change) const
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
if (change->writerGUID != this->getGuid())
{
logWarning(RTPS_WRITER, "The given change is not from this Writer");
return false;
}
assert(mp_history->next_sequence_number() > change->sequenceNumber);
return std::all_of(matched_readers_.begin(), matched_readers_.end(),
[change](const ReaderProxy* reader)
{
return reader->change_is_acked(change->sequenceNumber);
});
}
bool StatefulWriter::all_readers_updated()
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
for (auto it = matched_readers_.begin(); it != matched_readers_.end(); ++it)
{
if ((*it)->has_changes())
{
return false;
}
}
return true;
}
bool StatefulWriter::wait_for_all_acked(
const Duration_t& max_wait)
{
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
std::unique_lock<std::mutex> all_acked_lock(all_acked_mutex_);
all_acked_ = std::none_of(matched_readers_.begin(), matched_readers_.end(),
[](const ReaderProxy* reader)
{
return reader->has_changes();
});
lock.unlock();
if (!all_acked_)
{
std::chrono::microseconds max_w(TimeConv::Duration_t2MicroSecondsInt64(max_wait));
all_acked_cond_.wait_for(all_acked_lock, max_w, [&]()
{
return all_acked_;
});
}
return all_acked_;
}
void StatefulWriter::check_acked_status()
{
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
bool all_acked = true;
bool has_min_low_mark = false;
SequenceNumber_t min_low_mark;
for (const ReaderProxy* it : matched_readers_)
{
SequenceNumber_t reader_low_mark = it->changes_low_mark();
if (reader_low_mark < min_low_mark || !has_min_low_mark)
{
has_min_low_mark = true;
min_low_mark = reader_low_mark;
}
if (it->has_changes())
{
all_acked = false;
}
}
SequenceNumber_t min_seq = get_seq_num_min();
if (min_seq != SequenceNumber_t::unknown())
{
// In the case where we haven't received an acknack from a recently matched reader,
// min_low_mark will be zero, and no change will be notified as received by all
if (next_all_acked_notify_sequence_ <= min_low_mark)
{
if ( (mp_listener != nullptr) && (min_low_mark >= get_seq_num_min()))
{
// We will inform backwards about the changes received by all readers, starting
// on min_low_mark down until next_all_acked_notify_sequence_. This way we can
// safely proceed with the traversal, in case a change is removed from the history
// inside the callback
History::iterator history_end = mp_history->changesEnd();
History::iterator cit =
std::lower_bound(mp_history->changesBegin(), history_end, min_low_mark,
[](
const CacheChange_t* change,
const SequenceNumber_t& seq)
{
return change->sequenceNumber < seq;
});
if (cit != history_end && (*cit)->sequenceNumber == min_low_mark)
{
++cit;
}
SequenceNumber_t seq{};
SequenceNumber_t end_seq = min_seq > next_all_acked_notify_sequence_ ?
min_seq : next_all_acked_notify_sequence_;
                // The iterator starts pointing to the change immediately after min_low_mark
--cit;
do
{
// Avoid notifying changes before next_all_acked_notify_sequence_
CacheChange_t* change = *cit;
seq = change->sequenceNumber;
if (seq < next_all_acked_notify_sequence_)
{
break;
}
// Change iterator before it possibly becomes invalidated
if (cit != mp_history->changesBegin())
{
--cit;
}
// Notify reception of change (may remove that change on VOLATILE writers)
mp_listener->onWriterChangeReceivedByAll(this, change);
// Stop if we got to either next_all_acked_notify_sequence_ or the first change
} while (seq > end_seq);
}
next_all_acked_notify_sequence_ = min_low_mark + 1;
}
if (min_low_mark >= get_seq_num_min())
{
may_remove_change_ = 1;
may_remove_change_cond_.notify_one();
}
min_readers_low_mark_ = min_low_mark;
}
if (all_acked)
{
std::unique_lock<std::mutex> all_acked_lock(all_acked_mutex_);
all_acked_ = true;
all_acked_cond_.notify_all();
}
}
bool StatefulWriter::try_remove_change(
const std::chrono::steady_clock::time_point& max_blocking_time_point,
std::unique_lock<RecursiveTimedMutex>& lock)
{
logInfo(RTPS_WRITER, "Starting process try remove change for writer " << getGuid());
SequenceNumber_t min_low_mark;
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
for (ReaderProxy* it : matched_readers_)
{
SequenceNumber_t reader_low_mark = it->changes_low_mark();
if (min_low_mark == SequenceNumber_t() || reader_low_mark < min_low_mark)
{
min_low_mark = reader_low_mark;
}
}
}
SequenceNumber_t calc = min_low_mark < get_seq_num_min() ? SequenceNumber_t() :
(min_low_mark - get_seq_num_min()) + 1;
unsigned int may_remove_change = 1;
if (calc <= SequenceNumber_t())
{
may_remove_change_ = 0;
may_remove_change_cond_.wait_until(lock, max_blocking_time_point,
[&]()
{
return may_remove_change_ > 0;
});
may_remove_change = may_remove_change_;
}
// Some changes acked
if (may_remove_change == 1)
{
return mp_history->remove_min_change();
}
    // A change was removed while we were waiting.
else if (may_remove_change == 2)
{
return true;
}
return false;
}
/*
* PARAMETER_RELATED METHODS
*/
void StatefulWriter::updateAttributes(
const WriterAttributes& att)
{
this->updateTimes(att.times);
}
void StatefulWriter::updateTimes(
const WriterTimes& times)
{
std::lock_guard<RecursiveTimedMutex> guard(mp_mutex);
if (m_times.heartbeatPeriod != times.heartbeatPeriod)
{
periodic_hb_event_->update_interval(times.heartbeatPeriod);
}
if (m_times.nackResponseDelay != times.nackResponseDelay)
{
if (nack_response_event_ != nullptr)
{
nack_response_event_->update_interval(times.nackResponseDelay);
}
}
if (m_times.nackSupressionDuration != times.nackSupressionDuration)
{
for (ReaderProxy* it : matched_readers_)
{
it->update_nack_supression_interval(times.nackSupressionDuration);
}
for (ReaderProxy* it : matched_readers_pool_)
{
it->update_nack_supression_interval(times.nackSupressionDuration);
}
}
m_times = times;
}
void StatefulWriter::add_flow_controller(
std::unique_ptr<FlowController> controller)
{
m_controllers.push_back(std::move(controller));
}
SequenceNumber_t StatefulWriter::next_sequence_number() const
{
return mp_history->next_sequence_number();
}
bool StatefulWriter::send_periodic_heartbeat(
bool final,
bool liveliness)
{
std::lock_guard<RecursiveTimedMutex> guardW(mp_mutex);
bool unacked_changes = false;
if (m_separateSendingEnabled)
{
for (ReaderProxy* it : matched_readers_)
{
if (it->has_unacknowledged() && !it->is_local_reader())
{
send_heartbeat_to_nts(*it, liveliness);
unacked_changes = true;
}
}
}
else if (!liveliness)
{
SequenceNumber_t firstSeq, lastSeq;
firstSeq = get_seq_num_min();
lastSeq = get_seq_num_max();
if (firstSeq == c_SequenceNumber_Unknown || lastSeq == c_SequenceNumber_Unknown)
{
return false;
}
else
{
assert(firstSeq <= lastSeq);
unacked_changes = std::any_of(matched_readers_.begin(), matched_readers_.end(),
[](const ReaderProxy* reader)
{
return reader->has_unacknowledged();
});
if (unacked_changes)
{
try
{
RTPSMessageGroup group(mp_RTPSParticipant, this, *this);
send_heartbeat_nts_(all_remote_readers_.size(), group, disable_positive_acks_, liveliness);
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
}
}
else
{
// This is a liveliness heartbeat, we don't care about checking sequence numbers
try
{
for (ReaderProxy* it : matched_readers_)
{
if (it->is_local_reader())
{
intraprocess_heartbeat(it, true);
}
}
RTPSMessageGroup group(mp_RTPSParticipant, this, *this);
send_heartbeat_nts_(all_remote_readers_.size(), group, final, liveliness);
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
return unacked_changes;
}
void StatefulWriter::send_heartbeat_to_nts(
ReaderProxy& remoteReaderProxy,
bool liveliness)
{
if (remoteReaderProxy.is_remote_and_reliable())
{
try
{
RTPSMessageGroup group(mp_RTPSParticipant, this, remoteReaderProxy.message_sender());
send_heartbeat_nts_(1u, group, disable_positive_acks_, liveliness);
SequenceNumber_t first_seq = get_seq_num_min();
if (first_seq != c_SequenceNumber_Unknown)
{
SequenceNumber_t low_mark = remoteReaderProxy.changes_low_mark();
if (remoteReaderProxy.durability_kind() == VOLATILE && first_seq <= low_mark)
{
group.add_gap(first_seq, SequenceNumberSet_t(low_mark + 1));
}
remoteReaderProxy.send_gaps(group, mp_history->next_sequence_number());
}
}
catch (const RTPSMessageGroup::timeout&)
{
logError(RTPS_WRITER, "Max blocking time reached");
}
}
}
void StatefulWriter::send_heartbeat_nts_(
size_t number_of_readers,
RTPSMessageGroup& message_group,
bool final,
bool liveliness)
{
SequenceNumber_t firstSeq = get_seq_num_min();
SequenceNumber_t lastSeq = get_seq_num_max();
if (firstSeq == c_SequenceNumber_Unknown || lastSeq == c_SequenceNumber_Unknown)
{
assert(firstSeq == c_SequenceNumber_Unknown && lastSeq == c_SequenceNumber_Unknown);
if (number_of_readers == 1 || liveliness)
{
firstSeq = next_sequence_number();
lastSeq = firstSeq - 1;
}
else
{
return;
}
}
else
{
assert(firstSeq <= lastSeq);
}
incrementHBCount();
message_group.add_heartbeat(firstSeq, lastSeq, m_heartbeatCount, final, liveliness);
// Update calculate of heartbeat piggyback.
currentUsageSendBufferSize_ = static_cast<int32_t>(sendBufferSize_);
logInfo(RTPS_WRITER, getGuid().entityId << " Sending Heartbeat (" << firstSeq << " - " << lastSeq << ")" );
}
void StatefulWriter::send_heartbeat_piggyback_nts_(
ReaderProxy* reader,
RTPSMessageGroup& message_group,
uint32_t& last_bytes_processed)
{
if (!disable_heartbeat_piggyback_)
{
size_t number_of_readers = reader == nullptr ? all_remote_readers_.size() : 1u;
if (mp_history->isFull())
{
if (reader == nullptr)
{
locator_selector_.reset(true);
if (locator_selector_.state_has_changed())
{
message_group.flush_and_reset();
getRTPSParticipant()->network_factory().select_locators(locator_selector_);
compute_selected_guids();
}
}
send_heartbeat_nts_(number_of_readers, message_group, disable_positive_acks_);
}
else
{
uint32_t current_bytes = message_group.get_current_bytes_processed();
currentUsageSendBufferSize_ -= current_bytes - last_bytes_processed;
last_bytes_processed = current_bytes;
if (currentUsageSendBufferSize_ < 0)
{
send_heartbeat_nts_(number_of_readers, message_group, disable_positive_acks_);
}
}
}
}
void StatefulWriter::perform_nack_response()
{
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
bool must_wake_up_async_thread = false;
for (ReaderProxy* remote_reader : matched_readers_)
{
if (remote_reader->perform_acknack_response() || remote_reader->are_there_gaps())
{
must_wake_up_async_thread = true;
}
}
if (must_wake_up_async_thread)
{
mp_RTPSParticipant->async_thread().wake_up(this);
}
}
void StatefulWriter::perform_nack_supression(
const GUID_t& reader_guid)
{
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
for (ReaderProxy* remote_reader : matched_readers_)
{
if (remote_reader->guid() == reader_guid)
{
remote_reader->perform_nack_supression();
periodic_hb_event_->restart_timer();
return;
}
}
}
bool StatefulWriter::process_acknack(
const GUID_t& writer_guid,
const GUID_t& reader_guid,
uint32_t ack_count,
const SequenceNumberSet_t& sn_set,
bool final_flag,
bool& result)
{
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
result = (m_guid == writer_guid);
if (result)
{
for (ReaderProxy* remote_reader : matched_readers_)
{
if (remote_reader->guid() == reader_guid)
{
if (remote_reader->check_and_set_acknack_count(ack_count))
{
// Sequence numbers before Base are set as Acknowledged.
remote_reader->acked_changes_set(sn_set.base());
if (sn_set.base() > SequenceNumber_t(0, 0))
{
if (remote_reader->requested_changes_set(sn_set) || remote_reader->are_there_gaps())
{
nack_response_event_->restart_timer();
}
else if (!final_flag)
{
periodic_hb_event_->restart_timer();
}
}
else if (sn_set.empty() && !final_flag)
{
// This is the preemptive acknack.
if (remote_reader->process_initial_acknack())
{
if (remote_reader->is_local_reader())
{
mp_RTPSParticipant->async_thread().wake_up(this);
}
else
{
// Send heartbeat if requested
send_heartbeat_to_nts(*remote_reader);
}
}
if (remote_reader->is_local_reader())
{
intraprocess_heartbeat(remote_reader);
}
}
                    // Check if all CacheChanges are acknowledged, because a user could be waiting
                    // for this, or if VOLATILE CacheChanges should be removed
check_acked_status();
}
break;
}
}
}
return result;
}
bool StatefulWriter::process_nack_frag(
const GUID_t& writer_guid,
const GUID_t& reader_guid,
uint32_t ack_count,
const SequenceNumber_t& seq_num,
const FragmentNumberSet_t fragments_state,
bool& result)
{
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
result = false;
if (m_guid == writer_guid)
{
result = true;
for (ReaderProxy* remote_reader : matched_readers_)
{
if (remote_reader->guid() == reader_guid)
{
if (remote_reader->process_nack_frag(reader_guid, ack_count, seq_num, fragments_state))
{
nack_response_event_->restart_timer();
}
break;
}
}
}
return result;
}
bool StatefulWriter::ack_timer_expired()
{
std::unique_lock<RecursiveTimedMutex> lock(mp_mutex);
// The timer has expired so the earliest non-acked change must be marked as acknowledged
// This will be done in the first while iteration, as we start with a negative interval
auto interval = -keep_duration_us_;
// On the other hand, we've seen in the tests that if samples are sent very quickly with little
// time between consecutive samples, the timer interval could end up being negative
// In this case, we keep marking changes as acknowledged until the timer is able to keep up, hence the while
// loop
while (interval.count() < 0)
{
for (ReaderProxy* remote_reader : matched_readers_)
{
if (remote_reader->disable_positive_acks())
{
remote_reader->acked_changes_set(last_sequence_number_ + 1);
}
}
last_sequence_number_++;
// Get the next cache change from the history
CacheChange_t* change;
if (!mp_history->get_change(
last_sequence_number_,
getGuid(),
&change))
{
return false;
}
auto source_timestamp = system_clock::time_point() + nanoseconds(change->sourceTimestamp.to_ns());
auto now = system_clock::now();
interval = source_timestamp - now + keep_duration_us_;
}
assert(interval.count() >= 0);
ack_event_->update_interval_millisec((double)duration_cast<milliseconds>(interval).count());
return true;
}
} // namespace rtps
} // namespace fastrtps
} // namespace eprosima
| 1 | 19,333 | I think this should be called after the if below (the one for disable positive acks) | eProsima-Fast-DDS | cpp |
@@ -271,7 +271,7 @@ public class SparkTableUtil {
* @param metricsConfig a metrics conf
* @return a List of DataFile
*/
- public static List<DataFile> listPartition(Map<String, String> partition, String uri, String format,
+ public static List<DataFile> listPartition(Map<String, String> partition, URI uri, String format,
PartitionSpec spec, Configuration conf, MetricsConfig metricsConfig) {
if (format.contains("avro")) {
return listAvroPartition(partition, uri, spec, conf); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark;
import java.io.IOException;
import java.io.Serializable;
import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.ManifestFile;
import org.apache.iceberg.ManifestFiles;
import org.apache.iceberg.ManifestWriter;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.MetricsConfig;
import org.apache.iceberg.PartitionField;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.hadoop.SerializableConfiguration;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.orc.OrcMetrics;
import org.apache.iceberg.parquet.ParquetUtil;
import org.apache.iceberg.relocated.com.google.common.base.MoreObjects;
import org.apache.iceberg.relocated.com.google.common.base.Objects;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.spark.TaskContext;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.api.java.function.MapPartitionsFunction;
import org.apache.spark.sql.AnalysisException;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.TableIdentifier;
import org.apache.spark.sql.catalyst.analysis.NoSuchDatabaseException;
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException;
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute;
import org.apache.spark.sql.catalyst.catalog.CatalogTable;
import org.apache.spark.sql.catalyst.catalog.CatalogTablePartition;
import org.apache.spark.sql.catalyst.catalog.SessionCatalog;
import org.apache.spark.sql.catalyst.expressions.Expression;
import org.apache.spark.sql.catalyst.expressions.NamedExpression;
import org.apache.spark.sql.catalyst.parser.ParseException;
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan;
import scala.Function2;
import scala.Option;
import scala.Some;
import scala.Tuple2;
import scala.collection.JavaConverters;
import scala.collection.Seq;
import scala.runtime.AbstractPartialFunction;
import static org.apache.spark.sql.functions.col;
/**
* Java version of the original SparkTableUtil.scala
* https://github.com/apache/iceberg/blob/apache-iceberg-0.8.0-incubating/spark/src/main/scala/org/apache/iceberg/spark/SparkTableUtil.scala
*/
public class SparkTableUtil {
private static final PathFilter HIDDEN_PATH_FILTER =
p -> !p.getName().startsWith("_") && !p.getName().startsWith(".");
private SparkTableUtil() {
}
/**
* Returns a DataFrame with a row for each partition in the table.
*
* The DataFrame has 3 columns, partition key (a=1/b=2), partition location, and format
* (avro or parquet).
*
* @param spark a Spark session
* @param table a table name and (optional) database
* @return a DataFrame of the table's partitions
*/
public static Dataset<Row> partitionDF(SparkSession spark, String table) {
List<SparkPartition> partitions = getPartitions(spark, table);
return spark.createDataFrame(partitions, SparkPartition.class).toDF("partition", "uri", "format");
}
/**
* Returns a DataFrame with a row for each partition that matches the specified 'expression'.
*
* @param spark a Spark session.
* @param table name of the table.
* @param expression The expression whose matching partitions are returned.
* @return a DataFrame of the table partitions.
*/
public static Dataset<Row> partitionDFByFilter(SparkSession spark, String table, String expression) {
List<SparkPartition> partitions = getPartitionsByFilter(spark, table, expression);
return spark.createDataFrame(partitions, SparkPartition.class).toDF("partition", "uri", "format");
}
/**
* Returns all partitions in the table.
*
* @param spark a Spark session
* @param table a table name and (optional) database
* @return all table's partitions
*/
public static List<SparkPartition> getPartitions(SparkSession spark, String table) {
try {
TableIdentifier tableIdent = spark.sessionState().sqlParser().parseTableIdentifier(table);
return getPartitions(spark, tableIdent);
} catch (ParseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to parse table identifier: %s", table);
}
}
/**
* Returns all partitions in the table.
*
* @param spark a Spark session
* @param tableIdent a table identifier
* @return all table's partitions
*/
public static List<SparkPartition> getPartitions(SparkSession spark, TableIdentifier tableIdent) {
try {
SessionCatalog catalog = spark.sessionState().catalog();
CatalogTable catalogTable = catalog.getTableMetadata(tableIdent);
Seq<CatalogTablePartition> partitions = catalog.listPartitions(tableIdent, Option.empty());
return JavaConverters
.seqAsJavaListConverter(partitions)
.asJava()
.stream()
.map(catalogPartition -> toSparkPartition(catalogPartition, catalogTable))
.collect(Collectors.toList());
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Database not found in catalog.", tableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Table not found in catalog.", tableIdent);
}
}
/**
* Returns partitions that match the specified 'predicate'.
*
* @param spark a Spark session
* @param table a table name and (optional) database
* @param predicate a predicate on partition columns
* @return matching table's partitions
*/
public static List<SparkPartition> getPartitionsByFilter(SparkSession spark, String table, String predicate) {
TableIdentifier tableIdent;
try {
tableIdent = spark.sessionState().sqlParser().parseTableIdentifier(table);
} catch (ParseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to parse the table identifier: %s", table);
}
Expression unresolvedPredicateExpr;
try {
unresolvedPredicateExpr = spark.sessionState().sqlParser().parseExpression(predicate);
} catch (ParseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to parse the predicate expression: %s", predicate);
}
Expression resolvedPredicateExpr = resolveAttrs(spark, table, unresolvedPredicateExpr);
return getPartitionsByFilter(spark, tableIdent, resolvedPredicateExpr);
}
/**
* Returns partitions that match the specified 'predicate'.
*
* @param spark a Spark session
* @param tableIdent a table identifier
* @param predicateExpr a predicate expression on partition columns
* @return matching table's partitions
*/
public static List<SparkPartition> getPartitionsByFilter(SparkSession spark, TableIdentifier tableIdent,
Expression predicateExpr) {
try {
SessionCatalog catalog = spark.sessionState().catalog();
CatalogTable catalogTable = catalog.getTableMetadata(tableIdent);
Expression resolvedPredicateExpr;
if (!predicateExpr.resolved()) {
resolvedPredicateExpr = resolveAttrs(spark, tableIdent.quotedString(), predicateExpr);
} else {
resolvedPredicateExpr = predicateExpr;
}
Seq<Expression> predicates = JavaConverters
.collectionAsScalaIterableConverter(ImmutableList.of(resolvedPredicateExpr))
.asScala().toSeq();
Seq<CatalogTablePartition> partitions = catalog.listPartitionsByFilter(tableIdent, predicates);
return JavaConverters
.seqAsJavaListConverter(partitions)
.asJava()
.stream()
.map(catalogPartition -> toSparkPartition(catalogPartition, catalogTable))
.collect(Collectors.toList());
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Database not found in catalog.", tableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unknown table: %s. Table not found in catalog.", tableIdent);
}
}
/**
* Returns the data files in a partition by listing the partition location.
*
* For Parquet and ORC partitions, this will read metrics from the file footer. For Avro partitions,
* metrics are set to null.
*
* @param partition a partition
* @param conf a serializable Hadoop conf
* @param metricsConfig a metrics conf
* @return a List of DataFile
*/
public static List<DataFile> listPartition(SparkPartition partition, PartitionSpec spec,
SerializableConfiguration conf, MetricsConfig metricsConfig) {
return listPartition(partition.values, partition.uri, partition.format, spec, conf.get(), metricsConfig);
}
/**
* Returns the data files in a partition by listing the partition location.
*
* For Parquet and ORC partitions, this will read metrics from the file footer. For Avro partitions,
* metrics are set to null.
*
* @param partition partition key, e.g., "a=1/b=2"
* @param uri partition location URI
* @param format partition format, avro or parquet
* @param conf a Hadoop conf
* @param metricsConfig a metrics conf
* @return a List of DataFile
*/
public static List<DataFile> listPartition(Map<String, String> partition, String uri, String format,
PartitionSpec spec, Configuration conf, MetricsConfig metricsConfig) {
if (format.contains("avro")) {
return listAvroPartition(partition, uri, spec, conf);
} else if (format.contains("parquet")) {
return listParquetPartition(partition, uri, spec, conf, metricsConfig);
} else if (format.contains("orc")) {
// TODO: use MetricsConfig in listOrcPartition
return listOrcPartition(partition, uri, spec, conf);
} else {
throw new UnsupportedOperationException("Unknown partition format: " + format);
}
}
private static List<DataFile> listAvroPartition(
Map<String, String> partitionPath, String partitionUri, PartitionSpec spec, Configuration conf) {
try {
Path partition = new Path(partitionUri);
FileSystem fs = partition.getFileSystem(conf);
return Arrays.stream(fs.listStatus(partition, HIDDEN_PATH_FILTER))
.filter(FileStatus::isFile)
.map(stat -> {
Metrics metrics = new Metrics(-1L, null, null, null);
String partitionKey = spec.fields().stream()
.map(PartitionField::name)
.map(name -> String.format("%s=%s", name, partitionPath.get(name)))
.collect(Collectors.joining("/"));
return DataFiles.builder(spec)
.withPath(stat.getPath().toString())
.withFormat("avro")
.withFileSizeInBytes(stat.getLen())
.withMetrics(metrics)
.withPartitionPath(partitionKey)
.build();
}).collect(Collectors.toList());
} catch (IOException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to list files in partition: %s", partitionUri);
}
}
private static List<DataFile> listParquetPartition(Map<String, String> partitionPath, String partitionUri,
PartitionSpec spec, Configuration conf,
MetricsConfig metricsSpec) {
try {
Path partition = new Path(partitionUri);
FileSystem fs = partition.getFileSystem(conf);
return Arrays.stream(fs.listStatus(partition, HIDDEN_PATH_FILTER))
.filter(FileStatus::isFile)
.map(stat -> {
Metrics metrics;
try {
metrics = ParquetUtil.footerMetrics(ParquetFileReader.readFooter(conf, stat), metricsSpec);
} catch (IOException e) {
throw SparkExceptionUtil.toUncheckedException(
e, "Unable to read the footer of the parquet file: %s", stat.getPath());
}
String partitionKey = spec.fields().stream()
.map(PartitionField::name)
.map(name -> String.format("%s=%s", name, partitionPath.get(name)))
.collect(Collectors.joining("/"));
return DataFiles.builder(spec)
.withPath(stat.getPath().toString())
.withFormat("parquet")
.withFileSizeInBytes(stat.getLen())
.withMetrics(metrics)
.withPartitionPath(partitionKey)
.build();
}).collect(Collectors.toList());
} catch (IOException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to list files in partition: %s", partitionUri);
}
}
private static List<DataFile> listOrcPartition(
Map<String, String> partitionPath, String partitionUri, PartitionSpec spec, Configuration conf) {
try {
Path partition = new Path(partitionUri);
FileSystem fs = partition.getFileSystem(conf);
return Arrays.stream(fs.listStatus(partition, HIDDEN_PATH_FILTER))
.filter(FileStatus::isFile)
.map(stat -> {
Metrics metrics = OrcMetrics.fromInputFile(HadoopInputFile.fromPath(stat.getPath(), conf));
String partitionKey = spec.fields().stream()
.map(PartitionField::name)
.map(name -> String.format("%s=%s", name, partitionPath.get(name)))
.collect(Collectors.joining("/"));
return DataFiles.builder(spec)
.withPath(stat.getPath().toString())
.withFormat("orc")
.withFileSizeInBytes(stat.getLen())
.withMetrics(metrics)
.withPartitionPath(partitionKey)
.build();
}).collect(Collectors.toList());
} catch (IOException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to list files in partition: %s", partitionUri);
}
}
private static SparkPartition toSparkPartition(CatalogTablePartition partition, CatalogTable table) {
Option<URI> locationUri = partition.storage().locationUri();
Option<String> serde = partition.storage().serde();
Preconditions.checkArgument(locationUri.nonEmpty(), "Partition URI should be defined");
Preconditions.checkArgument(serde.nonEmpty() || table.provider().nonEmpty(),
"Partition format should be defined");
String uri = String.valueOf(locationUri.get());
String format = serde.nonEmpty() ? serde.get() : table.provider().get();
Map<String, String> partitionSpec = JavaConverters.mapAsJavaMapConverter(partition.spec()).asJava();
return new SparkPartition(partitionSpec, uri, format);
}
private static Expression resolveAttrs(SparkSession spark, String table, Expression expr) {
Function2<String, String, Object> resolver = spark.sessionState().analyzer().resolver();
LogicalPlan plan = spark.table(table).queryExecution().analyzed();
return expr.transform(new AbstractPartialFunction<Expression, Expression>() {
@Override
public Expression apply(Expression attr) {
UnresolvedAttribute unresolvedAttribute = (UnresolvedAttribute) attr;
Option<NamedExpression> namedExpressionOption = plan.resolve(unresolvedAttribute.nameParts(), resolver);
if (namedExpressionOption.isDefined()) {
return (Expression) namedExpressionOption.get();
} else {
throw new IllegalArgumentException(
String.format("Could not resolve %s using columns: %s", attr, plan.output()));
}
}
@Override
public boolean isDefinedAt(Expression attr) {
return attr instanceof UnresolvedAttribute;
}
});
}
private static Iterator<ManifestFile> buildManifest(SerializableConfiguration conf, PartitionSpec spec,
String basePath, Iterator<Tuple2<String, DataFile>> fileTuples) {
if (fileTuples.hasNext()) {
FileIO io = new HadoopFileIO(conf.get());
TaskContext ctx = TaskContext.get();
String suffix = String.format("stage-%d-task-%d-manifest", ctx.stageId(), ctx.taskAttemptId());
Path location = new Path(basePath, suffix);
String outputPath = FileFormat.AVRO.addExtension(location.toString());
OutputFile outputFile = io.newOutputFile(outputPath);
ManifestWriter<DataFile> writer = ManifestFiles.write(spec, outputFile);
try (ManifestWriter<DataFile> writerRef = writer) {
fileTuples.forEachRemaining(fileTuple -> writerRef.add(fileTuple._2));
} catch (IOException e) {
throw SparkExceptionUtil.toUncheckedException(e, "Unable to close the manifest writer: %s", outputPath);
}
ManifestFile manifestFile = writer.toManifestFile();
return ImmutableList.of(manifestFile).iterator();
} else {
return Collections.emptyIterator();
}
}
/**
* Import files from an existing Spark table to an Iceberg table.
*
   * The import uses the Spark session to get table metadata. It assumes no
   * other operations are being performed on the original or target table and
   * thus is not thread-safe.
*
* @param spark a Spark session
* @param sourceTableIdent an identifier of the source Spark table
* @param targetTable an Iceberg table where to import the data
* @param stagingDir a staging directory to store temporary manifest files
*/
public static void importSparkTable(
SparkSession spark, TableIdentifier sourceTableIdent, Table targetTable, String stagingDir) {
SessionCatalog catalog = spark.sessionState().catalog();
String db = sourceTableIdent.database().nonEmpty() ?
sourceTableIdent.database().get() :
catalog.getCurrentDatabase();
TableIdentifier sourceTableIdentWithDB = new TableIdentifier(sourceTableIdent.table(), Some.apply(db));
if (!catalog.tableExists(sourceTableIdentWithDB)) {
throw new org.apache.iceberg.exceptions.NoSuchTableException(
String.format("Table %s does not exist", sourceTableIdentWithDB));
}
try {
PartitionSpec spec = SparkSchemaUtil.specForTable(spark, sourceTableIdentWithDB.unquotedString());
if (spec == PartitionSpec.unpartitioned()) {
importUnpartitionedSparkTable(spark, sourceTableIdentWithDB, targetTable);
} else {
List<SparkPartition> sourceTablePartitions = getPartitions(spark, sourceTableIdent);
importSparkPartitions(spark, sourceTablePartitions, targetTable, spec, stagingDir);
}
} catch (AnalysisException e) {
throw SparkExceptionUtil.toUncheckedException(
e, "Unable to get partition spec for table: %s", sourceTableIdentWithDB);
}
}
private static void importUnpartitionedSparkTable(
SparkSession spark, TableIdentifier sourceTableIdent, Table targetTable) {
try {
CatalogTable sourceTable = spark.sessionState().catalog().getTableMetadata(sourceTableIdent);
Option<String> format =
sourceTable.storage().serde().nonEmpty() ? sourceTable.storage().serde() : sourceTable.provider();
Preconditions.checkArgument(format.nonEmpty(), "Could not determine table format");
Map<String, String> partition = Collections.emptyMap();
PartitionSpec spec = PartitionSpec.unpartitioned();
Configuration conf = spark.sessionState().newHadoopConf();
MetricsConfig metricsConfig = MetricsConfig.fromProperties(targetTable.properties());
List<DataFile> files = listPartition(
partition, sourceTable.location().toString(), format.get(), spec, conf, metricsConfig);
AppendFiles append = targetTable.newAppend();
files.forEach(append::appendFile);
append.commit();
} catch (NoSuchDatabaseException e) {
throw SparkExceptionUtil.toUncheckedException(
e, "Unknown table: %s. Database not found in catalog.", sourceTableIdent);
} catch (NoSuchTableException e) {
throw SparkExceptionUtil.toUncheckedException(
e, "Unknown table: %s. Table not found in catalog.", sourceTableIdent);
}
}
/**
* Import files from given partitions to an Iceberg table.
*
* @param spark a Spark session
* @param partitions partitions to import
* @param targetTable an Iceberg table where to import the data
* @param spec a partition spec
* @param stagingDir a staging directory to store temporary manifest files
*/
public static void importSparkPartitions(
SparkSession spark, List<SparkPartition> partitions, Table targetTable, PartitionSpec spec, String stagingDir) {
Configuration conf = spark.sessionState().newHadoopConf();
SerializableConfiguration serializableConf = new SerializableConfiguration(conf);
int parallelism = Math.min(partitions.size(), spark.sessionState().conf().parallelPartitionDiscoveryParallelism());
int numShufflePartitions = spark.sessionState().conf().numShufflePartitions();
MetricsConfig metricsConfig = MetricsConfig.fromProperties(targetTable.properties());
JavaSparkContext sparkContext = JavaSparkContext.fromSparkContext(spark.sparkContext());
JavaRDD<SparkPartition> partitionRDD = sparkContext.parallelize(partitions, parallelism);
Dataset<SparkPartition> partitionDS = spark.createDataset(
partitionRDD.rdd(),
Encoders.javaSerialization(SparkPartition.class));
List<ManifestFile> manifests = partitionDS
.flatMap((FlatMapFunction<SparkPartition, DataFile>) sparkPartition ->
listPartition(sparkPartition, spec, serializableConf, metricsConfig).iterator(),
Encoders.javaSerialization(DataFile.class))
.repartition(numShufflePartitions)
.map((MapFunction<DataFile, Tuple2<String, DataFile>>) file ->
Tuple2.apply(file.path().toString(), file),
Encoders.tuple(Encoders.STRING(), Encoders.javaSerialization(DataFile.class)))
.orderBy(col("_1"))
.mapPartitions(
(MapPartitionsFunction<Tuple2<String, DataFile>, ManifestFile>) fileTuple ->
buildManifest(serializableConf, spec, stagingDir, fileTuple),
Encoders.javaSerialization(ManifestFile.class))
.collectAsList();
try {
boolean snapshotIdInheritanceEnabled = PropertyUtil.propertyAsBoolean(
targetTable.properties(),
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED,
TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT);
AppendFiles append = targetTable.newAppend();
manifests.forEach(append::appendManifest);
append.commit();
if (!snapshotIdInheritanceEnabled) {
// delete original manifests as they were rewritten before the commit
deleteManifests(targetTable.io(), manifests);
}
} catch (Throwable e) {
deleteManifests(targetTable.io(), manifests);
throw e;
}
}
private static void deleteManifests(FileIO io, List<ManifestFile> manifests) {
Tasks.foreach(manifests)
.noRetry()
.suppressFailureWhenFinished()
.run(item -> io.deleteFile(item.path()));
}
/**
* Class representing a table partition.
*/
public static class SparkPartition implements Serializable {
private final Map<String, String> values;
private final String uri;
private final String format;
public SparkPartition(Map<String, String> values, String uri, String format) {
this.values = ImmutableMap.copyOf(values);
this.uri = uri;
this.format = format;
}
public Map<String, String> getValues() {
return values;
}
public String getUri() {
return uri;
}
public String getFormat() {
return format;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this)
.add("values", values)
.add("uri", uri)
.add("format", format)
.toString();
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
SparkPartition that = (SparkPartition) o;
return Objects.equal(values, that.values) &&
Objects.equal(uri, that.uri) &&
Objects.equal(format, that.format);
}
@Override
public int hashCode() {
return Objects.hashCode(values, uri, format);
}
}
}
| 1 | 21,995 | I'd like to avoid changing this method since it is public and using a URI will probably change behavior for users passing strings (String -> URI -> Path instead of String -> Path). | apache-iceberg | java |
@@ -5,11 +5,13 @@ import (
"path/filepath"
"testing"
+ "github.com/aws/aws-sdk-go/internal/sdktesting"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
func TestSharedCredentialsProvider(t *testing.T) {
- os.Clearenv()
+
+ sdktesting.StashEnv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
creds, err := p.Retrieve() | 1 | package credentials
import (
"os"
"path/filepath"
"testing"
"github.com/aws/aws-sdk-go/internal/shareddefaults"
)
func TestSharedCredentialsProvider(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
creds, err := p.Retrieve()
if err != nil {
t.Errorf("expect nil, got %v", err)
}
if e, a := "accessKey", creds.AccessKeyID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "secret", creds.SecretAccessKey; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "token", creds.SessionToken; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSharedCredentialsProviderIsExpired(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
if !p.IsExpired() {
t.Errorf("Expect creds to be expired before retrieve")
}
_, err := p.Retrieve()
if err != nil {
t.Errorf("expect nil, got %v", err)
}
if p.IsExpired() {
t.Errorf("Expect creds to not be expired after retrieve")
}
}
func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILE(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "example.ini")
p := SharedCredentialsProvider{}
creds, err := p.Retrieve()
if err != nil {
t.Errorf("expect nil, got %v", err)
}
if e, a := "accessKey", creds.AccessKeyID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "secret", creds.SecretAccessKey; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "token", creds.SessionToken; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSharedCredentialsProviderWithAWS_SHARED_CREDENTIALS_FILEAbsPath(t *testing.T) {
os.Clearenv()
wd, err := os.Getwd()
if err != nil {
t.Errorf("expect no error, got %v", err)
}
os.Setenv("AWS_SHARED_CREDENTIALS_FILE", filepath.Join(wd, "example.ini"))
p := SharedCredentialsProvider{}
creds, err := p.Retrieve()
if err != nil {
t.Errorf("expect nil, got %v", err)
}
if e, a := "accessKey", creds.AccessKeyID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "secret", creds.SecretAccessKey; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "token", creds.SessionToken; e != a {
t.Errorf("expect %v, got %v", e, a)
}
}
func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) {
os.Clearenv()
os.Setenv("AWS_PROFILE", "no_token")
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
creds, err := p.Retrieve()
if err != nil {
t.Errorf("expect nil, got %v", err)
}
if e, a := "accessKey", creds.AccessKeyID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "secret", creds.SecretAccessKey; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if v := creds.SessionToken; len(v) != 0 {
t.Errorf("Expect no token, %v", v)
}
}
func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"}
creds, err := p.Retrieve()
if err != nil {
t.Errorf("expect nil, got %v", err)
}
if e, a := "accessKey", creds.AccessKeyID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "secret", creds.SecretAccessKey; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if v := creds.SessionToken; len(v) != 0 {
t.Errorf("Expect no token, %v", v)
}
}
func TestSharedCredentialsProviderColonInCredFile(t *testing.T) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: "with_colon"}
creds, err := p.Retrieve()
if err != nil {
t.Errorf("expect nil, got %v", err)
}
if e, a := "accessKey", creds.AccessKeyID; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if e, a := "secret", creds.SecretAccessKey; e != a {
t.Errorf("expect %v, got %v", e, a)
}
if v := creds.SessionToken; len(v) != 0 {
t.Errorf("Expect no token, %v", v)
}
}
func TestSharedCredentialsProvider_DefaultFilename(t *testing.T) {
os.Clearenv()
os.Setenv("USERPROFILE", "profile_dir")
os.Setenv("HOME", "home_dir")
// default filename and profile
p := SharedCredentialsProvider{}
filename, err := p.filename()
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
if e, a := shareddefaults.SharedCredentialsFilename(), filename; e != a {
t.Errorf("expect %q filename, got %q", e, a)
}
}
func BenchmarkSharedCredentialsProvider(b *testing.B) {
os.Clearenv()
p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""}
_, err := p.Retrieve()
if err != nil {
b.Fatal(err)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := p.Retrieve()
if err != nil {
b.Fatal(err)
}
}
}
| 1 | 9,757 | Should these restore the stashed env after the test runs? | aws-aws-sdk-go | go |
@@ -402,7 +402,10 @@ def is_default_argument(
if not scope:
scope = node.scope()
if isinstance(scope, (nodes.FunctionDef, nodes.Lambda)):
- for default_node in scope.args.defaults:
+ all_defaults = scope.args.defaults + [
+ d for d in scope.args.kw_defaults if d is not None
+ ]
+ for default_node in all_defaults:
for default_name_node in default_node.nodes_of_class(nodes.Name):
if default_name_node is node:
return True | 1 | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2009 Mads Kiilerich <[email protected]>
# Copyright (c) 2010 Daniel Harding <[email protected]>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2012 FELD Boris <[email protected]>
# Copyright (c) 2013-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Ricardo Gemignani <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Dmitry Pribysh <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015 Radu Ciorba <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016, 2018-2019 Ashley Whetter <[email protected]>
# Copyright (c) 2016-2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2016-2017 Moises Lopez <[email protected]>
# Copyright (c) 2016 Brian C. Lane <[email protected]>
# Copyright (c) 2017-2018, 2020 hippo91 <[email protected]>
# Copyright (c) 2017 ttenhoeve-aa <[email protected]>
# Copyright (c) 2018 Alan Chan <[email protected]>
# Copyright (c) 2018 Sushobhit <[email protected]>
# Copyright (c) 2018 Yury Gribov <[email protected]>
# Copyright (c) 2018 Caio Carrara <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Brian Shaginaw <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Matthijs Blom <[email protected]>
# Copyright (c) 2019 Djailla <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2019 Nathan Marrow <[email protected]>
# Copyright (c) 2019 Svet <[email protected]>
# Copyright (c) 2019 Pascal Corpet <[email protected]>
# Copyright (c) 2020 Batuhan Taskaya <[email protected]>
# Copyright (c) 2020 Luigi <[email protected]>
# Copyright (c) 2020 ethan-leba <[email protected]>
# Copyright (c) 2020 Damien Baty <[email protected]>
# Copyright (c) 2020 Andrew Simmons <[email protected]>
# Copyright (c) 2020 Ram Rachum <[email protected]>
# Copyright (c) 2020 Slavfox <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 Nick Drozd <[email protected]>
# Copyright (c) 2021 David Liu <[email protected]>
# Copyright (c) 2021 Matus Valo <[email protected]>
# Copyright (c) 2021 Lorena B <[email protected]>
# Copyright (c) 2021 yushao2 <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""some functions that may be useful for various checkers
"""
import builtins
import itertools
import numbers
import re
import string
from functools import lru_cache, partial
from typing import Callable, Dict, Iterable, List, Match, Optional, Set, Tuple, Union
import _string
import astroid
import astroid.objects
from astroid import TooManyLevelsError, nodes
from astroid.context import InferenceContext
COMP_NODE_TYPES = (
nodes.ListComp,
nodes.SetComp,
nodes.DictComp,
nodes.GeneratorExp,
)
EXCEPTIONS_MODULE = "builtins"
ABC_MODULES = {"abc", "_py_abc"}
ABC_METHODS = {
"abc.abstractproperty",
"abc.abstractmethod",
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
}
TYPING_PROTOCOLS = frozenset(
{"typing.Protocol", "typing_extensions.Protocol", ".Protocol"}
)
ITER_METHOD = "__iter__"
AITER_METHOD = "__aiter__"
NEXT_METHOD = "__next__"
GETITEM_METHOD = "__getitem__"
CLASS_GETITEM_METHOD = "__class_getitem__"
SETITEM_METHOD = "__setitem__"
DELITEM_METHOD = "__delitem__"
CONTAINS_METHOD = "__contains__"
KEYS_METHOD = "keys"
# Dictionary which maps the number of expected parameters a
# special method can have to a set of special methods.
# The following keys are used to denote the parameters restrictions:
#
# * None: variable number of parameters
# * number: exactly that number of parameters
# * tuple: these are the odd ones. Basically it means that the function
# can work with any number of arguments from that tuple,
# although it's best to implement it in order to accept
# all of them.
_SPECIAL_METHODS_PARAMS = {
None: ("__new__", "__init__", "__call__"),
0: (
"__del__",
"__repr__",
"__str__",
"__bytes__",
"__hash__",
"__bool__",
"__dir__",
"__len__",
"__length_hint__",
"__iter__",
"__reversed__",
"__neg__",
"__pos__",
"__abs__",
"__invert__",
"__complex__",
"__int__",
"__float__",
"__index__",
"__trunc__",
"__floor__",
"__ceil__",
"__enter__",
"__aenter__",
"__getnewargs_ex__",
"__getnewargs__",
"__getstate__",
"__reduce__",
"__copy__",
"__unicode__",
"__nonzero__",
"__await__",
"__aiter__",
"__anext__",
"__fspath__",
),
1: (
"__format__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__getattr__",
"__getattribute__",
"__delattr__",
"__delete__",
"__instancecheck__",
"__subclasscheck__",
"__getitem__",
"__missing__",
"__delitem__",
"__contains__",
"__add__",
"__sub__",
"__mul__",
"__truediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__divmod__",
"__lshift__",
"__rshift__",
"__and__",
"__xor__",
"__or__",
"__radd__",
"__rsub__",
"__rmul__",
"__rtruediv__",
"__rmod__",
"__rdivmod__",
"__rpow__",
"__rlshift__",
"__rrshift__",
"__rand__",
"__rxor__",
"__ror__",
"__iadd__",
"__isub__",
"__imul__",
"__itruediv__",
"__ifloordiv__",
"__imod__",
"__ilshift__",
"__irshift__",
"__iand__",
"__ixor__",
"__ior__",
"__ipow__",
"__setstate__",
"__reduce_ex__",
"__deepcopy__",
"__cmp__",
"__matmul__",
"__rmatmul__",
"__imatmul__",
"__div__",
),
2: ("__setattr__", "__get__", "__set__", "__setitem__", "__set_name__"),
3: ("__exit__", "__aexit__"),
(0, 1): ("__round__",),
(1, 2): ("__pow__",),
}
SPECIAL_METHODS_PARAMS = {
name: params
for params, methods in _SPECIAL_METHODS_PARAMS.items()
for name in methods
}
PYMETHODS = set(SPECIAL_METHODS_PARAMS)
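# Editorial illustration (not part of the original module): a minimal sketch of how
# the inverted SPECIAL_METHODS_PARAMS mapping reads, given the table defined above.
def _example_special_methods_params() -> None:
    assert SPECIAL_METHODS_PARAMS["__init__"] is None  # variable number of parameters
    assert SPECIAL_METHODS_PARAMS["__exit__"] == 3  # exactly three parameters
    assert SPECIAL_METHODS_PARAMS["__round__"] == (0, 1)  # either zero or one parameter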
SUBSCRIPTABLE_CLASSES_PEP585 = frozenset(
(
"builtins.tuple",
"builtins.list",
"builtins.dict",
"builtins.set",
"builtins.frozenset",
"builtins.type",
"collections.deque",
"collections.defaultdict",
"collections.OrderedDict",
"collections.Counter",
"collections.ChainMap",
"_collections_abc.Awaitable",
"_collections_abc.Coroutine",
"_collections_abc.AsyncIterable",
"_collections_abc.AsyncIterator",
"_collections_abc.AsyncGenerator",
"_collections_abc.Iterable",
"_collections_abc.Iterator",
"_collections_abc.Generator",
"_collections_abc.Reversible",
"_collections_abc.Container",
"_collections_abc.Collection",
"_collections_abc.Callable",
"_collections_abc.Set",
"_collections_abc.MutableSet",
"_collections_abc.Mapping",
"_collections_abc.MutableMapping",
"_collections_abc.Sequence",
"_collections_abc.MutableSequence",
"_collections_abc.ByteString",
"_collections_abc.MappingView",
"_collections_abc.KeysView",
"_collections_abc.ItemsView",
"_collections_abc.ValuesView",
"contextlib.AbstractContextManager",
"contextlib.AbstractAsyncContextManager",
"re.Pattern",
"re.Match",
)
)
class NoSuchArgumentError(Exception):
pass
class InferredTypeError(Exception):
pass
def is_inside_lambda(node: nodes.NodeNG) -> bool:
"""Return true if given node is inside lambda"""
for parent in node.node_ancestors():
if isinstance(parent, nodes.Lambda):
return True
return False
def get_all_elements(
node: nodes.NodeNG,
) -> Iterable[nodes.NodeNG]:
"""Recursively returns all atoms in nested lists and tuples."""
if isinstance(node, (nodes.Tuple, nodes.List)):
for child in node.elts:
yield from get_all_elements(child)
else:
yield node
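# Editorial illustration (not part of the original module): a hedged sketch showing
# get_all_elements() flattening a made-up nested literal parsed with astroid.
def _example_get_all_elements() -> None:
    node = astroid.extract_node("(1, [2, (3, 4)], 5)")
    assert [elt.value for elt in get_all_elements(node)] == [1, 2, 3, 4, 5]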
def is_super(node: nodes.NodeNG) -> bool:
"""return True if the node is referencing the "super" builtin function"""
if getattr(node, "name", None) == "super" and node.root().name == "builtins":
return True
return False
def is_error(node: nodes.FunctionDef) -> bool:
"""Return true if the given function node only raises an exception"""
return len(node.body) == 1 and isinstance(node.body[0], nodes.Raise)
builtins = builtins.__dict__.copy() # type: ignore
SPECIAL_BUILTINS = ("__builtins__",) # '__path__', '__file__')
def is_builtin_object(node: nodes.NodeNG) -> bool:
"""Returns True if the given node is an object from the __builtin__ module."""
return node and node.root().name == "builtins"
def is_builtin(name: str) -> bool:
"""return true if <name> could be considered as a builtin defined by python"""
return name in builtins or name in SPECIAL_BUILTINS # type: ignore
def is_defined_in_scope(
var_node: nodes.NodeNG,
varname: str,
scope: nodes.NodeNG,
) -> bool:
if isinstance(scope, nodes.If):
for node in scope.body:
if (
isinstance(node, nodes.Assign)
and any(
isinstance(target, nodes.AssignName) and target.name == varname
for target in node.targets
)
) or (isinstance(node, nodes.Nonlocal) and varname in node.names):
return True
elif isinstance(scope, (COMP_NODE_TYPES, nodes.For)):
for ass_node in scope.nodes_of_class(nodes.AssignName):
if ass_node.name == varname:
return True
elif isinstance(scope, nodes.With):
for expr, ids in scope.items:
if expr.parent_of(var_node):
break
if ids and isinstance(ids, nodes.AssignName) and ids.name == varname:
return True
elif isinstance(scope, (nodes.Lambda, nodes.FunctionDef)):
if scope.args.is_argument(varname):
# If the name is found inside a default value
# of a function, then let the search continue
# in the parent's tree.
if scope.args.parent_of(var_node):
try:
scope.args.default_value(varname)
scope = scope.parent
is_defined_in_scope(var_node, varname, scope)
except astroid.NoDefault:
pass
return True
if getattr(scope, "name", None) == varname:
return True
elif isinstance(scope, nodes.ExceptHandler):
if isinstance(scope.name, nodes.AssignName):
ass_node = scope.name
if ass_node.name == varname:
return True
return False
def is_defined_before(var_node: nodes.Name) -> bool:
"""Check if the given variable node is defined before
Verify that the variable node is defined by a parent node
(list, set, dict, or generator comprehension, lambda)
or in a previous sibling node on the same line
(statement_defining ; statement_using).
"""
varname = var_node.name
for parent in var_node.node_ancestors():
if is_defined_in_scope(var_node, varname, parent):
return True
    # possibly multiple statements on the same line using semicolon separator
stmt = var_node.statement()
_node = stmt.previous_sibling()
lineno = stmt.fromlineno
while _node and _node.fromlineno == lineno:
for assign_node in _node.nodes_of_class(nodes.AssignName):
if assign_node.name == varname:
return True
for imp_node in _node.nodes_of_class((nodes.ImportFrom, nodes.Import)):
if varname in [name[1] or name[0] for name in imp_node.names]:
return True
_node = _node.previous_sibling()
return False
def is_default_argument(
node: nodes.NodeNG, scope: Optional[nodes.NodeNG] = None
) -> bool:
"""return true if the given Name node is used in function or lambda
default argument's value
"""
if not scope:
scope = node.scope()
if isinstance(scope, (nodes.FunctionDef, nodes.Lambda)):
for default_node in scope.args.defaults:
for default_name_node in default_node.nodes_of_class(nodes.Name):
if default_name_node is node:
return True
return False
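# Editorial illustration (not part of the original module): a minimal sketch of
# is_default_argument(); the function and default names below are made up.
def _example_is_default_argument() -> None:
    func = astroid.extract_node("def f(arg=SOME_DEFAULT): pass")
    default_name = func.args.defaults[0]  # the Name node for SOME_DEFAULT
    assert is_default_argument(default_name, scope=func)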
def is_func_decorator(node: nodes.NodeNG) -> bool:
"""return true if the name is used in function decorator"""
for parent in node.node_ancestors():
if isinstance(parent, nodes.Decorators):
return True
if parent.is_statement or isinstance(
parent,
(
nodes.Lambda,
nodes.ComprehensionScope,
nodes.ListComp,
),
):
break
return False
def is_ancestor_name(frame: nodes.ClassDef, node: nodes.NodeNG) -> bool:
"""return True if `frame` is an astroid.Class node with `node` in the
subtree of its bases attribute
"""
if not isinstance(frame, nodes.ClassDef):
return False
for base in frame.bases:
if node in base.nodes_of_class(nodes.Name):
return True
return False
def is_being_called(node: nodes.NodeNG) -> bool:
"""return True if node is the function being called in a Call node"""
return isinstance(node.parent, nodes.Call) and node.parent.func is node
def assign_parent(node: nodes.NodeNG) -> nodes.NodeNG:
"""return the higher parent which is not an AssignName, Tuple or List node"""
while node and isinstance(node, (nodes.AssignName, nodes.Tuple, nodes.List)):
node = node.parent
return node
def overrides_a_method(class_node: nodes.ClassDef, name: str) -> bool:
"""return True if <name> is a method overridden from an ancestor"""
for ancestor in class_node.ancestors():
if name in ancestor and isinstance(ancestor[name], nodes.FunctionDef):
return True
return False
def check_messages(*messages: str) -> Callable:
"""decorator to store messages that are handled by a checker method"""
def store_messages(func):
func.checks_msgs = messages
return func
return store_messages
class IncompleteFormatString(Exception):
"""A format string ended in the middle of a format specifier."""
class UnsupportedFormatCharacter(Exception):
"""A format character in a format string is not one of the supported
format characters."""
def __init__(self, index):
Exception.__init__(self, index)
self.index = index
def parse_format_string(
format_string: str,
) -> Tuple[Set[str], int, Dict[str, str], List[str]]:
"""Parses a format string, returning a tuple of (keys, num_args), where keys
is the set of mapping keys in the format string, and num_args is the number
of arguments required by the format string. Raises
IncompleteFormatString or UnsupportedFormatCharacter if a
parse error occurs."""
keys = set()
key_types = {}
pos_types = []
num_args = 0
def next_char(i):
i += 1
if i == len(format_string):
raise IncompleteFormatString
return (i, format_string[i])
i = 0
while i < len(format_string):
char = format_string[i]
if char == "%":
i, char = next_char(i)
# Parse the mapping key (optional).
key = None
if char == "(":
depth = 1
i, char = next_char(i)
key_start = i
while depth != 0:
if char == "(":
depth += 1
elif char == ")":
depth -= 1
i, char = next_char(i)
key_end = i - 1
key = format_string[key_start:key_end]
# Parse the conversion flags (optional).
while char in "#0- +":
i, char = next_char(i)
# Parse the minimum field width (optional).
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the precision (optional).
if char == ".":
i, char = next_char(i)
if char == "*":
num_args += 1
i, char = next_char(i)
else:
while char in string.digits:
i, char = next_char(i)
# Parse the length modifier (optional).
if char in "hlL":
i, char = next_char(i)
# Parse the conversion type (mandatory).
flags = "diouxXeEfFgGcrs%a"
if char not in flags:
raise UnsupportedFormatCharacter(i)
if key:
keys.add(key)
key_types[key] = char
elif char != "%":
num_args += 1
pos_types.append(char)
i += 1
return keys, num_args, key_types, pos_types
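# Editorial illustration (not part of the original module): a hedged example of the
# four values returned by parse_format_string() for a made-up %-style template.
def _example_parse_format_string() -> None:
    keys, num_args, key_types, pos_types = parse_format_string("%(name)s is %d years old")
    assert keys == {"name"}
    assert num_args == 1  # only the positional "%d" counts towards num_args
    assert key_types == {"name": "s"}
    assert pos_types == ["d"]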
def split_format_field_names(format_string) -> Tuple[str, Iterable[Tuple[bool, str]]]:
try:
return _string.formatter_field_name_split(format_string)
except ValueError as e:
raise IncompleteFormatString() from e
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
"""Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
yield from collect_string_fields(nested)
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string) from exc
def parse_format_method_string(
format_string: str,
) -> Tuple[List[Tuple[str, List[Tuple[bool, str]]]], int, int]:
"""
Parses a PEP 3101 format string, returning a tuple of
(keyword_arguments, implicit_pos_args_cnt, explicit_pos_args),
where keyword_arguments is the set of mapping keys in the format string, implicit_pos_args_cnt
is the number of arguments required by the format string and
explicit_pos_args is the number of arguments passed with the position.
"""
keyword_arguments = []
implicit_pos_args_cnt = 0
explicit_pos_args = set()
for name in collect_string_fields(format_string):
if name and str(name).isdigit():
explicit_pos_args.add(str(name))
elif name:
keyname, fielditerator = split_format_field_names(name)
if isinstance(keyname, numbers.Number):
explicit_pos_args.add(str(keyname))
try:
keyword_arguments.append((keyname, list(fielditerator)))
except ValueError as e:
raise IncompleteFormatString() from e
else:
implicit_pos_args_cnt += 1
return keyword_arguments, implicit_pos_args_cnt, len(explicit_pos_args)
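# Editorial illustration (not part of the original module): a hedged example of
# parse_format_method_string() on a made-up PEP 3101 template.
def _example_parse_format_method_string() -> None:
    keyword_args, implicit_count, explicit_count = parse_format_method_string("{} {} {name}")
    assert keyword_args == [("name", [])]
    assert implicit_count == 2  # the two bare "{}" placeholders
    assert explicit_count == 0  # no explicitly numbered placeholders such as "{0}"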
def is_attr_protected(attrname: str) -> bool:
"""return True if attribute name is protected (start with _ and some other
details), False otherwise.
"""
return (
attrname[0] == "_"
and attrname != "_"
and not (attrname.startswith("__") and attrname.endswith("__"))
)
def node_frame_class(node: nodes.NodeNG) -> Optional[nodes.ClassDef]:
"""Return the class that is wrapping the given node
The function returns a class for a method node (or a staticmethod or a
classmethod), otherwise it returns `None`.
"""
klass = node.frame()
nodes_to_check = (
nodes.NodeNG,
astroid.UnboundMethod,
astroid.BaseInstance,
)
while (
klass
and isinstance(klass, nodes_to_check)
and not isinstance(klass, nodes.ClassDef)
):
if klass.parent is None:
return None
klass = klass.parent.frame()
return klass
def is_attr_private(attrname: str) -> Optional[Match[str]]:
"""Check that attribute name is private (at least two leading underscores,
at most one trailing underscore)
"""
regex = re.compile("^_{2,}.*[^_]+_?$")
return regex.match(attrname)
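# Editorial illustration (not part of the original module): a minimal sketch of the
# naming conventions checked by is_attr_protected() and is_attr_private() above.
def _example_attr_name_checks() -> None:
    assert is_attr_protected("_internal")
    assert not is_attr_protected("__dunder__")
    assert is_attr_private("__secret")
    assert not is_attr_private("__dunder__")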
def get_argument_from_call(
call_node: nodes.Call, position: int = None, keyword: str = None
) -> nodes.Name:
"""Returns the specified argument from a function call.
:param nodes.Call call_node: Node representing a function call to check.
:param int position: position of the argument.
:param str keyword: the keyword of the argument.
:returns: The node representing the argument, None if the argument is not found.
:rtype: nodes.Name
:raises ValueError: if both position and keyword are None.
:raises NoSuchArgumentError: if no argument at the provided position or with
the provided keyword.
"""
if position is None and keyword is None:
raise ValueError("Must specify at least one of: position or keyword.")
if position is not None:
try:
return call_node.args[position]
except IndexError:
pass
if keyword and call_node.keywords:
for arg in call_node.keywords:
if arg.arg == keyword:
return arg.value
raise NoSuchArgumentError
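# Editorial illustration (not part of the original module): a hedged sketch of
# get_argument_from_call(); the call expression below is made up.
def _example_get_argument_from_call() -> None:
    call = astroid.extract_node("do_something(1, flag=True)")
    assert get_argument_from_call(call, position=0).value == 1
    assert get_argument_from_call(call, keyword="flag").value is True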
def inherit_from_std_ex(node: nodes.NodeNG) -> bool:
"""
    Return True if the given class node is a subclass of
    exceptions.Exception.
"""
ancestors = node.ancestors() if hasattr(node, "ancestors") else []
for ancestor in itertools.chain([node], ancestors):
if (
ancestor.name in ("Exception", "BaseException")
and ancestor.root().name == EXCEPTIONS_MODULE
):
return True
return False
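# Editorial illustration (not part of the original module): a hedged sketch of
# inherit_from_std_ex() on two made-up classes parsed with astroid.
def _example_inherit_from_std_ex() -> None:
    error_class = astroid.extract_node("class MyError(ValueError):\n    pass")
    plain_class = astroid.extract_node("class Plain:\n    pass")
    assert inherit_from_std_ex(error_class)
    assert not inherit_from_std_ex(plain_class)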
def error_of_type(handler: nodes.ExceptHandler, error_type) -> bool:
"""
Check if the given exception handler catches
the given error_type.
The *handler* parameter is a node, representing an ExceptHandler node.
The *error_type* can be an exception, such as AttributeError,
the name of an exception, or it can be a tuple of errors.
The function will return True if the handler catches any of the
given errors.
"""
def stringify_error(error):
if not isinstance(error, str):
return error.__name__
return error
if not isinstance(error_type, tuple):
error_type = (error_type,)
expected_errors = {stringify_error(error) for error in error_type}
if not handler.type:
return False
return handler.catch(expected_errors)
def decorated_with_property(node: nodes.FunctionDef) -> bool:
"""Detect if the given function node is decorated with a property."""
if not node.decorators:
return False
for decorator in node.decorators.nodes:
try:
if _is_property_decorator(decorator):
return True
except astroid.InferenceError:
pass
return False
def _is_property_kind(node, *kinds):
if not isinstance(node, (astroid.UnboundMethod, nodes.FunctionDef)):
return False
if node.decorators:
for decorator in node.decorators.nodes:
if isinstance(decorator, nodes.Attribute) and decorator.attrname in kinds:
return True
return False
def is_property_setter(node: nodes.FunctionDef) -> bool:
"""Check if the given node is a property setter"""
return _is_property_kind(node, "setter")
def is_property_deleter(node: nodes.FunctionDef) -> bool:
"""Check if the given node is a property deleter"""
return _is_property_kind(node, "deleter")
def is_property_setter_or_deleter(node: nodes.FunctionDef) -> bool:
"""Check if the given node is either a property setter or a deleter"""
return _is_property_kind(node, "setter", "deleter")
def _is_property_decorator(decorator: nodes.Name) -> bool:
for inferred in decorator.infer():
if isinstance(inferred, nodes.ClassDef):
if inferred.qname() in ("builtins.property", "functools.cached_property"):
return True
for ancestor in inferred.ancestors():
if ancestor.name == "property" and ancestor.root().name == "builtins":
return True
elif isinstance(inferred, nodes.FunctionDef):
# If decorator is function, check if it has exactly one return
# and the return is itself a function decorated with property
returns: List[nodes.Return] = list(
inferred._get_return_nodes_skip_functions()
)
if len(returns) == 1 and isinstance(
returns[0].value, (nodes.Name, nodes.Attribute)
):
inferred = safe_infer(returns[0].value)
if (
inferred
and isinstance(inferred, astroid.objects.Property)
and isinstance(inferred.function, nodes.FunctionDef)
):
return decorated_with_property(inferred.function)
return False
def decorated_with(
func: Union[nodes.FunctionDef, astroid.BoundMethod, astroid.UnboundMethod],
qnames: Iterable[str],
) -> bool:
"""Determine if the `func` node has a decorator with the qualified name `qname`."""
decorators = func.decorators.nodes if func.decorators else []
for decorator_node in decorators:
if isinstance(decorator_node, nodes.Call):
# We only want to infer the function name
decorator_node = decorator_node.func
try:
if any(
i.name in qnames or i.qname() in qnames
for i in decorator_node.infer()
if i is not None and i != astroid.Uninferable
):
return True
except astroid.InferenceError:
continue
return False
@lru_cache(maxsize=1024)
def unimplemented_abstract_methods(
node: nodes.ClassDef, is_abstract_cb: nodes.FunctionDef = None
) -> Dict[str, nodes.NodeNG]:
"""
Get the unimplemented abstract methods for the given *node*.
A method can be considered abstract if the callback *is_abstract_cb*
returns a ``True`` value. The check defaults to verifying that
a method is decorated with abstract methods.
The function will work only for new-style classes. For old-style
classes, it will simply return an empty dictionary.
For the rest of them, it will return a dictionary of abstract method
names and their inferred objects.
"""
if is_abstract_cb is None:
is_abstract_cb = partial(decorated_with, qnames=ABC_METHODS)
visited: Dict[str, nodes.NodeNG] = {}
try:
mro = reversed(node.mro())
except NotImplementedError:
# Old style class, it will not have a mro.
return {}
except astroid.ResolveError:
        # Probably inconsistent hierarchy, don't try
# to figure this out here.
return {}
for ancestor in mro:
for obj in ancestor.values():
inferred = obj
if isinstance(obj, nodes.AssignName):
inferred = safe_infer(obj)
if not inferred:
# Might be an abstract function,
# but since we don't have enough information
# in order to take this decision, we're taking
# the *safe* decision instead.
if obj.name in visited:
del visited[obj.name]
continue
if not isinstance(inferred, nodes.FunctionDef):
if obj.name in visited:
del visited[obj.name]
if isinstance(inferred, nodes.FunctionDef):
# It's critical to use the original name,
            # since after inferring, an object can be something
            # other than expected, as in the case of the
# following assignment.
#
# class A:
# def keys(self): pass
# __iter__ = keys
abstract = is_abstract_cb(inferred)
if abstract:
visited[obj.name] = inferred
elif not abstract and obj.name in visited:
del visited[obj.name]
return visited
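# Illustrative sketch (assumption, not from the original source): calling the helper on a
# concrete subclass is expected to report the abstract methods it still has to implement:
#
#     klass = astroid.extract_node('''
#     import abc
#     class Base(abc.ABC):
#         @abc.abstractmethod
#         def run(self): ...
#     class Child(Base): #@
#         pass
#     ''')
#     unimplemented_abstract_methods(klass)  # expected to map "run" to its FunctionDef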
def find_try_except_wrapper_node(
node: nodes.NodeNG,
) -> Optional[Union[nodes.ExceptHandler, nodes.TryExcept]]:
"""Return the ExceptHandler or the TryExcept node in which the node is."""
current = node
ignores = (nodes.ExceptHandler, nodes.TryExcept)
while current and not isinstance(current.parent, ignores):
current = current.parent
if current and isinstance(current.parent, ignores):
return current.parent
return None
def find_except_wrapper_node_in_scope(
node: nodes.NodeNG,
) -> Optional[Union[nodes.ExceptHandler, nodes.TryExcept]]:
"""Return the ExceptHandler in which the node is, without going out of scope."""
for current in node.node_ancestors():
if isinstance(current, astroid.scoped_nodes.LocalsDictNodeNG):
# If we're inside a function/class definition, we don't want to keep checking
# higher ancestors for `except` clauses, because if these exist, it means our
# function/class was defined in an `except` clause, rather than the current code
# actually running in an `except` clause.
return None
if isinstance(current, nodes.ExceptHandler):
return current
return None
def is_from_fallback_block(node: nodes.NodeNG) -> bool:
"""Check if the given node is from a fallback import block."""
context = find_try_except_wrapper_node(node)
if not context:
return False
if isinstance(context, nodes.ExceptHandler):
other_body = context.parent.body
handlers = context.parent.handlers
else:
other_body = itertools.chain.from_iterable(
handler.body for handler in context.handlers
)
handlers = context.handlers
has_fallback_imports = any(
isinstance(import_node, (nodes.ImportFrom, nodes.Import))
for import_node in other_body
)
ignores_import_error = _except_handlers_ignores_exception(handlers, ImportError)
return ignores_import_error or has_fallback_imports
def _except_handlers_ignores_exception(
handlers: nodes.ExceptHandler, exception
) -> bool:
func = partial(error_of_type, error_type=(exception,))
return any(func(handler) for handler in handlers)
def get_exception_handlers(
node: nodes.NodeNG, exception=Exception
) -> Optional[List[nodes.ExceptHandler]]:
"""Return the collections of handlers handling the exception in arguments.
Args:
node (nodes.NodeNG): A node that is potentially wrapped in a try except.
exception (builtin.Exception or str): exception or name of the exception.
Returns:
list: the collection of handlers that are handling the exception or None.
"""
context = find_try_except_wrapper_node(node)
if isinstance(context, nodes.TryExcept):
return [
handler for handler in context.handlers if error_of_type(handler, exception)
]
return []
def is_node_inside_try_except(node: nodes.Raise) -> bool:
"""Check if the node is directly under a Try/Except statement.
(but not under an ExceptHandler!)
Args:
node (nodes.Raise): the node raising the exception.
Returns:
bool: True if the node is inside a try/except statement, False otherwise.
"""
context = find_try_except_wrapper_node(node)
return isinstance(context, nodes.TryExcept)
def node_ignores_exception(node: nodes.NodeNG, exception=Exception) -> bool:
"""Check if the node is in a TryExcept which handles the given exception.
If the exception is not given, the function is going to look for bare
excepts.
"""
managing_handlers = get_exception_handlers(node, exception)
if not managing_handlers:
return False
return any(managing_handlers)
def class_is_abstract(node: nodes.ClassDef) -> bool:
"""return true if the given class node should be considered as an abstract
class
"""
# Only check for explicit metaclass=ABCMeta on this specific class
meta = node.declared_metaclass()
if meta is not None:
if meta.name == "ABCMeta" and meta.root().name in ABC_MODULES:
return True
for ancestor in node.ancestors():
if ancestor.name == "ABC" and ancestor.root().name in ABC_MODULES:
# abc.ABC inheritance
return True
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
def _supports_protocol_method(value: nodes.NodeNG, attr: str) -> bool:
try:
attributes = value.getattr(attr)
except astroid.NotFoundError:
return False
first = attributes[0]
if isinstance(first, nodes.AssignName):
if isinstance(first.parent.value, nodes.Const):
return False
return True
def is_comprehension(node: nodes.NodeNG) -> bool:
comprehensions = (
nodes.ListComp,
nodes.SetComp,
nodes.DictComp,
nodes.GeneratorExp,
)
return isinstance(node, comprehensions)
def _supports_mapping_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(
value, GETITEM_METHOD
) and _supports_protocol_method(value, KEYS_METHOD)
def _supports_membership_test_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, CONTAINS_METHOD)
def _supports_iteration_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, ITER_METHOD) or _supports_protocol_method(
value, GETITEM_METHOD
)
def _supports_async_iteration_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, AITER_METHOD)
def _supports_getitem_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, GETITEM_METHOD)
def _supports_setitem_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, SETITEM_METHOD)
def _supports_delitem_protocol(value: nodes.NodeNG) -> bool:
return _supports_protocol_method(value, DELITEM_METHOD)
def _is_abstract_class_name(name: str) -> bool:
lname = name.lower()
is_mixin = lname.endswith("mixin")
is_abstract = lname.startswith("abstract")
is_base = lname.startswith("base") or lname.endswith("base")
return is_mixin or is_abstract or is_base
def is_inside_abstract_class(node: nodes.NodeNG) -> bool:
while node is not None:
if isinstance(node, nodes.ClassDef):
if class_is_abstract(node):
return True
name = getattr(node, "name", None)
if name is not None and _is_abstract_class_name(name):
return True
node = node.parent
return False
def _supports_protocol(
value: nodes.NodeNG, protocol_callback: nodes.FunctionDef
) -> bool:
if isinstance(value, nodes.ClassDef):
if not has_known_bases(value):
return True
# classobj can only be iterable if it has an iterable metaclass
meta = value.metaclass()
if meta is not None:
if protocol_callback(meta):
return True
if isinstance(value, astroid.BaseInstance):
if not has_known_bases(value):
return True
if value.has_dynamic_getattr():
return True
if protocol_callback(value):
return True
if (
isinstance(value, astroid.bases.Proxy)
and isinstance(value._proxied, astroid.BaseInstance)
and has_known_bases(value._proxied)
):
value = value._proxied
return protocol_callback(value)
return False
def is_iterable(value: nodes.NodeNG, check_async: bool = False) -> bool:
if check_async:
protocol_check = _supports_async_iteration_protocol
else:
protocol_check = _supports_iteration_protocol
return _supports_protocol(value, protocol_check)
def is_mapping(value: nodes.NodeNG) -> bool:
return _supports_protocol(value, _supports_mapping_protocol)
def supports_membership_test(value: nodes.NodeNG) -> bool:
supported = _supports_protocol(value, _supports_membership_test_protocol)
return supported or is_iterable(value)
def supports_getitem(value: nodes.NodeNG, node: nodes.NodeNG) -> bool:
if isinstance(value, nodes.ClassDef):
if _supports_protocol_method(value, CLASS_GETITEM_METHOD):
return True
if is_class_subscriptable_pep585_with_postponed_evaluation_enabled(value, node):
return True
return _supports_protocol(value, _supports_getitem_protocol)
def supports_setitem(value: nodes.NodeNG, _: nodes.NodeNG) -> bool:
return _supports_protocol(value, _supports_setitem_protocol)
def supports_delitem(value: nodes.NodeNG, _: nodes.NodeNG) -> bool:
return _supports_protocol(value, _supports_delitem_protocol)
def _get_python_type_of_node(node):
pytype = getattr(node, "pytype", None)
if callable(pytype):
return pytype()
return None
@lru_cache(maxsize=1024)
def safe_infer(node: nodes.NodeNG, context=None) -> Optional[nodes.NodeNG]:
"""Return the inferred value for the given node.
Return None if inference failed or if there is some ambiguity (more than
one node has been inferred of different types).
"""
inferred_types = set()
try:
infer_gen = node.infer(context=context)
value = next(infer_gen)
except astroid.InferenceError:
return None
if value is not astroid.Uninferable:
inferred_types.add(_get_python_type_of_node(value))
try:
for inferred in infer_gen:
inferred_type = _get_python_type_of_node(inferred)
if inferred_type not in inferred_types:
return None # If there is ambiguity on the inferred node.
except astroid.InferenceError:
return None # There is some kind of ambiguity
except StopIteration:
return value
return value if len(inferred_types) <= 1 else None
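# Illustrative sketch (assumption): `safe_infer` only returns a node when inference is
# unambiguous; otherwise it returns None. For example:
#
#     node = astroid.extract_node("1 + 2")
#     inferred = safe_infer(node)  # expected to be a nodes.Const with value 3
#
# A name that can resolve to values of different python types is expected to yield None.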
@lru_cache(maxsize=512)
def infer_all(
node: nodes.NodeNG, context: InferenceContext = None
) -> List[nodes.NodeNG]:
try:
return list(node.infer(context=context))
except astroid.InferenceError:
return []
def has_known_bases(klass: nodes.ClassDef, context=None) -> bool:
"""Return true if all base classes of a class could be inferred."""
try:
return klass._all_bases_known
except AttributeError:
pass
for base in klass.bases:
result = safe_infer(base, context=context)
if (
not isinstance(result, nodes.ClassDef)
or result is klass
or not has_known_bases(result, context=context)
):
klass._all_bases_known = False
return False
klass._all_bases_known = True
return True
def is_none(node: nodes.NodeNG) -> bool:
return (
node is None
or (isinstance(node, nodes.Const) and node.value is None)
or (isinstance(node, nodes.Name) and node.name == "None")
)
def node_type(node: nodes.NodeNG) -> Optional[nodes.NodeNG]:
"""Return the inferred type for `node`
If there is more than one possible type, or if inferred type is Uninferable or None,
return None
"""
# check there is only one possible type for the assign node. Else we
# don't handle it for now
types: Set[nodes.NodeNG] = set()
try:
for var_type in node.infer():
if var_type == astroid.Uninferable or is_none(var_type):
continue
types.add(var_type)
if len(types) > 1:
return None
except astroid.InferenceError:
return None
return types.pop() if types else None
def is_registered_in_singledispatch_function(node: nodes.FunctionDef) -> bool:
"""Check if the given function node is a singledispatch function."""
singledispatch_qnames = (
"functools.singledispatch",
"singledispatch.singledispatch",
)
if not isinstance(node, nodes.FunctionDef):
return False
decorators = node.decorators.nodes if node.decorators else []
for decorator in decorators:
# func.register are function calls
if not isinstance(decorator, nodes.Call):
continue
func = decorator.func
if not isinstance(func, nodes.Attribute) or func.attrname != "register":
continue
try:
func_def = next(func.expr.infer())
except astroid.InferenceError:
continue
if isinstance(func_def, nodes.FunctionDef):
# pylint: disable=redundant-keyword-arg; some flow inference goes wrong here
return decorated_with(func_def, singledispatch_qnames)
return False
def get_node_last_lineno(node: nodes.NodeNG) -> int:
"""
Get the last lineno of the given node. For a simple statement this will just be node.lineno,
but for a node that has child statements (e.g. a method) this will be the lineno of the last
child statement recursively.
"""
# 'finalbody' is always the last clause in a try statement, if present
if getattr(node, "finalbody", False):
return get_node_last_lineno(node.finalbody[-1])
# For if, while, and for statements 'orelse' is always the last clause.
# For try statements 'orelse' is the last in the absence of a 'finalbody'
if getattr(node, "orelse", False):
return get_node_last_lineno(node.orelse[-1])
# try statements have the 'handlers' last if there is no 'orelse' or 'finalbody'
if getattr(node, "handlers", False):
return get_node_last_lineno(node.handlers[-1])
# All compound statements have a 'body'
if getattr(node, "body", False):
return get_node_last_lineno(node.body[-1])
# Not a compound statement
return node.lineno
def is_postponed_evaluation_enabled(node: nodes.NodeNG) -> bool:
"""Check if the postponed evaluation of annotations is enabled"""
module = node.root()
return "annotations" in module.future_imports
def is_class_subscriptable_pep585_with_postponed_evaluation_enabled(
value: nodes.ClassDef, node: nodes.NodeNG
) -> bool:
"""Check if class is subscriptable with PEP 585 and
postponed evaluation enabled.
"""
return (
is_postponed_evaluation_enabled(node)
and value.qname() in SUBSCRIPTABLE_CLASSES_PEP585
and is_node_in_type_annotation_context(node)
)
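# Illustrative sketch (assumption): with `from __future__ import annotations` in effect,
# subscripting the builtin containers listed in SUBSCRIPTABLE_CLASSES_PEP585 inside an
# annotation does not require runtime support for PEP 585, e.g.:
#
#     from __future__ import annotations
#     def func(names: list[str]) -> dict[str, int]:
#         ...
#
# For the `list[str]` annotation above, the check is expected to return True when given
# the inferred `list` ClassDef and the annotation node.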
def is_node_in_type_annotation_context(node: nodes.NodeNG) -> bool:
"""Check if node is in type annotation context.
Check for 'AnnAssign', function 'Arguments',
    or part of function return type annotation.
"""
# pylint: disable=too-many-boolean-expressions
current_node, parent_node = node, node.parent
while True:
if (
isinstance(parent_node, nodes.AnnAssign)
and parent_node.annotation == current_node
or isinstance(parent_node, nodes.Arguments)
and current_node
in (
*parent_node.annotations,
*parent_node.posonlyargs_annotations,
*parent_node.kwonlyargs_annotations,
parent_node.varargannotation,
parent_node.kwargannotation,
)
or isinstance(parent_node, nodes.FunctionDef)
and parent_node.returns == current_node
):
return True
current_node, parent_node = parent_node, parent_node.parent
if isinstance(parent_node, nodes.Module):
return False
def is_subclass_of(child: nodes.ClassDef, parent: nodes.ClassDef) -> bool:
"""
Check if first node is a subclass of second node.
:param child: Node to check for subclass.
:param parent: Node to check for superclass.
:returns: True if child is derived from parent. False otherwise.
"""
if not all(isinstance(node, nodes.ClassDef) for node in (child, parent)):
return False
for ancestor in child.ancestors():
try:
if astroid.helpers.is_subtype(ancestor, parent):
return True
except astroid.exceptions._NonDeducibleTypeHierarchy:
continue
return False
@lru_cache(maxsize=1024)
def is_overload_stub(node: nodes.NodeNG) -> bool:
"""Check if a node if is a function stub decorated with typing.overload.
:param node: Node to check.
:returns: True if node is an overload function stub. False otherwise.
"""
decorators = getattr(node, "decorators", None)
return bool(decorators and decorated_with(node, ["typing.overload", "overload"]))
def is_protocol_class(cls: nodes.NodeNG) -> bool:
"""Check if the given node represents a protocol class
:param cls: The node to check
:returns: True if the node is a typing protocol class, false otherwise.
"""
if not isinstance(cls, nodes.ClassDef):
return False
# Use .ancestors() since not all protocol classes can have
# their mro deduced.
return any(parent.qname() in TYPING_PROTOCOLS for parent in cls.ancestors())
def is_call_of_name(node: nodes.NodeNG, name: str) -> bool:
"""Checks if node is a function call with the given name"""
return (
isinstance(node, nodes.Call)
and isinstance(node.func, nodes.Name)
and node.func.name == name
)
def is_test_condition(
node: nodes.NodeNG,
parent: Optional[nodes.NodeNG] = None,
) -> bool:
"""Returns true if the given node is being tested for truthiness"""
parent = parent or node.parent
if isinstance(parent, (nodes.While, nodes.If, nodes.IfExp, nodes.Assert)):
return node is parent.test or parent.test.parent_of(node)
if isinstance(parent, nodes.Comprehension):
return node in parent.ifs
return is_call_of_name(parent, "bool") and parent.parent_of(node)
def is_classdef_type(node: nodes.ClassDef) -> bool:
"""Test if ClassDef node is Type."""
if node.name == "type":
return True
for base in node.bases:
if isinstance(base, nodes.Name) and base.name == "type":
return True
return False
def is_attribute_typed_annotation(
node: Union[nodes.ClassDef, astroid.Instance], attr_name: str
) -> bool:
"""Test if attribute is typed annotation in current node
or any base nodes.
"""
attribute = node.locals.get(attr_name, [None])[0]
if (
attribute
and isinstance(attribute, nodes.AssignName)
and isinstance(attribute.parent, nodes.AnnAssign)
):
return True
for base in node.bases:
inferred = safe_infer(base)
if (
inferred
and isinstance(inferred, nodes.ClassDef)
and is_attribute_typed_annotation(inferred, attr_name)
):
return True
return False
def is_assign_name_annotated_with(node: nodes.AssignName, typing_name: str) -> bool:
"""Test if AssignName node has `typing_name` annotation.
Especially useful to check for `typing._SpecialForm` instances
like: `Union`, `Optional`, `Literal`, `ClassVar`, `Final`.
"""
if not isinstance(node.parent, nodes.AnnAssign):
return False
annotation = node.parent.annotation
if isinstance(annotation, nodes.Subscript):
annotation = annotation.value
if (
isinstance(annotation, nodes.Name)
and annotation.name == typing_name
or isinstance(annotation, nodes.Attribute)
and annotation.attrname == typing_name
):
return True
return False
def get_iterating_dictionary_name(
node: Union[nodes.For, nodes.Comprehension]
) -> Optional[str]:
"""Get the name of the dictionary which keys are being iterated over on
a ``nodes.For`` or ``nodes.Comprehension`` node.
If the iterating object is not either the keys method of a dictionary
or a dictionary itself, this returns None.
"""
# Is it a proper keys call?
if (
isinstance(node.iter, nodes.Call)
and isinstance(node.iter.func, nodes.Attribute)
and node.iter.func.attrname == "keys"
):
inferred = safe_infer(node.iter.func)
if not isinstance(inferred, astroid.BoundMethod):
return None
return node.iter.as_string().rpartition(".keys")[0]
# Is it a dictionary?
if isinstance(node.iter, (nodes.Name, nodes.Attribute)):
inferred = safe_infer(node.iter)
if not isinstance(inferred, nodes.Dict):
return None
return node.iter.as_string()
return None
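# Illustrative sketch (assumption): for a loop such as
#
#     for key in some_dict.keys():
#         ...
#
# the helper is expected to return the string "some_dict", whereas iterating over a list
# or any other non-dictionary value is expected to return None.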
def get_subscript_const_value(node: nodes.Subscript) -> nodes.Const:
"""
Returns the value 'subscript.slice' of a Subscript node.
:param node: Subscript Node to extract value from
:returns: Const Node containing subscript value
:raises InferredTypeError: if the subscript node cannot be inferred as a Const
"""
inferred = safe_infer(node.slice)
if not isinstance(inferred, nodes.Const):
raise InferredTypeError("Subscript.slice cannot be inferred as a nodes.Const")
return inferred
def get_import_name(
importnode: Union[nodes.Import, nodes.ImportFrom], modname: str
) -> str:
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
:param importnode: node representing import statement.
:param modname: module name from import statement.
:returns: absolute qualified module name of the module
used in import.
"""
if isinstance(importnode, nodes.ImportFrom) and importnode.level:
root = importnode.root()
if isinstance(root, nodes.Module):
try:
return root.relative_to_absolute_name(modname, level=importnode.level)
except TooManyLevelsError:
return modname
return modname
def is_node_in_guarded_import_block(node: nodes.NodeNG) -> bool:
"""Return True if node is part for guarded if block.
I.e. `sys.version_info` or `typing.TYPE_CHECKING`
"""
return isinstance(node.parent, nodes.If) and (
node.parent.is_sys_guard() or node.parent.is_typing_guard()
)
def is_reassigned_after_current(node: nodes.NodeNG, varname: str) -> bool:
"""Check if the given variable name is reassigned in the same scope after the current node"""
return any(
a.name == varname and a.lineno > node.lineno
for a in node.scope().nodes_of_class((nodes.AssignName, nodes.FunctionDef))
)
| 1 | 16,050 | We're calculating the full list of kwargs here (even if the first element of the list would return True), so we could improve performance by using a generator at line 408 in ``for default_node in all_defaults:``. | PyCQA-pylint | py |
@@ -119,6 +119,14 @@ type accountDelta struct {
new basics.AccountData
}
+// accountDeltaCount is an extension to accountDelta that is being used by the commitRound function for counting the
+// number of changes we've made per account. The ndeltas field is used exclusively for consistency checking - making sure that
+// all the pending changes were written and that there are no outstanding writes missing.
+type accountDeltaCount struct {
+ accountDelta
+ ndeltas int
+}
+
// catchpointState is used to store catchpoint related variables into the catchpointstate table.
type catchpointState string
| 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"bytes"
"context"
"database/sql"
"fmt"
"time"
"github.com/mattn/go-sqlite3"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
// accountsDbQueries is used to cache a prepared SQL statement to look up
// the state of a single account.
type accountsDbQueries struct {
listCreatablesStmt *sql.Stmt
lookupStmt *sql.Stmt
lookupCreatorStmt *sql.Stmt
deleteStoredCatchpoint *sql.Stmt
insertStoredCatchpoint *sql.Stmt
selectOldestsCatchpointFiles *sql.Stmt
selectCatchpointStateUint64 *sql.Stmt
deleteCatchpointState *sql.Stmt
insertCatchpointStateUint64 *sql.Stmt
selectCatchpointStateString *sql.Stmt
insertCatchpointStateString *sql.Stmt
}
var accountsSchema = []string{
`CREATE TABLE IF NOT EXISTS acctrounds (
id string primary key,
rnd integer)`,
`CREATE TABLE IF NOT EXISTS accounttotals (
id string primary key,
online integer,
onlinerewardunits integer,
offline integer,
offlinerewardunits integer,
notparticipating integer,
notparticipatingrewardunits integer,
rewardslevel integer)`,
`CREATE TABLE IF NOT EXISTS accountbase (
address blob primary key,
data blob)`,
`CREATE TABLE IF NOT EXISTS assetcreators (
asset integer primary key,
creator blob)`,
`CREATE TABLE IF NOT EXISTS storedcatchpoints (
round integer primary key,
filename text NOT NULL,
catchpoint text NOT NULL,
filesize size NOT NULL,
pinned integer NOT NULL)`,
`CREATE TABLE IF NOT EXISTS accounthashes (
id integer primary key,
data blob)`,
`CREATE TABLE IF NOT EXISTS catchpointstate (
id string primary key,
intval integer,
strval text)`,
}
// TODO: Post applications, rename assetcreators -> creatables and rename
// 'asset' column -> 'creatable'
var creatablesMigration = []string{
`ALTER TABLE assetcreators ADD COLUMN ctype INTEGER DEFAULT 0`,
}
func createNormalizedOnlineBalanceIndex(idxname string, tablename string) string {
return fmt.Sprintf(`CREATE INDEX IF NOT EXISTS %s
ON %s ( normalizedonlinebalance, address, data )
WHERE normalizedonlinebalance>0`, idxname, tablename)
}
var createOnlineAccountIndex = []string{
`ALTER TABLE accountbase
ADD COLUMN normalizedonlinebalance INTEGER`,
createNormalizedOnlineBalanceIndex("onlineaccountbals", "accountbase"),
}
var accountsResetExprs = []string{
`DROP TABLE IF EXISTS acctrounds`,
`DROP TABLE IF EXISTS accounttotals`,
`DROP TABLE IF EXISTS accountbase`,
`DROP TABLE IF EXISTS assetcreators`,
`DROP TABLE IF EXISTS storedcatchpoints`,
`DROP TABLE IF EXISTS catchpointstate`,
`DROP TABLE IF EXISTS accounthashes`,
}
// accountDBVersion is the database version that this binary would know how to support and how to upgrade to.
// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX
// and their descriptions.
var accountDBVersion = int32(4)
type accountDelta struct {
old basics.AccountData
new basics.AccountData
}
// catchpointState is used to store catchpoint related variables into the catchpointstate table.
type catchpointState string
const (
// catchpointStateLastCatchpoint is written by a node once a catchpoint label is created for a round
catchpointStateLastCatchpoint = catchpointState("lastCatchpoint")
// catchpointStateWritingCatchpoint is written by a node while a catchpoint file is being created. It gets deleted once the file
// creation is complete, and used as a way to record the fact that we've started generating the catchpoint file for that particular
// round.
catchpointStateWritingCatchpoint = catchpointState("writingCatchpoint")
	// catchpointStateCatchupState is the state of the catchup process. The variable is stored only during the catchpoint catchup process, and removed afterward.
catchpointStateCatchupState = catchpointState("catchpointCatchupState")
	// catchpointStateCatchupLabel is the label to which the current catchpoint catchup process is trying to catch up.
catchpointStateCatchupLabel = catchpointState("catchpointCatchupLabel")
	// catchpointStateCatchupBlockRound is the block round that is associated with the current running catchpoint catchup.
catchpointStateCatchupBlockRound = catchpointState("catchpointCatchupBlockRound")
// catchpointStateCatchupBalancesRound is the balance round that is associated with the current running catchpoint catchup. Typically it would be
// equal to catchpointStateCatchupBlockRound - 320.
catchpointStateCatchupBalancesRound = catchpointState("catchpointCatchupBalancesRound")
)
// normalizedAccountBalance is a staging area for a catchpoint file account information before it's being added to the catchpoint staging tables.
type normalizedAccountBalance struct {
address basics.Address
accountData basics.AccountData
encodedAccountData []byte
accountHash []byte
normalizedBalance uint64
}
// prepareNormalizedBalances converts an array of encodedBalanceRecord into an equal size array of normalizedAccountBalances.
func prepareNormalizedBalances(bals []encodedBalanceRecord, proto config.ConsensusParams) (normalizedAccountBalances []normalizedAccountBalance, err error) {
normalizedAccountBalances = make([]normalizedAccountBalance, len(bals), len(bals))
for i, balance := range bals {
normalizedAccountBalances[i].address = balance.Address
err = protocol.Decode(balance.AccountData, &(normalizedAccountBalances[i].accountData))
if err != nil {
return nil, err
}
normalizedAccountBalances[i].normalizedBalance = normalizedAccountBalances[i].accountData.NormalizedOnlineBalance(proto)
normalizedAccountBalances[i].encodedAccountData = balance.AccountData
normalizedAccountBalances[i].accountHash = accountHashBuilder(balance.Address, normalizedAccountBalances[i].accountData, balance.AccountData)
}
return
}
// writeCatchpointStagingBalances inserts all the account balances in the provided array into the catchpoint balance staging table catchpointbalances.
func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []normalizedAccountBalance) error {
insertStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointbalances(address, normalizedonlinebalance, data) VALUES(?, ?, ?)")
if err != nil {
return err
}
for _, balance := range bals {
result, err := insertStmt.ExecContext(ctx, balance.address[:], balance.normalizedBalance, balance.encodedAccountData)
if err != nil {
return err
}
aff, err := result.RowsAffected()
if err != nil {
return err
}
if aff != 1 {
return fmt.Errorf("number of affected record in insert was expected to be one, but was %d", aff)
}
}
return nil
}
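// Illustrative sketch (not part of the original file): during catchpoint catchup, a decoded
// chunk of balances is normalized first and then written to the staging tables, roughly as:
//
//	normalized, err := prepareNormalizedBalances(chunkBalances, proto)
//	if err != nil {
//		return err
//	}
//	if err := writeCatchpointStagingBalances(ctx, tx, normalized); err != nil {
//		return err
//	}
//	if err := writeCatchpointStagingHashes(ctx, tx, normalized); err != nil {
//		return err
//	}
//
// where chunkBalances is a hypothetical []encodedBalanceRecord decoded from a catchpoint file.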
// writeCatchpointStagingHashes inserts all the account hashes in the provided array into the catchpoint pending hashes table catchpointpendinghashes.
func writeCatchpointStagingHashes(ctx context.Context, tx *sql.Tx, bals []normalizedAccountBalance) error {
insertStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointpendinghashes(data) VALUES(?)")
if err != nil {
return err
}
for _, balance := range bals {
result, err := insertStmt.ExecContext(ctx, balance.accountHash[:])
if err != nil {
return err
}
aff, err := result.RowsAffected()
if err != nil {
return err
}
if aff != 1 {
return fmt.Errorf("number of affected record in insert was expected to be one, but was %d", aff)
}
}
return nil
}
// createCatchpointStagingHashesIndex creates an index on catchpointpendinghashes to allow faster scanning according to the hash order
func createCatchpointStagingHashesIndex(ctx context.Context, tx *sql.Tx) (err error) {
_, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS catchpointpendinghashesidx ON catchpointpendinghashes(data)")
if err != nil {
return
}
return
}
// writeCatchpointStagingCreatable inserts all the creatables in the provided array into the catchpoint asset creator staging table catchpointassetcreators.
func writeCatchpointStagingCreatable(ctx context.Context, tx *sql.Tx, bals []normalizedAccountBalance) error {
insertStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointassetcreators(asset, creator, ctype) VALUES(?, ?, ?)")
if err != nil {
return err
}
for _, balance := range bals {
// if the account has any asset params, it means that it's the creator of an asset.
if len(balance.accountData.AssetParams) > 0 {
for aidx := range balance.accountData.AssetParams {
_, err := insertStmt.ExecContext(ctx, basics.CreatableIndex(aidx), balance.address[:], basics.AssetCreatable)
if err != nil {
return err
}
}
}
if len(balance.accountData.AppParams) > 0 {
for aidx := range balance.accountData.AppParams {
_, err := insertStmt.ExecContext(ctx, basics.CreatableIndex(aidx), balance.address[:], basics.AppCreatable)
if err != nil {
return err
}
}
}
}
return nil
}
func resetCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, newCatchup bool) (err error) {
s := []string{
"DROP TABLE IF EXISTS catchpointbalances",
"DROP TABLE IF EXISTS catchpointassetcreators",
"DROP TABLE IF EXISTS catchpointaccounthashes",
"DROP TABLE IF EXISTS catchpointpendinghashes",
"DELETE FROM accounttotals where id='catchpointStaging'",
}
if newCatchup {
// SQLite has no way to rename an existing index. So, we need
// to cook up a fresh name for the index, which will be kept
// around after we rename the table from "catchpointbalances"
// to "accountbase". To construct a unique index name, we
// use the current time.
idxname := fmt.Sprintf("onlineaccountbals%d", time.Now().UnixNano())
s = append(s,
"CREATE TABLE IF NOT EXISTS catchpointassetcreators (asset integer primary key, creator blob, ctype integer)",
"CREATE TABLE IF NOT EXISTS catchpointbalances (address blob primary key, data blob, normalizedonlinebalance integer)",
"CREATE TABLE IF NOT EXISTS catchpointpendinghashes (data blob)",
"CREATE TABLE IF NOT EXISTS catchpointaccounthashes (id integer primary key, data blob)",
createNormalizedOnlineBalanceIndex(idxname, "catchpointbalances"),
)
}
for _, stmt := range s {
_, err = tx.Exec(stmt)
if err != nil {
return err
}
}
return nil
}
// applyCatchpointStagingBalances switches the staged catchpoint catchup tables onto the actual
// tables and updates the balance round accordingly. This is the final step in switching over to the new catchpoint round.
func applyCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, balancesRound basics.Round) (err error) {
stmts := []string{
"ALTER TABLE accountbase RENAME TO accountbase_old",
"ALTER TABLE assetcreators RENAME TO assetcreators_old",
"ALTER TABLE accounthashes RENAME TO accounthashes_old",
"ALTER TABLE catchpointbalances RENAME TO accountbase",
"ALTER TABLE catchpointassetcreators RENAME TO assetcreators",
"ALTER TABLE catchpointaccounthashes RENAME TO accounthashes",
"DROP TABLE IF EXISTS accountbase_old",
"DROP TABLE IF EXISTS assetcreators_old",
"DROP TABLE IF EXISTS accounthashes_old",
}
for _, stmt := range stmts {
_, err = tx.Exec(stmt)
if err != nil {
return err
}
}
_, err = tx.Exec("INSERT OR REPLACE INTO acctrounds(id, rnd) VALUES('acctbase', ?)", balancesRound)
if err != nil {
return err
}
_, err = tx.Exec("INSERT OR REPLACE INTO acctrounds(id, rnd) VALUES('hashbase', ?)", balancesRound)
if err != nil {
return err
}
return
}
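// Illustrative sketch (assumption): a catchpoint catchup round trip over these helpers would
// first reset and recreate the staging tables, fill them, and only then switch them in place
// of the live tables:
//
//	if err := resetCatchpointStagingBalances(ctx, tx, true); err != nil { // create fresh staging tables
//		return err
//	}
//	// ... write balances, hashes and creatables into the staging tables ...
//	if err := applyCatchpointStagingBalances(ctx, tx, balancesRound); err != nil {
//		return err
//	}
//
// where balancesRound is the basics.Round the staged balances correspond to.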
func getCatchpoint(tx *sql.Tx, round basics.Round) (fileName string, catchpoint string, fileSize int64, err error) {
err = tx.QueryRow("SELECT filename, catchpoint, filesize FROM storedcatchpoints WHERE round=?", int64(round)).Scan(&fileName, &catchpoint, &fileSize)
return
}
// accountsInit fills the database using tx with initAccounts if the
// database has not been initialized yet.
//
// accountsInit returns nil if either it has initialized the database
// correctly, or if the database has already been initialized.
func accountsInit(tx *sql.Tx, initAccounts map[basics.Address]basics.AccountData, proto config.ConsensusParams) error {
for _, tableCreate := range accountsSchema {
_, err := tx.Exec(tableCreate)
if err != nil {
return err
}
}
// Run creatables migration if it hasn't run yet
var creatableMigrated bool
err := tx.QueryRow("SELECT 1 FROM pragma_table_info('assetcreators') WHERE name='ctype'").Scan(&creatableMigrated)
if err == sql.ErrNoRows {
// Run migration
for _, migrateCmd := range creatablesMigration {
_, err = tx.Exec(migrateCmd)
if err != nil {
return err
}
}
} else if err != nil {
return err
}
_, err = tx.Exec("INSERT INTO acctrounds (id, rnd) VALUES ('acctbase', 0)")
if err == nil {
var ot basics.OverflowTracker
var totals AccountTotals
for addr, data := range initAccounts {
_, err = tx.Exec("INSERT INTO accountbase (address, data) VALUES (?, ?)",
addr[:], protocol.Encode(&data))
if err != nil {
return err
}
totals.addAccount(proto, data, &ot)
}
if ot.Overflowed {
return fmt.Errorf("overflow computing totals")
}
err = accountsPutTotals(tx, totals, false)
if err != nil {
return err
}
} else {
serr, ok := err.(sqlite3.Error)
// serr.Code is sqlite.ErrConstraint if the database has already been initialized;
// in that case, ignore the error and return nil.
if !ok || serr.Code != sqlite3.ErrConstraint {
return err
}
}
return nil
}
// accountsAddNormalizedBalance adds the normalizedonlinebalance column
// to the accountbase table.
func accountsAddNormalizedBalance(tx *sql.Tx, proto config.ConsensusParams) error {
var exists bool
err := tx.QueryRow("SELECT 1 FROM pragma_table_info('accountbase') WHERE name='normalizedonlinebalance'").Scan(&exists)
if err == nil {
// Already exists.
return nil
}
if err != sql.ErrNoRows {
return err
}
for _, stmt := range createOnlineAccountIndex {
_, err := tx.Exec(stmt)
if err != nil {
return err
}
}
rows, err := tx.Query("SELECT address, data FROM accountbase")
if err != nil {
return err
}
defer rows.Close()
for rows.Next() {
var addrbuf []byte
var buf []byte
err = rows.Scan(&addrbuf, &buf)
if err != nil {
return err
}
var data basics.AccountData
err = protocol.Decode(buf, &data)
if err != nil {
return err
}
normBalance := data.NormalizedOnlineBalance(proto)
if normBalance > 0 {
_, err = tx.Exec("UPDATE accountbase SET normalizedonlinebalance=? WHERE address=?", normBalance, addrbuf)
if err != nil {
return err
}
}
}
return rows.Err()
}
// accountDataToOnline returns the part of the AccountData that matters
// for online accounts (to answer top-N queries). We store a subset of
// the full AccountData because we need to store a large number of these
// in memory (say, 1M), and storing that many AccountData could easily
// cause us to run out of memory.
func accountDataToOnline(address basics.Address, ad *basics.AccountData, proto config.ConsensusParams) *onlineAccount {
return &onlineAccount{
Address: address,
MicroAlgos: ad.MicroAlgos,
RewardsBase: ad.RewardsBase,
NormalizedOnlineBalance: ad.NormalizedOnlineBalance(proto),
VoteID: ad.VoteID,
VoteFirstValid: ad.VoteFirstValid,
VoteLastValid: ad.VoteLastValid,
VoteKeyDilution: ad.VoteKeyDilution,
}
}
func resetAccountHashes(tx *sql.Tx) (err error) {
_, err = tx.Exec(`DELETE FROM accounthashes`)
return
}
func accountsReset(tx *sql.Tx) error {
for _, stmt := range accountsResetExprs {
_, err := tx.Exec(stmt)
if err != nil {
return err
}
}
_, err := db.SetUserVersion(context.Background(), tx, 0)
return err
}
// accountsRound returns the tracker balances round number, and the round of the hash tree
// if the hash of the tree doesn't exist, it returns zero.
func accountsRound(tx *sql.Tx) (rnd basics.Round, hashrnd basics.Round, err error) {
err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='acctbase'").Scan(&rnd)
if err != nil {
return
}
err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='hashbase'").Scan(&hashrnd)
if err == sql.ErrNoRows {
hashrnd = basics.Round(0)
err = nil
}
return
}
func accountsDbInit(r db.Queryable, w db.Queryable) (*accountsDbQueries, error) {
var err error
qs := &accountsDbQueries{}
qs.listCreatablesStmt, err = r.Prepare("SELECT rnd, asset, creator FROM acctrounds LEFT JOIN assetcreators ON assetcreators.asset <= ? AND assetcreators.ctype = ? WHERE acctrounds.id='acctbase' ORDER BY assetcreators.asset desc LIMIT ?")
if err != nil {
return nil, err
}
qs.lookupStmt, err = r.Prepare("SELECT rnd, data FROM acctrounds LEFT JOIN accountbase ON address=? WHERE id='acctbase'")
if err != nil {
return nil, err
}
qs.lookupCreatorStmt, err = r.Prepare("SELECT rnd, creator FROM acctrounds LEFT JOIN assetcreators ON asset = ? AND ctype = ? WHERE id='acctbase'")
if err != nil {
return nil, err
}
qs.deleteStoredCatchpoint, err = w.Prepare("DELETE FROM storedcatchpoints WHERE round=?")
if err != nil {
return nil, err
}
qs.insertStoredCatchpoint, err = w.Prepare("INSERT INTO storedcatchpoints(round, filename, catchpoint, filesize, pinned) VALUES(?, ?, ?, ?, 0)")
if err != nil {
return nil, err
}
qs.selectOldestsCatchpointFiles, err = r.Prepare("SELECT round, filename FROM storedcatchpoints WHERE pinned = 0 and round <= COALESCE((SELECT round FROM storedcatchpoints WHERE pinned = 0 ORDER BY round DESC LIMIT ?, 1),0) ORDER BY round ASC LIMIT ?")
if err != nil {
return nil, err
}
qs.selectCatchpointStateUint64, err = r.Prepare("SELECT intval FROM catchpointstate WHERE id=?")
if err != nil {
return nil, err
}
qs.deleteCatchpointState, err = r.Prepare("DELETE FROM catchpointstate WHERE id=?")
if err != nil {
return nil, err
}
qs.insertCatchpointStateUint64, err = r.Prepare("INSERT OR REPLACE INTO catchpointstate(id, intval) VALUES(?, ?)")
if err != nil {
return nil, err
}
qs.insertCatchpointStateString, err = r.Prepare("INSERT OR REPLACE INTO catchpointstate(id, strval) VALUES(?, ?)")
if err != nil {
return nil, err
}
qs.selectCatchpointStateString, err = r.Prepare("SELECT strval FROM catchpointstate WHERE id=?")
if err != nil {
return nil, err
}
return qs, nil
}
// listCreatables returns an array of CreatableLocator which have CreatableIndex smaller or equal to maxIdx and are of the provided CreatableType.
func (qs *accountsDbQueries) listCreatables(maxIdx basics.CreatableIndex, maxResults uint64, ctype basics.CreatableType) (results []basics.CreatableLocator, dbRound basics.Round, err error) {
err = db.Retry(func() error {
// Query for assets in range
rows, err := qs.listCreatablesStmt.Query(maxIdx, ctype, maxResults)
if err != nil {
return err
}
defer rows.Close()
// For each row, copy into a new CreatableLocator and append to results
var buf []byte
var cl basics.CreatableLocator
var creatableIndex sql.NullInt64
for rows.Next() {
err = rows.Scan(&dbRound, &creatableIndex, &buf)
if err != nil {
return err
}
if !creatableIndex.Valid {
// we received an entry without any index. This would happen only on the first entry when there are no creatables of the requested type.
break
}
cl.Index = basics.CreatableIndex(creatableIndex.Int64)
copy(cl.Creator[:], buf)
cl.Type = ctype
results = append(results, cl)
}
return nil
})
return
}
func (qs *accountsDbQueries) lookupCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (addr basics.Address, ok bool, dbRound basics.Round, err error) {
err = db.Retry(func() error {
var buf []byte
err := qs.lookupCreatorStmt.QueryRow(cidx, ctype).Scan(&dbRound, &buf)
// this shouldn't happen unless we can't figure the round number.
if err == sql.ErrNoRows {
return fmt.Errorf("lookupCreator was unable to retrieve round number")
}
// Some other database error
if err != nil {
return err
}
if len(buf) > 0 {
ok = true
copy(addr[:], buf)
}
return nil
})
return
}
// lookup looks up the account data for the given address. It returns the current database round and the matching
// account data, if such was found. If no matching account data could be found for the given address, an empty account data would
// be retrieved.
func (qs *accountsDbQueries) lookup(addr basics.Address) (data basics.AccountData, dbRound basics.Round, err error) {
err = db.Retry(func() error {
var buf []byte
err := qs.lookupStmt.QueryRow(addr[:]).Scan(&dbRound, &buf)
if err == nil {
if len(buf) > 0 {
return protocol.Decode(buf, &data)
}
// we don't have that account, just return the database round.
return nil
}
// this should never happen; it indicates that we don't have a current round in the acctrounds table.
if err == sql.ErrNoRows {
// Return the zero value of data
return fmt.Errorf("unable to query account data for address %v : %w", addr, err)
}
return err
})
return
}
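// Illustrative sketch (assumption): the prepared statements above are built once and reused
// for every lookup, e.g.:
//
//	qs, err := accountsDbInit(rdb, wdb) // rdb/wdb: hypothetical db.Queryable read/write handles
//	if err != nil {
//		return err
//	}
//	defer qs.close()
//	data, rnd, err := qs.lookup(addr) // addr is a hypothetical basics.Address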
func (qs *accountsDbQueries) storeCatchpoint(ctx context.Context, round basics.Round, fileName string, catchpoint string, fileSize int64) (err error) {
err = db.Retry(func() (err error) {
_, err = qs.deleteStoredCatchpoint.ExecContext(ctx, round)
if err != nil || (fileName == "" && catchpoint == "" && fileSize == 0) {
return
}
_, err = qs.insertStoredCatchpoint.ExecContext(ctx, round, fileName, catchpoint, fileSize)
return
})
return
}
func (qs *accountsDbQueries) getOldestCatchpointFiles(ctx context.Context, fileCount int, filesToKeep int) (fileNames map[basics.Round]string, err error) {
err = db.Retry(func() (err error) {
var rows *sql.Rows
rows, err = qs.selectOldestsCatchpointFiles.QueryContext(ctx, filesToKeep, fileCount)
if err != nil {
return
}
defer rows.Close()
fileNames = make(map[basics.Round]string)
for rows.Next() {
var fileName string
var round basics.Round
err = rows.Scan(&round, &fileName)
if err != nil {
return
}
fileNames[round] = fileName
}
err = rows.Err()
return
})
return
}
func (qs *accountsDbQueries) readCatchpointStateUint64(ctx context.Context, stateName catchpointState) (rnd uint64, def bool, err error) {
var val sql.NullInt64
err = db.Retry(func() (err error) {
err = qs.selectCatchpointStateUint64.QueryRowContext(ctx, stateName).Scan(&val)
if err == sql.ErrNoRows || (err == nil && false == val.Valid) {
val.Int64 = 0 // default to zero.
err = nil
def = true
return
}
return err
})
return uint64(val.Int64), def, err
}
func (qs *accountsDbQueries) writeCatchpointStateUint64(ctx context.Context, stateName catchpointState, setValue uint64) (cleared bool, err error) {
err = db.Retry(func() (err error) {
if setValue == 0 {
_, err = qs.deleteCatchpointState.ExecContext(ctx, stateName)
cleared = true
return err
}
// we don't know if there is an entry in the table for this state, so we'll insert/replace it just in case.
_, err = qs.insertCatchpointStateUint64.ExecContext(ctx, stateName, setValue)
cleared = false
return err
})
return cleared, err
}
func (qs *accountsDbQueries) readCatchpointStateString(ctx context.Context, stateName catchpointState) (str string, def bool, err error) {
var val sql.NullString
err = db.Retry(func() (err error) {
err = qs.selectCatchpointStateString.QueryRowContext(ctx, stateName).Scan(&val)
if err == sql.ErrNoRows || (err == nil && false == val.Valid) {
val.String = "" // default to empty string
err = nil
def = true
return
}
return err
})
return val.String, def, err
}
func (qs *accountsDbQueries) writeCatchpointStateString(ctx context.Context, stateName catchpointState, setValue string) (cleared bool, err error) {
err = db.Retry(func() (err error) {
if setValue == "" {
_, err = qs.deleteCatchpointState.ExecContext(ctx, stateName)
cleared = true
return err
}
// we don't know if there is an entry in the table for this state, so we'll insert/replace it just in case.
_, err = qs.insertCatchpointStateString.ExecContext(ctx, stateName, setValue)
cleared = false
return err
})
return cleared, err
}
func (qs *accountsDbQueries) close() {
preparedQueries := []**sql.Stmt{
&qs.listCreatablesStmt,
&qs.lookupStmt,
&qs.lookupCreatorStmt,
&qs.deleteStoredCatchpoint,
&qs.insertStoredCatchpoint,
&qs.selectOldestsCatchpointFiles,
&qs.selectCatchpointStateUint64,
&qs.deleteCatchpointState,
&qs.insertCatchpointStateUint64,
&qs.selectCatchpointStateString,
&qs.insertCatchpointStateString,
}
for _, preparedQuery := range preparedQueries {
if (*preparedQuery) != nil {
(*preparedQuery).Close()
*preparedQuery = nil
}
}
}
// accountsOnlineTop returns the top n online accounts starting at position offset
// (that is, the top offset'th account through the top offset+n-1'th account).
//
// The accounts are sorted by their normalized balance and address. The normalized
// balance has to do with the reward parts of online account balances. See the
// normalization procedure in AccountData.NormalizedOnlineBalance().
//
// Note that this does not check if the accounts have a vote key valid for any
// particular round (past, present, or future).
func accountsOnlineTop(tx *sql.Tx, offset, n uint64, proto config.ConsensusParams) (map[basics.Address]*onlineAccount, error) {
rows, err := tx.Query("SELECT address, data FROM accountbase WHERE normalizedonlinebalance>0 ORDER BY normalizedonlinebalance DESC, address DESC LIMIT ? OFFSET ?", n, offset)
if err != nil {
return nil, err
}
defer rows.Close()
res := make(map[basics.Address]*onlineAccount, n)
for rows.Next() {
var addrbuf []byte
var buf []byte
err = rows.Scan(&addrbuf, &buf)
if err != nil {
return nil, err
}
var data basics.AccountData
err = protocol.Decode(buf, &data)
if err != nil {
return nil, err
}
var addr basics.Address
if len(addrbuf) != len(addr) {
err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
return nil, err
}
copy(addr[:], addrbuf)
res[addr] = accountDataToOnline(addr, &data, proto)
}
return res, rows.Err()
}
func accountsAll(tx *sql.Tx) (bals map[basics.Address]basics.AccountData, err error) {
rows, err := tx.Query("SELECT address, data FROM accountbase")
if err != nil {
return
}
defer rows.Close()
bals = make(map[basics.Address]basics.AccountData)
for rows.Next() {
var addrbuf []byte
var buf []byte
err = rows.Scan(&addrbuf, &buf)
if err != nil {
return
}
var data basics.AccountData
err = protocol.Decode(buf, &data)
if err != nil {
return
}
var addr basics.Address
if len(addrbuf) != len(addr) {
err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
return
}
copy(addr[:], addrbuf)
bals[addr] = data
}
err = rows.Err()
return
}
func accountsTotals(tx *sql.Tx, catchpointStaging bool) (totals AccountTotals, err error) {
id := ""
if catchpointStaging {
id = "catchpointStaging"
}
row := tx.QueryRow("SELECT online, onlinerewardunits, offline, offlinerewardunits, notparticipating, notparticipatingrewardunits, rewardslevel FROM accounttotals WHERE id=?", id)
err = row.Scan(&totals.Online.Money.Raw, &totals.Online.RewardUnits,
&totals.Offline.Money.Raw, &totals.Offline.RewardUnits,
&totals.NotParticipating.Money.Raw, &totals.NotParticipating.RewardUnits,
&totals.RewardsLevel)
return
}
func accountsPutTotals(tx *sql.Tx, totals AccountTotals, catchpointStaging bool) error {
id := ""
if catchpointStaging {
id = "catchpointStaging"
}
_, err := tx.Exec("REPLACE INTO accounttotals (id, online, onlinerewardunits, offline, offlinerewardunits, notparticipating, notparticipatingrewardunits, rewardslevel) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
id,
totals.Online.Money.Raw, totals.Online.RewardUnits,
totals.Offline.Money.Raw, totals.Offline.RewardUnits,
totals.NotParticipating.Money.Raw, totals.NotParticipating.RewardUnits,
totals.RewardsLevel)
return err
}
// accountsNewRound updates the accountbase and assetcreators by applying the provided deltas to the accounts / creatables.
func accountsNewRound(tx *sql.Tx, updates map[basics.Address]accountDelta, creatables map[basics.CreatableIndex]modifiedCreatable, proto config.ConsensusParams) (err error) {
var insertCreatableIdxStmt, deleteCreatableIdxStmt, deleteStmt, replaceStmt *sql.Stmt
deleteStmt, err = tx.Prepare("DELETE FROM accountbase WHERE address=?")
if err != nil {
return
}
defer deleteStmt.Close()
replaceStmt, err = tx.Prepare("REPLACE INTO accountbase (address, normalizedonlinebalance, data) VALUES (?, ?, ?)")
if err != nil {
return
}
defer replaceStmt.Close()
for addr, data := range updates {
if data.new.IsZero() {
// prune empty accounts
_, err = deleteStmt.Exec(addr[:])
} else {
normBalance := data.new.NormalizedOnlineBalance(proto)
_, err = replaceStmt.Exec(addr[:], normBalance, protocol.Encode(&data.new))
}
if err != nil {
return
}
}
if len(creatables) > 0 {
insertCreatableIdxStmt, err = tx.Prepare("INSERT INTO assetcreators (asset, creator, ctype) VALUES (?, ?, ?)")
if err != nil {
return
}
defer insertCreatableIdxStmt.Close()
deleteCreatableIdxStmt, err = tx.Prepare("DELETE FROM assetcreators WHERE asset=? AND ctype=?")
if err != nil {
return
}
defer deleteCreatableIdxStmt.Close()
for cidx, cdelta := range creatables {
if cdelta.created {
_, err = insertCreatableIdxStmt.Exec(cidx, cdelta.creator[:], cdelta.ctype)
} else {
_, err = deleteCreatableIdxStmt.Exec(cidx, cdelta.ctype)
}
if err != nil {
return
}
}
}
return
}
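// Illustrative sketch (assumption): a flushed round applies its per-account deltas and
// creatable changes within the same transaction, e.g.:
//
//	updates := map[basics.Address]accountDelta{
//		addr: {old: oldData, new: newData}, // hypothetical account that changed this round
//	}
//	if err := accountsNewRound(tx, updates, nil, proto); err != nil {
//		return err
//	}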
// totalsNewRounds updates the accountsTotals by applying a series of round changes
func totalsNewRounds(tx *sql.Tx, updates []map[basics.Address]accountDelta, accountTotals []AccountTotals, protos []config.ConsensusParams) (err error) {
var ot basics.OverflowTracker
totals, err := accountsTotals(tx, false)
if err != nil {
return
}
for i := 0; i < len(updates); i++ {
totals.applyRewards(accountTotals[i].RewardsLevel, &ot)
for _, data := range updates[i] {
totals.delAccount(protos[i], data.old, &ot)
totals.addAccount(protos[i], data.new, &ot)
}
}
if ot.Overflowed {
err = fmt.Errorf("overflow computing totals")
return
}
err = accountsPutTotals(tx, totals, false)
if err != nil {
return
}
return
}
// updateAccountsRound updates the round number associated with the current account data.
func updateAccountsRound(tx *sql.Tx, rnd basics.Round, hashRound basics.Round) (err error) {
res, err := tx.Exec("UPDATE acctrounds SET rnd=? WHERE id='acctbase' AND rnd<?", rnd, rnd)
if err != nil {
return
}
aff, err := res.RowsAffected()
if err != nil {
return
}
if aff != 1 {
// try to figure out why we couldn't update the round number.
var base basics.Round
err = tx.QueryRow("SELECT rnd FROM acctrounds WHERE id='acctbase'").Scan(&base)
if err != nil {
return
}
if base > rnd {
err = fmt.Errorf("newRound %d is not after base %d", rnd, base)
return
} else if base != rnd {
err = fmt.Errorf("updateAccountsRound(acctbase, %d): expected to update 1 row but got %d", rnd, aff)
return
}
}
res, err = tx.Exec("INSERT OR REPLACE INTO acctrounds(id,rnd) VALUES('hashbase',?)", hashRound)
if err != nil {
return
}
aff, err = res.RowsAffected()
if err != nil {
return
}
if aff != 1 {
err = fmt.Errorf("updateAccountsRound(hashbase,%d): expected to update 1 row but got %d", hashRound, aff)
return
}
return
}
// totalAccounts returns the total number of accounts
func totalAccounts(ctx context.Context, tx *sql.Tx) (total uint64, err error) {
err = tx.QueryRowContext(ctx, "SELECT count(*) FROM accountbase").Scan(&total)
if err == sql.ErrNoRows {
total = 0
err = nil
return
}
return
}
// reencodeAccounts reads all the accounts in the accountbase table, decodes and re-encodes the account data.
// If the account data is found to have a different encoding, it updates the encoded account on disk.
// On return, it reports the number of modified accounts as well as an error, if any occurred.
func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, err error) {
modifiedAccounts = 0
scannedAccounts := 0
updateStmt, err := tx.PrepareContext(ctx, "UPDATE accountbase SET data = ? WHERE address = ?")
if err != nil {
return 0, err
}
rows, err := tx.QueryContext(ctx, "SELECT address, data FROM accountbase")
if err != nil {
return
}
defer rows.Close()
var addr basics.Address
for rows.Next() {
// once every 1000 accounts we scan through, update the warning deadline.
// as long as the last "chunk" takes less than one second, we should be good to go.
// note that we should be quite liberal on timing here, since it might perform much slower
// on low-power devices.
if scannedAccounts%1000 == 0 {
// The return value from ResetTransactionWarnDeadline can be safely ignored here since it would only default to writing the warning
// message, which would let us know that it failed anyway.
db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(time.Second))
}
var addrbuf []byte
var preencodedAccountData []byte
err = rows.Scan(&addrbuf, &preencodedAccountData)
if err != nil {
return
}
if len(addrbuf) != len(addr) {
err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
return
}
copy(addr[:], addrbuf[:])
scannedAccounts++
// decode and re-encode:
var decodedAccountData basics.AccountData
err = protocol.Decode(preencodedAccountData, &decodedAccountData)
if err != nil {
return
}
reencodedAccountData := protocol.Encode(&decodedAccountData)
		if bytes.Equal(preencodedAccountData, reencodedAccountData) {
// these are identical, no need to store re-encoded account data
continue
}
// we need to update the encoded data.
result, err := updateStmt.ExecContext(ctx, reencodedAccountData, addrbuf)
if err != nil {
return 0, err
}
rowsUpdated, err := result.RowsAffected()
if err != nil {
return 0, err
}
if rowsUpdated != 1 {
return 0, fmt.Errorf("failed to update account %v, number of rows updated was %d instead of 1", addr, rowsUpdated)
}
modifiedAccounts++
}
err = rows.Err()
updateStmt.Close()
return
}
type merkleCommitter struct {
tx *sql.Tx
deleteStmt *sql.Stmt
insertStmt *sql.Stmt
selectStmt *sql.Stmt
}
func makeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err error) {
mc = &merkleCommitter{tx: tx}
accountHashesTable := "accounthashes"
if staging {
accountHashesTable = "catchpointaccounthashes"
}
mc.deleteStmt, err = tx.Prepare("DELETE FROM " + accountHashesTable + " WHERE id=?")
if err != nil {
return nil, err
}
mc.insertStmt, err = tx.Prepare("INSERT OR REPLACE INTO " + accountHashesTable + "(id, data) VALUES(?, ?)")
if err != nil {
return nil, err
}
mc.selectStmt, err = tx.Prepare("SELECT data FROM " + accountHashesTable + " WHERE id = ?")
if err != nil {
return nil, err
}
return mc, nil
}
// StorePage stores a single page in the database-backed persistence.
func (mc *merkleCommitter) StorePage(page uint64, content []byte) error {
if len(content) == 0 {
_, err := mc.deleteStmt.Exec(page)
return err
}
_, err := mc.insertStmt.Exec(page, content)
return err
}
// LoadPage loads a single page from the database-backed persistence.
func (mc *merkleCommitter) LoadPage(page uint64) (content []byte, err error) {
err = mc.selectStmt.QueryRow(page).Scan(&content)
if err == sql.ErrNoRows {
content = nil
err = nil
return
} else if err != nil {
return nil, err
}
return content, nil
}
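// Illustrative sketch (assumption): the committer persists merkle trie pages keyed by page
// number; a store/load round trip over the helpers above looks like:
//
//	mc, err := makeMerkleCommitter(tx, false)
//	if err != nil {
//		return err
//	}
//	if err := mc.StorePage(1, pageContent); err != nil { // pageContent: hypothetical serialized page
//		return err
//	}
//	content, err := mc.LoadPage(1) // a nil content means the page is not stored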
// encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table.
type encodedAccountsBatchIter struct {
rows *sql.Rows
}
// Next returns an array containing the account data, in the same way it appears in the database,
// returning up to accountCount account records at a time.
func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int) (bals []encodedBalanceRecord, err error) {
if iterator.rows == nil {
iterator.rows, err = tx.QueryContext(ctx, "SELECT address, data FROM accountbase ORDER BY address")
if err != nil {
return
}
}
// gather up to accountCount encoded accounts.
bals = make([]encodedBalanceRecord, 0, accountCount)
var addr basics.Address
for iterator.rows.Next() {
var addrbuf []byte
var buf []byte
err = iterator.rows.Scan(&addrbuf, &buf)
if err != nil {
iterator.Close()
return
}
if len(addrbuf) != len(addr) {
err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
return
}
copy(addr[:], addrbuf)
bals = append(bals, encodedBalanceRecord{Address: addr, AccountData: buf})
if len(bals) == accountCount {
// we're done with this iteration.
return
}
}
err = iterator.rows.Err()
if err != nil {
iterator.Close()
return
}
// we just finished reading the table.
iterator.Close()
return
}
// Close shuts down the encodedAccountsBatchIter, releasing database resources.
func (iterator *encodedAccountsBatchIter) Close() {
if iterator.rows != nil {
iterator.rows.Close()
iterator.rows = nil
}
}
// orderedAccountsIterStep is used by orderedAccountsIter to define the current step
//msgp:ignore orderedAccountsIterStep
type orderedAccountsIterStep int
const (
// startup step
oaiStepStartup = orderedAccountsIterStep(0)
// delete old ordering table if we have any leftover from previous invocation
oaiStepDeleteOldOrderingTable = orderedAccountsIterStep(0)
// create new ordering table
oaiStepCreateOrderingTable = orderedAccountsIterStep(1)
// query the existing accounts
oaiStepQueryAccounts = orderedAccountsIterStep(2)
// iterate over the existing accounts and insert their hash & address into the staging ordering table
oaiStepInsertAccountData = orderedAccountsIterStep(3)
// create an index on the ordering table so that we can efficiently scan it.
oaiStepCreateOrderingAccountIndex = orderedAccountsIterStep(4)
// query the ordering table
oaiStepSelectFromOrderedTable = orderedAccountsIterStep(5)
// iterate over the ordering table
oaiStepIterateOverOrderedTable = orderedAccountsIterStep(6)
// cleanup and delete ordering table
oaiStepShutdown = orderedAccountsIterStep(7)
// do nothing as we're done.
oaiStepDone = orderedAccountsIterStep(8)
)
// orderedAccountsIter allows us to iterate over the accounts addresses in the order of the account hashes.
type orderedAccountsIter struct {
step orderedAccountsIterStep
rows *sql.Rows
tx *sql.Tx
accountCount int
insertStmt *sql.Stmt
}
// makeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
// only a single iterator can be active at a time.
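//
// A typical usage sketch (illustrative only):
//
//	iter := makeOrderedAccountsIter(tx, accountCount)
//	defer iter.Close(ctx)
//	for {
//		accts, processed, err := iter.Next(ctx)
//		if err == sql.ErrNoRows {
//			break // the iterator is done
//		} else if err != nil {
//			return err
//		}
//		// processed > 0 while accounts are still being ordered;
//		// accts holds data once the ordered table is being read.
//	}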
func makeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
return &orderedAccountsIter{
tx: tx,
accountCount: accountCount,
step: oaiStepStartup,
}
}
// accountAddressHash is used by Next to return a single account address and the associated hash.
type accountAddressHash struct {
address basics.Address
digest []byte
}
// Next returns an array containing the account addresses and hashes.
// The Next function works in multiple processing stages: it first processes the current accounts and orders them,
// and then returns the ordered accounts. In the first phase it returns an empty accountAddressHash array
// and sets processedRecords to the number of accounts that were processed. In the second phase, acct
// contains valid data (and optionally the account data as well, if requested in makeOrderedAccountsIter) and
// processedRecords is zero. If err is sql.ErrNoRows, the iterator has completed its work and no further
// accounts exist. Otherwise, the caller is expected to keep calling Next to retrieve the next set of accounts
// (or to let the Next function make some progress toward that goal).
func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAddressHash, processedRecords int, err error) {
if iterator.step == oaiStepDeleteOldOrderingTable {
// although we're going to delete this table anyway when completing the iterator execution, we'll try to
// clean up any intermediate table.
_, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
if err != nil {
return
}
iterator.step = oaiStepCreateOrderingTable
return
}
if iterator.step == oaiStepCreateOrderingTable {
// create the temporary table
_, err = iterator.tx.ExecContext(ctx, "CREATE TABLE accountsiteratorhashes(address blob, hash blob)")
if err != nil {
return
}
iterator.step = oaiStepQueryAccounts
return
}
if iterator.step == oaiStepQueryAccounts {
// iterate over the existing accounts
iterator.rows, err = iterator.tx.QueryContext(ctx, "SELECT address, data FROM accountbase")
if err != nil {
return
}
// prepare the insert statement into the temporary table
iterator.insertStmt, err = iterator.tx.PrepareContext(ctx, "INSERT INTO accountsiteratorhashes(address, hash) VALUES(?, ?)")
if err != nil {
return
}
iterator.step = oaiStepInsertAccountData
return
}
if iterator.step == oaiStepInsertAccountData {
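		// Hash every account and insert the (address, hash) pair into the
		// staging accountsiteratorhashes table. We return to the caller every
		// accountCount records so progress can be reported between batches.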
var addr basics.Address
count := 0
for iterator.rows.Next() {
var addrbuf []byte
var buf []byte
err = iterator.rows.Scan(&addrbuf, &buf)
if err != nil {
iterator.Close(ctx)
return
}
if len(addrbuf) != len(addr) {
err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
iterator.Close(ctx)
return
}
copy(addr[:], addrbuf)
var accountData basics.AccountData
err = protocol.Decode(buf, &accountData)
if err != nil {
iterator.Close(ctx)
return
}
hash := accountHashBuilder(addr, accountData, buf)
_, err = iterator.insertStmt.ExecContext(ctx, addrbuf, hash)
if err != nil {
iterator.Close(ctx)
return
}
count++
if count == iterator.accountCount {
// we're done with this iteration.
processedRecords = count
return
}
}
processedRecords = count
iterator.rows.Close()
iterator.rows = nil
iterator.insertStmt.Close()
iterator.insertStmt = nil
iterator.step = oaiStepCreateOrderingAccountIndex
return
}
if iterator.step == oaiStepCreateOrderingAccountIndex {
		// create an index. It has been shown that even when we're making a single select statement in step 5, it is better to have this index than not to have it at all.
// note that this index is using the rowid of the accountsiteratorhashes table.
_, err = iterator.tx.ExecContext(ctx, "CREATE INDEX accountsiteratorhashesidx ON accountsiteratorhashes(hash)")
if err != nil {
iterator.Close(ctx)
return
}
iterator.step = oaiStepSelectFromOrderedTable
return
}
if iterator.step == oaiStepSelectFromOrderedTable {
// select the data from the ordered table
iterator.rows, err = iterator.tx.QueryContext(ctx, "SELECT address, hash FROM accountsiteratorhashes ORDER BY hash")
if err != nil {
iterator.Close(ctx)
return
}
iterator.step = oaiStepIterateOverOrderedTable
return
}
if iterator.step == oaiStepIterateOverOrderedTable {
acct = make([]accountAddressHash, 0, iterator.accountCount)
var addr basics.Address
for iterator.rows.Next() {
var addrbuf []byte
var hash []byte
err = iterator.rows.Scan(&addrbuf, &hash)
if err != nil {
iterator.Close(ctx)
return
}
if len(addrbuf) != len(addr) {
err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
iterator.Close(ctx)
return
}
copy(addr[:], addrbuf)
acct = append(acct, accountAddressHash{address: addr, digest: hash})
if len(acct) == iterator.accountCount {
// we're done with this iteration.
return
}
}
iterator.step = oaiStepShutdown
iterator.rows.Close()
iterator.rows = nil
return
}
if iterator.step == oaiStepShutdown {
err = iterator.Close(ctx)
if err != nil {
return
}
iterator.step = oaiStepDone
// fallthrough
}
return nil, 0, sql.ErrNoRows
}
// Close shuts down the orderedAccountsIter, releasing database resources.
func (iterator *orderedAccountsIter) Close(ctx context.Context) (err error) {
if iterator.rows != nil {
iterator.rows.Close()
iterator.rows = nil
}
if iterator.insertStmt != nil {
iterator.insertStmt.Close()
iterator.insertStmt = nil
}
_, err = iterator.tx.ExecContext(ctx, "DROP TABLE IF EXISTS accountsiteratorhashes")
return
}
// catchpointPendingHashesIterator allows us to iterate over the hashes in the catchpointpendinghashes table in their order.
type catchpointPendingHashesIterator struct {
hashCount int
tx *sql.Tx
rows *sql.Rows
}
// makeCatchpointPendingHashesIterator creates a pending hashes iterator that retrieves the hashes in the catchpointpendinghashes table.
func makeCatchpointPendingHashesIterator(hashCount int, tx *sql.Tx) *catchpointPendingHashesIterator {
return &catchpointPendingHashesIterator{
hashCount: hashCount,
tx: tx,
}
}
// Next returns an array containing the hashes, returning up to hashCount hashes at a time.
func (iterator *catchpointPendingHashesIterator) Next(ctx context.Context) (hashes [][]byte, err error) {
if iterator.rows == nil {
iterator.rows, err = iterator.tx.QueryContext(ctx, "SELECT data FROM catchpointpendinghashes ORDER BY data")
if err != nil {
return
}
}
	// gather up to hashCount hashes.
hashes = make([][]byte, 0, iterator.hashCount)
for iterator.rows.Next() {
var hash []byte
err = iterator.rows.Scan(&hash)
if err != nil {
iterator.Close()
return
}
hashes = append(hashes, hash)
if len(hashes) == iterator.hashCount {
// we're done with this iteration.
return
}
}
err = iterator.rows.Err()
if err != nil {
iterator.Close()
return
}
// we just finished reading the table.
iterator.Close()
return
}
// Close shuts down the catchpointPendingHashesIterator, releasing database resources.
func (iterator *catchpointPendingHashesIterator) Close() {
if iterator.rows != nil {
iterator.rows.Close()
iterator.rows = nil
}
}
| 1 | 41,221 | Can ndelta differ depending on when the deltas are compacted? That is, when intermediate updates are dropped? | algorand-go-algorand | go |
@@ -21,13 +21,12 @@ import net.sourceforge.pmd.lang.rule.xpath.JaxenXPathRuleQuery;
import net.sourceforge.pmd.lang.rule.xpath.SaxonXPathRuleQuery;
import net.sourceforge.pmd.lang.rule.xpath.XPathRuleQuery;
import net.sourceforge.pmd.properties.EnumeratedProperty;
-import net.sourceforge.pmd.properties.PropertySource;
import net.sourceforge.pmd.properties.StringProperty;
/**
* Rule that tries to match an XPath expression against a DOM view of an AST.
- *
- * <p>This rule needs a "xpath" property value in order to function.
+ * <p>
+ * <p>This rule needs a "xpath" property value in order to function.</p>
*/
public class XPathRule extends AbstractRule {
| 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.rule;
import static net.sourceforge.pmd.lang.rule.xpath.XPathRuleQuery.XPATH_1_0;
import static net.sourceforge.pmd.lang.rule.xpath.XPathRuleQuery.XPATH_1_0_COMPATIBILITY;
import static net.sourceforge.pmd.lang.rule.xpath.XPathRuleQuery.XPATH_2_0;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import net.sourceforge.pmd.RuleContext;
import net.sourceforge.pmd.lang.ast.Node;
import net.sourceforge.pmd.lang.rule.xpath.JaxenXPathRuleQuery;
import net.sourceforge.pmd.lang.rule.xpath.SaxonXPathRuleQuery;
import net.sourceforge.pmd.lang.rule.xpath.XPathRuleQuery;
import net.sourceforge.pmd.properties.EnumeratedProperty;
import net.sourceforge.pmd.properties.PropertySource;
import net.sourceforge.pmd.properties.StringProperty;
/**
* Rule that tries to match an XPath expression against a DOM view of an AST.
*
* <p>This rule needs a "xpath" property value in order to function.
*/
public class XPathRule extends AbstractRule {
public static final StringProperty XPATH_DESCRIPTOR = new StringProperty("xpath", "XPath expression", "", 1.0f);
private static final Map<String, String> XPATH_VERSIONS;
static {
Map<String, String> tmp = new HashMap<>();
tmp.put(XPATH_1_0, XPATH_1_0);
tmp.put(XPATH_1_0_COMPATIBILITY, XPATH_1_0_COMPATIBILITY);
tmp.put(XPATH_2_0, XPATH_2_0);
XPATH_VERSIONS = Collections.unmodifiableMap(tmp);
}
public static final EnumeratedProperty<String> VERSION_DESCRIPTOR
= new EnumeratedProperty<>("version",
"XPath specification version", XPATH_VERSIONS, XPATH_1_0, String.class, 2.0f);
private XPathRuleQuery xpathRuleQuery;
public XPathRule() {
definePropertyDescriptor(XPATH_DESCRIPTOR);
definePropertyDescriptor(VERSION_DESCRIPTOR);
}
public XPathRule(String xPath) {
this();
setXPath(xPath);
}
public void setXPath(String xPath) {
setProperty(XPathRule.XPATH_DESCRIPTOR, xPath);
}
public void setVersion(String version) {
setProperty(XPathRule.VERSION_DESCRIPTOR, version);
}
/**
* Apply the rule to all nodes.
*/
@Override
public void apply(List<? extends Node> nodes, RuleContext ctx) {
for (Node node : nodes) {
evaluate(node, ctx);
}
}
/**
* Evaluate the XPath query with the AST node. All matches are reported as
* violations.
*
* @param node
     *            The Node to be checked.
* @param data
* The RuleContext.
*/
public void evaluate(Node node, RuleContext data) {
init();
List<Node> nodes = xpathRuleQuery.evaluate(node, data);
if (nodes != null) {
for (Node n : nodes) {
addViolation(data, n, n.getImage());
}
}
}
@Override
public List<String> getRuleChainVisits() {
if (init()) {
for (String nodeName : xpathRuleQuery.getRuleChainVisits()) {
super.addRuleChainVisit(nodeName);
}
}
return super.getRuleChainVisits();
}
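    /**
     * Lazily builds the underlying XPath query implementation. Returns true only
     * when the query was just created, so that rule chain visits are registered
     * exactly once by {@link #getRuleChainVisits()}.
     */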
private boolean init() {
if (xpathRuleQuery == null) {
String xpath = getProperty(XPATH_DESCRIPTOR);
String version = getProperty(VERSION_DESCRIPTOR);
if (XPATH_1_0.equals(version)) {
xpathRuleQuery = new JaxenXPathRuleQuery();
} else {
xpathRuleQuery = new SaxonXPathRuleQuery();
}
xpathRuleQuery.setXPath(xpath);
xpathRuleQuery.setVersion(version);
xpathRuleQuery.setProperties(this.getPropertiesByPropertyDescriptor());
return true;
}
return false;
}
public boolean hasXPathExpression() {
return StringUtils.isNotBlank(getProperty(XPATH_DESCRIPTOR));
}
/**
* @see PropertySource#dysfunctionReason()
*/
@Override
public String dysfunctionReason() {
return hasXPathExpression() ? null : "Missing xPath expression";
}
}
| 1 | 13,487 | Our checkstyle config likes it better when the `<p>` is before the first word of the next paragraph, and not on a blank line | pmd-pmd | java |
@@ -1498,6 +1498,8 @@ func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *
if err := cmd.Start(); err != nil {
return err
}
+ // we close criuServer so that even if CRIU crashes or unexpectedly exits, runc will not hang.
+ criuServer.Close()
// cmd.Process will be replaced by a restored init.
criuProcess := cmd.Process
| 1 | // +build linux
package libcontainer
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"sync"
"syscall" // only for SysProcAttr and Signal
"time"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/opencontainers/runtime-spec/specs-go"
criurpc "github.com/checkpoint-restore/go-criu/rpc"
"github.com/golang/protobuf/proto"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
)
const stdioFdCount = 3
type linuxContainer struct {
id string
root string
config *configs.Config
cgroupManager cgroups.Manager
intelRdtManager intelrdt.Manager
initPath string
initArgs []string
initProcess parentProcess
initProcessStartTime uint64
criuPath string
newuidmapPath string
newgidmapPath string
m sync.Mutex
criuVersion int
state containerState
created time.Time
}
// State represents a running container's state
type State struct {
BaseState
// Platform specific fields below here
// Specified if the container was started under the rootless mode.
// Set to true if BaseState.Config.RootlessEUID && BaseState.Config.RootlessCgroups
Rootless bool `json:"rootless"`
// Path to all the cgroups setup for a container. Key is cgroup subsystem name
// with the value as the path.
CgroupPaths map[string]string `json:"cgroup_paths"`
// NamespacePaths are filepaths to the container's namespaces. Key is the namespace type
// with the value as the path.
NamespacePaths map[configs.NamespaceType]string `json:"namespace_paths"`
// Container's standard descriptors (std{in,out,err}), needed for checkpoint and restore
ExternalDescriptors []string `json:"external_descriptors,omitempty"`
// Intel RDT "resource control" filesystem path
IntelRdtPath string `json:"intel_rdt_path"`
}
// Container is a libcontainer container object.
//
// Each container is thread-safe within the same process. Since a container can
// be destroyed by a separate process, any function may return that the container
// was not found.
type Container interface {
BaseContainer
// Methods below here are platform specific
// Checkpoint checkpoints the running container's state to disk using the criu(8) utility.
//
// errors:
// Systemerror - System error.
Checkpoint(criuOpts *CriuOpts) error
// Restore restores the checkpointed container to a running state using the criu(8) utility.
//
// errors:
// Systemerror - System error.
Restore(process *Process, criuOpts *CriuOpts) error
// If the Container state is RUNNING or CREATED, sets the Container state to PAUSING and pauses
// the execution of any user processes. Asynchronously, when the container finished being paused the
// state is changed to PAUSED.
// If the Container state is PAUSED, do nothing.
//
// errors:
// ContainerNotExists - Container no longer exists,
// ContainerNotRunning - Container not running or created,
// Systemerror - System error.
Pause() error
// If the Container state is PAUSED, resumes the execution of any user processes in the
// Container before setting the Container state to RUNNING.
// If the Container state is RUNNING, do nothing.
//
// errors:
// ContainerNotExists - Container no longer exists,
// ContainerNotPaused - Container is not paused,
// Systemerror - System error.
Resume() error
// NotifyOOM returns a read-only channel signaling when the container receives an OOM notification.
//
// errors:
// Systemerror - System error.
NotifyOOM() (<-chan struct{}, error)
// NotifyMemoryPressure returns a read-only channel signaling when the container reaches a given pressure level
//
// errors:
// Systemerror - System error.
NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error)
}
// ID returns the container's unique ID
func (c *linuxContainer) ID() string {
return c.id
}
// Config returns the container's configuration
func (c *linuxContainer) Config() configs.Config {
return *c.config
}
func (c *linuxContainer) Status() (Status, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentStatus()
}
func (c *linuxContainer) State() (*State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentState()
}
func (c *linuxContainer) OCIState() (*specs.State, error) {
c.m.Lock()
defer c.m.Unlock()
return c.currentOCIState()
}
func (c *linuxContainer) Processes() ([]int, error) {
pids, err := c.cgroupManager.GetAllPids()
if err != nil {
return nil, newSystemErrorWithCause(err, "getting all container pids from cgroups")
}
return pids, nil
}
func (c *linuxContainer) Stats() (*Stats, error) {
var (
err error
stats = &Stats{}
)
if stats.CgroupStats, err = c.cgroupManager.GetStats(); err != nil {
return stats, newSystemErrorWithCause(err, "getting container stats from cgroups")
}
if c.intelRdtManager != nil {
if stats.IntelRdtStats, err = c.intelRdtManager.GetStats(); err != nil {
return stats, newSystemErrorWithCause(err, "getting container's Intel RDT stats")
}
}
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
istats, err := getNetworkInterfaceStats(iface.HostInterfaceName)
if err != nil {
return stats, newSystemErrorWithCausef(err, "getting network stats for interface %q", iface.HostInterfaceName)
}
stats.Interfaces = append(stats.Interfaces, istats)
}
}
return stats, nil
}
func (c *linuxContainer) Set(config configs.Config) error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status == Stopped {
return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning)
}
if err := c.cgroupManager.Set(&config); err != nil {
// Set configs back
if err2 := c.cgroupManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back cgroup configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
if c.intelRdtManager != nil {
if err := c.intelRdtManager.Set(&config); err != nil {
// Set configs back
if err2 := c.intelRdtManager.Set(c.config); err2 != nil {
logrus.Warnf("Setting back intelrdt configs failed due to error: %v, your state.json and actual configs might be inconsistent.", err2)
}
return err
}
}
// After config setting succeed, update config and states
c.config = &config
_, err = c.updateState(nil)
return err
}
func (c *linuxContainer) Start(process *Process) error {
c.m.Lock()
defer c.m.Unlock()
if process.Init {
if err := c.createExecFifo(); err != nil {
return err
}
}
if err := c.start(process); err != nil {
if process.Init {
c.deleteExecFifo()
}
return err
}
return nil
}
func (c *linuxContainer) Run(process *Process) error {
if err := c.Start(process); err != nil {
return err
}
if process.Init {
return c.exec()
}
return nil
}
func (c *linuxContainer) Exec() error {
c.m.Lock()
defer c.m.Unlock()
return c.exec()
}
func (c *linuxContainer) exec() error {
path := filepath.Join(c.root, execFifoFilename)
pid := c.initProcess.pid()
blockingFifoOpenCh := awaitFifoOpen(path)
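	// Wait for the exec fifo to be opened and written to by the container's
	// init process; poll the init process every 100ms so we can detect the
	// case where it died before (or without) writing to the fifo.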
for {
select {
case result := <-blockingFifoOpenCh:
return handleFifoResult(result)
case <-time.After(time.Millisecond * 100):
stat, err := system.Stat(pid)
if err != nil || stat.State == system.Zombie {
				// could be because the process started, ran, and completed between our 100ms timeout and our system.Stat() check.
// see if the fifo exists and has data (with a non-blocking open, which will succeed if the writing process is complete).
if err := handleFifoResult(fifoOpen(path, false)); err != nil {
return errors.New("container process is already dead")
}
return nil
}
}
}
}
func readFromExecFifo(execFifo io.Reader) error {
data, err := ioutil.ReadAll(execFifo)
if err != nil {
return err
}
if len(data) <= 0 {
return fmt.Errorf("cannot start an already running container")
}
return nil
}
func awaitFifoOpen(path string) <-chan openResult {
fifoOpened := make(chan openResult)
go func() {
result := fifoOpen(path, true)
fifoOpened <- result
}()
return fifoOpened
}
func fifoOpen(path string, block bool) openResult {
flags := os.O_RDONLY
if !block {
flags |= syscall.O_NONBLOCK
}
f, err := os.OpenFile(path, flags, 0)
if err != nil {
return openResult{err: newSystemErrorWithCause(err, "open exec fifo for reading")}
}
return openResult{file: f}
}
func handleFifoResult(result openResult) error {
if result.err != nil {
return result.err
}
f := result.file
defer f.Close()
if err := readFromExecFifo(f); err != nil {
return err
}
return os.Remove(f.Name())
}
type openResult struct {
file *os.File
err error
}
func (c *linuxContainer) start(process *Process) error {
parent, err := c.newParentProcess(process)
if err != nil {
return newSystemErrorWithCause(err, "creating new parent process")
}
parent.forwardChildLogs()
if err := parent.start(); err != nil {
// terminate the process to ensure that it properly is reaped.
if err := ignoreTerminateErrors(parent.terminate()); err != nil {
logrus.Warn(err)
}
return newSystemErrorWithCause(err, "starting container process")
}
// generate a timestamp indicating when the container was started
c.created = time.Now().UTC()
if process.Init {
c.state = &createdState{
c: c,
}
state, err := c.updateState(parent)
if err != nil {
return err
}
c.initProcessStartTime = state.InitProcessStartTime
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
return err
}
for i, hook := range c.config.Hooks.Poststart {
if err := hook.Run(s); err != nil {
if err := ignoreTerminateErrors(parent.terminate()); err != nil {
logrus.Warn(err)
}
return newSystemErrorWithCausef(err, "running poststart hook %d", i)
}
}
}
}
return nil
}
func (c *linuxContainer) Signal(s os.Signal, all bool) error {
c.m.Lock()
defer c.m.Unlock()
if all {
return signalAllProcesses(c.cgroupManager, s)
}
status, err := c.currentStatus()
if err != nil {
return err
}
// to avoid a PID reuse attack
if status == Running || status == Created || status == Paused {
if err := c.initProcess.signal(s); err != nil {
return newSystemErrorWithCause(err, "signaling init process")
}
return nil
}
return newGenericError(fmt.Errorf("container not running"), ContainerNotRunning)
}
func (c *linuxContainer) createExecFifo() error {
rootuid, err := c.Config().HostRootUID()
if err != nil {
return err
}
rootgid, err := c.Config().HostRootGID()
if err != nil {
return err
}
fifoName := filepath.Join(c.root, execFifoFilename)
if _, err := os.Stat(fifoName); err == nil {
return fmt.Errorf("exec fifo %s already exists", fifoName)
}
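	// Temporarily clear the umask so the fifo is created with exactly mode 0622.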
oldMask := unix.Umask(0000)
if err := unix.Mkfifo(fifoName, 0622); err != nil {
unix.Umask(oldMask)
return err
}
unix.Umask(oldMask)
return os.Chown(fifoName, rootuid, rootgid)
}
func (c *linuxContainer) deleteExecFifo() {
fifoName := filepath.Join(c.root, execFifoFilename)
os.Remove(fifoName)
}
// includeExecFifo opens the container's execfifo as a pathfd, so that the
// container cannot access the statedir (and the FIFO itself remains
// un-opened). It then adds the FifoFd to the given exec.Cmd as an inherited
// fd, with _LIBCONTAINER_FIFOFD set to its fd number.
func (c *linuxContainer) includeExecFifo(cmd *exec.Cmd) error {
fifoName := filepath.Join(c.root, execFifoFilename)
fifoFd, err := unix.Open(fifoName, unix.O_PATH|unix.O_CLOEXEC, 0)
if err != nil {
return err
}
cmd.ExtraFiles = append(cmd.ExtraFiles, os.NewFile(uintptr(fifoFd), fifoName))
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_FIFOFD=%d", stdioFdCount+len(cmd.ExtraFiles)-1))
return nil
}
func (c *linuxContainer) newParentProcess(p *Process) (parentProcess, error) {
parentInitPipe, childInitPipe, err := utils.NewSockPair("init")
if err != nil {
return nil, newSystemErrorWithCause(err, "creating new init pipe")
}
messageSockPair := filePair{parentInitPipe, childInitPipe}
parentLogPipe, childLogPipe, err := os.Pipe()
if err != nil {
return nil, fmt.Errorf("Unable to create the log pipe: %s", err)
}
logFilePair := filePair{parentLogPipe, childLogPipe}
cmd := c.commandTemplate(p, childInitPipe, childLogPipe)
if !p.Init {
return c.newSetnsProcess(p, cmd, messageSockPair, logFilePair)
}
// We only set up fifoFd if we're not doing a `runc exec`. The historic
// reason for this is that previously we would pass a dirfd that allowed
// for container rootfs escape (and not doing it in `runc exec` avoided
// that problem), but we no longer do that. However, there's no need to do
// this for `runc exec` so we just keep it this way to be safe.
if err := c.includeExecFifo(cmd); err != nil {
return nil, newSystemErrorWithCause(err, "including execfifo in cmd.Exec setup")
}
return c.newInitProcess(p, cmd, messageSockPair, logFilePair)
}
func (c *linuxContainer) commandTemplate(p *Process, childInitPipe *os.File, childLogPipe *os.File) *exec.Cmd {
cmd := exec.Command(c.initPath, c.initArgs[1:]...)
cmd.Args[0] = c.initArgs[0]
cmd.Stdin = p.Stdin
cmd.Stdout = p.Stdout
cmd.Stderr = p.Stderr
cmd.Dir = c.config.Rootfs
if cmd.SysProcAttr == nil {
cmd.SysProcAttr = &syscall.SysProcAttr{}
}
cmd.Env = append(cmd.Env, fmt.Sprintf("GOMAXPROCS=%s", os.Getenv("GOMAXPROCS")))
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ExtraFiles...)
if p.ConsoleSocket != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, p.ConsoleSocket)
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_CONSOLE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
)
}
cmd.ExtraFiles = append(cmd.ExtraFiles, childInitPipe)
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_INITPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
fmt.Sprintf("_LIBCONTAINER_STATEDIR=%s", c.root),
)
cmd.ExtraFiles = append(cmd.ExtraFiles, childLogPipe)
cmd.Env = append(cmd.Env,
fmt.Sprintf("_LIBCONTAINER_LOGPIPE=%d", stdioFdCount+len(cmd.ExtraFiles)-1),
fmt.Sprintf("_LIBCONTAINER_LOGLEVEL=%s", p.LogLevel),
)
	// NOTE: when running a container with no PID namespace and the parent
	// process spawning the container is PID1, the pdeathsig is delivered to
	// the container's init process by the kernel for some reason, even with
	// the parent still running.
if c.config.ParentDeathSignal > 0 {
cmd.SysProcAttr.Pdeathsig = syscall.Signal(c.config.ParentDeathSignal)
}
return cmd
}
func (c *linuxContainer) newInitProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*initProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initStandard))
nsMaps := make(map[configs.NamespaceType]string)
for _, ns := range c.config.Namespaces {
if ns.Path != "" {
nsMaps[ns.Type] = ns.Path
}
}
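	// sharePidns is true when a PID namespace path was supplied, i.e. the
	// container joins an existing PID namespace instead of creating its own.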
_, sharePidns := nsMaps[configs.NEWPID]
data, err := c.bootstrapData(c.config.Namespaces.CloneFlags(), nsMaps)
if err != nil {
return nil, err
}
init := &initProcess{
cmd: cmd,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
manager: c.cgroupManager,
intelRdtManager: c.intelRdtManager,
config: c.newInitConfig(p),
container: c,
process: p,
bootstrapData: data,
sharePidns: sharePidns,
}
c.initProcess = init
return init, nil
}
func (c *linuxContainer) newSetnsProcess(p *Process, cmd *exec.Cmd, messageSockPair, logFilePair filePair) (*setnsProcess, error) {
cmd.Env = append(cmd.Env, "_LIBCONTAINER_INITTYPE="+string(initSetns))
state, err := c.currentState()
if err != nil {
return nil, newSystemErrorWithCause(err, "getting container's current state")
}
// for setns process, we don't have to set cloneflags as the process namespaces
// will only be set via setns syscall
data, err := c.bootstrapData(0, state.NamespacePaths)
if err != nil {
return nil, err
}
return &setnsProcess{
cmd: cmd,
cgroupPaths: c.cgroupManager.GetPaths(),
rootlessCgroups: c.config.RootlessCgroups,
intelRdtPath: state.IntelRdtPath,
messageSockPair: messageSockPair,
logFilePair: logFilePair,
config: c.newInitConfig(p),
process: p,
bootstrapData: data,
}, nil
}
func (c *linuxContainer) newInitConfig(process *Process) *initConfig {
cfg := &initConfig{
Config: c.config,
Args: process.Args,
Env: process.Env,
User: process.User,
AdditionalGroups: process.AdditionalGroups,
Cwd: process.Cwd,
Capabilities: process.Capabilities,
PassedFilesCount: len(process.ExtraFiles),
ContainerId: c.ID(),
NoNewPrivileges: c.config.NoNewPrivileges,
RootlessEUID: c.config.RootlessEUID,
RootlessCgroups: c.config.RootlessCgroups,
AppArmorProfile: c.config.AppArmorProfile,
ProcessLabel: c.config.ProcessLabel,
Rlimits: c.config.Rlimits,
}
if process.NoNewPrivileges != nil {
cfg.NoNewPrivileges = *process.NoNewPrivileges
}
if process.AppArmorProfile != "" {
cfg.AppArmorProfile = process.AppArmorProfile
}
if process.Label != "" {
cfg.ProcessLabel = process.Label
}
if len(process.Rlimits) > 0 {
cfg.Rlimits = process.Rlimits
}
cfg.CreateConsole = process.ConsoleSocket != nil
cfg.ConsoleWidth = process.ConsoleWidth
cfg.ConsoleHeight = process.ConsoleHeight
return cfg
}
func (c *linuxContainer) Destroy() error {
c.m.Lock()
defer c.m.Unlock()
return c.state.destroy()
}
func (c *linuxContainer) Pause() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
switch status {
case Running, Created:
if err := c.cgroupManager.Freeze(configs.Frozen); err != nil {
return err
}
return c.state.transition(&pausedState{
c: c,
})
}
return newGenericError(fmt.Errorf("container not running or created: %s", status), ContainerNotRunning)
}
func (c *linuxContainer) Resume() error {
c.m.Lock()
defer c.m.Unlock()
status, err := c.currentStatus()
if err != nil {
return err
}
if status != Paused {
return newGenericError(fmt.Errorf("container not paused"), ContainerNotPaused)
}
if err := c.cgroupManager.Freeze(configs.Thawed); err != nil {
return err
}
return c.state.transition(&runningState{
c: c,
})
}
func (c *linuxContainer) NotifyOOM() (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting OOM notifications may fail if you don't have the full access to cgroups")
}
return notifyOnOOM(c.cgroupManager.GetPaths())
}
func (c *linuxContainer) NotifyMemoryPressure(level PressureLevel) (<-chan struct{}, error) {
// XXX(cyphar): This requires cgroups.
if c.config.RootlessCgroups {
logrus.Warn("getting memory pressure notifications may fail if you don't have the full access to cgroups")
}
return notifyMemoryPressure(c.cgroupManager.GetPaths(), level)
}
var criuFeatures *criurpc.CriuFeatures
func (c *linuxContainer) checkCriuFeatures(criuOpts *CriuOpts, rpcOpts *criurpc.CriuOpts, criuFeat *criurpc.CriuFeatures) error {
var t criurpc.CriuReqType
t = criurpc.CriuReqType_FEATURE_CHECK
// criu 1.8 => 10800
if err := c.checkCriuVersion(10800); err != nil {
// Feature checking was introduced with CRIU 1.8.
// Ignore the feature check if an older CRIU version is used
// and just act as before.
// As all automated PR testing is done using CRIU 1.7 this
// code will not be tested by automated PR testing.
return nil
}
// make sure the features we are looking for are really not from
// some previous check
criuFeatures = nil
req := &criurpc.CriuReq{
Type: &t,
// Theoretically this should not be necessary but CRIU
// segfaults if Opts is empty.
// Fixed in CRIU 2.12
Opts: rpcOpts,
Features: criuFeat,
}
err := c.criuSwrk(nil, req, criuOpts, false, nil)
if err != nil {
logrus.Debugf("%s", err)
return fmt.Errorf("CRIU feature check failed")
}
logrus.Debugf("Feature check says: %s", criuFeatures)
missingFeatures := false
// The outer if checks if the fields actually exist
if (criuFeat.MemTrack != nil) &&
(criuFeatures.MemTrack != nil) {
// The inner if checks if they are set to true
if *criuFeat.MemTrack && !*criuFeatures.MemTrack {
missingFeatures = true
logrus.Debugf("CRIU does not support MemTrack")
}
}
// This needs to be repeated for every new feature check.
// Is there a way to put this in a function. Reflection?
if (criuFeat.LazyPages != nil) &&
(criuFeatures.LazyPages != nil) {
if *criuFeat.LazyPages && !*criuFeatures.LazyPages {
missingFeatures = true
logrus.Debugf("CRIU does not support LazyPages")
}
}
if missingFeatures {
return fmt.Errorf("CRIU is missing features")
}
return nil
}
func parseCriuVersion(path string) (int, error) {
var x, y, z int
out, err := exec.Command(path, "-V").Output()
if err != nil {
return 0, fmt.Errorf("Unable to execute CRIU command: %s", path)
}
x = 0
y = 0
z = 0
if ep := strings.Index(string(out), "-"); ep >= 0 {
// criu Git version format
var version string
if sp := strings.Index(string(out), "GitID"); sp > 0 {
version = string(out)[sp:ep]
} else {
return 0, fmt.Errorf("Unable to parse the CRIU version: %s", path)
}
n, err := fmt.Sscanf(version, "GitID: v%d.%d.%d", &x, &y, &z) // 1.5.2
if err != nil {
n, err = fmt.Sscanf(version, "GitID: v%d.%d", &x, &y) // 1.6
y++
} else {
z++
}
if n < 2 || err != nil {
return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", version, n, err)
}
} else {
// criu release version format
n, err := fmt.Sscanf(string(out), "Version: %d.%d.%d\n", &x, &y, &z) // 1.5.2
if err != nil {
n, err = fmt.Sscanf(string(out), "Version: %d.%d\n", &x, &y) // 1.6
}
if n < 2 || err != nil {
return 0, fmt.Errorf("Unable to parse the CRIU version: %s %d %s", out, n, err)
}
}
return x*10000 + y*100 + z, nil
}
func compareCriuVersion(criuVersion int, minVersion int) error {
// simple function to perform the actual version compare
if criuVersion < minVersion {
return fmt.Errorf("CRIU version %d must be %d or higher", criuVersion, minVersion)
}
return nil
}
// This is used to store the result of criu version RPC
var criuVersionRPC *criurpc.CriuVersion
// checkCriuVersion checks Criu version greater than or equal to minVersion
func (c *linuxContainer) checkCriuVersion(minVersion int) error {
// If the version of criu has already been determined there is no need
// to ask criu for the version again. Use the value from c.criuVersion.
if c.criuVersion != 0 {
return compareCriuVersion(c.criuVersion, minVersion)
}
// First try if this version of CRIU support the version RPC.
// The CRIU version RPC was introduced with CRIU 3.0.
// First, reset the variable for the RPC answer to nil
criuVersionRPC = nil
var t criurpc.CriuReqType
t = criurpc.CriuReqType_VERSION
req := &criurpc.CriuReq{
Type: &t,
}
err := c.criuSwrk(nil, req, nil, false, nil)
if err != nil {
return fmt.Errorf("CRIU version check failed: %s", err)
}
if criuVersionRPC != nil {
logrus.Debugf("CRIU version: %s", criuVersionRPC)
// major and minor are always set
c.criuVersion = int(*criuVersionRPC.Major) * 10000
c.criuVersion += int(*criuVersionRPC.Minor) * 100
if criuVersionRPC.Sublevel != nil {
c.criuVersion += int(*criuVersionRPC.Sublevel)
}
if criuVersionRPC.Gitid != nil {
// runc's convention is that a CRIU git release is
// always the same as increasing the minor by 1
c.criuVersion -= (c.criuVersion % 100)
c.criuVersion += 100
}
return compareCriuVersion(c.criuVersion, minVersion)
}
// This is CRIU without the version RPC and therefore
// older than 3.0. Parsing the output is required.
	// This can be removed once runc no longer supports criu older than 3.0
c.criuVersion, err = parseCriuVersion(c.criuPath)
if err != nil {
return err
}
return compareCriuVersion(c.criuVersion, minVersion)
}
const descriptorsFilename = "descriptors.json"
func (c *linuxContainer) addCriuDumpMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := m.Destination
if strings.HasPrefix(mountDest, c.config.Rootfs) {
mountDest = mountDest[len(c.config.Rootfs):]
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(mountDest),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
func (c *linuxContainer) addMaskPaths(req *criurpc.CriuReq) error {
for _, path := range c.config.MaskPaths {
fi, err := os.Stat(fmt.Sprintf("/proc/%d/root/%s", c.initProcess.pid(), path))
if err != nil {
if os.IsNotExist(err) {
continue
}
return err
}
if fi.IsDir() {
continue
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(path),
Val: proto.String("/dev/null"),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
return nil
}
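// waitForCriuLazyServer waits for CRIU to report (via a single byte written to
// the pipe) that the lazy-pages server is ready, and forwards that byte to the
// status file supplied by the caller.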
func waitForCriuLazyServer(r *os.File, status string) error {
data := make([]byte, 1)
_, err := r.Read(data)
if err != nil {
return err
}
fd, err := os.OpenFile(status, os.O_TRUNC|os.O_WRONLY, os.ModeAppend)
if err != nil {
return err
}
_, err = fd.Write(data)
if err != nil {
return err
}
fd.Close()
return nil
}
func (c *linuxContainer) handleCriuConfigurationFile(rpcOpts *criurpc.CriuOpts) {
// CRIU will evaluate a configuration starting with release 3.11.
// Settings in the configuration file will overwrite RPC settings.
// Look for annotations. The annotation 'org.criu.config'
// specifies if CRIU should use a different, container specific
// configuration file.
_, annotations := utils.Annotations(c.config.Labels)
configFile, exists := annotations["org.criu.config"]
if exists {
// If the annotation 'org.criu.config' exists and is set
// to a non-empty string, tell CRIU to use that as a
// configuration file. If the file does not exist, CRIU
// will just ignore it.
if configFile != "" {
rpcOpts.ConfigFile = proto.String(configFile)
}
// If 'org.criu.config' exists and is set to an empty
// string, a runc specific CRIU configuration file will
// be not set at all.
} else {
// If the mentioned annotation has not been found, specify
// a default CRIU configuration file.
rpcOpts.ConfigFile = proto.String("/etc/criu/runc.conf")
}
}
func (c *linuxContainer) Checkpoint(criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
// Checkpoint is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU 2.0 has
// support for doing unprivileged dumps, but the setup of
// rootless containers might make this complicated.
// criu 1.5.2 => 10502
if err := c.checkCriuVersion(10502); err != nil {
return err
}
if criuOpts.ImagesDirectory == "" {
return fmt.Errorf("invalid directory to save checkpoint")
}
// Since a container can be C/R'ed multiple times,
// the checkpoint directory may already exist.
if err := os.Mkdir(criuOpts.ImagesDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
if criuOpts.WorkDirectory == "" {
criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
}
if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
rpcOpts := criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
WorkDirFd: proto.Int32(int32(workDir.Fd())),
LogLevel: proto.Int32(4),
LogFile: proto.String("dump.log"),
Root: proto.String(c.config.Rootfs),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
Pid: proto.Int32(int32(c.initProcess.pid())),
ShellJob: proto.Bool(criuOpts.ShellJob),
LeaveRunning: proto.Bool(criuOpts.LeaveRunning),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
}
c.handleCriuConfigurationFile(&rpcOpts)
// If the container is running in a network namespace and has
// a path to the network namespace configured, we will dump
// that network namespace as an external namespace and we
// will expect that the namespace exists during restore.
// This basically means that CRIU will ignore the namespace
// and expect to be setup correctly.
nsPath := c.config.Namespaces.PathOf(configs.NEWNET)
if nsPath != "" {
// For this to work we need at least criu 3.11.0 => 31100.
// As there was already a successful version check we will
// not error out if it fails. runc will just behave as it used
// to do and ignore external network namespaces.
err := c.checkCriuVersion(31100)
if err == nil {
// CRIU expects the information about an external namespace
// like this: --external net[<inode>]:<key>
// This <key> is always 'extRootNetNS'.
var netns syscall.Stat_t
err = syscall.Stat(nsPath, &netns)
if err != nil {
return err
}
criuExternal := fmt.Sprintf("net[%d]:extRootNetNS", netns.Ino)
rpcOpts.External = append(rpcOpts.External, criuExternal)
}
}
if !cgroups.IsCgroup2UnifiedMode() && c.checkCriuVersion(31400) == nil {
// CRIU currently cannot handle the v2 freezer correctly
// before release 3.14. For older releases we are telling
// CRIU to not use the cgroup v2 freezer. CRIU will pause
// each process manually using ptrace().
if fcg := c.cgroupManager.GetPaths()["freezer"]; fcg != "" {
rpcOpts.FreezeCgroup = proto.String(fcg)
}
}
// append optional criu opts, e.g., page-server and port
if criuOpts.PageServer.Address != "" && criuOpts.PageServer.Port != 0 {
rpcOpts.Ps = &criurpc.CriuPageServerInfo{
Address: proto.String(criuOpts.PageServer.Address),
Port: proto.Int32(criuOpts.PageServer.Port),
}
}
//pre-dump may need parentImage param to complete iterative migration
if criuOpts.ParentImage != "" {
rpcOpts.ParentImg = proto.String(criuOpts.ParentImage)
rpcOpts.TrackMem = proto.Bool(true)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
// criu 1.7 => 10700
if err := c.checkCriuVersion(10700); err != nil {
return err
}
mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
rpcOpts.ManageCgroupsMode = &mode
}
var t criurpc.CriuReqType
if criuOpts.PreDump {
feat := criurpc.CriuFeatures{
MemTrack: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
t = criurpc.CriuReqType_PRE_DUMP
} else {
t = criurpc.CriuReqType_DUMP
}
req := &criurpc.CriuReq{
Type: &t,
Opts: &rpcOpts,
}
if criuOpts.LazyPages {
// lazy migration requested; check if criu supports it
feat := criurpc.CriuFeatures{
LazyPages: proto.Bool(true),
}
if err := c.checkCriuFeatures(criuOpts, &rpcOpts, &feat); err != nil {
return err
}
statusRead, statusWrite, err := os.Pipe()
if err != nil {
return err
}
rpcOpts.StatusFd = proto.Int32(int32(statusWrite.Fd()))
go waitForCriuLazyServer(statusRead, criuOpts.StatusFd)
}
	// no need to dump this information in pre-dump
if !criuOpts.PreDump {
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuDumpMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() {
c.addCriuDumpMount(req, m)
continue
}
// cgroup v1
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuDumpMount(req, b)
}
}
}
if err := c.addMaskPaths(req); err != nil {
return err
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuDumpMount(req, m)
}
// Write the FD info to a file in the image directory
fdsJSON, err := json.Marshal(c.initProcess.externalDescriptors())
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename), fdsJSON, 0600)
if err != nil {
return err
}
}
err = c.criuSwrk(nil, req, criuOpts, false, nil)
if err != nil {
return err
}
return nil
}
func (c *linuxContainer) addCriuRestoreMount(req *criurpc.CriuReq, m *configs.Mount) {
mountDest := m.Destination
if strings.HasPrefix(mountDest, c.config.Rootfs) {
mountDest = mountDest[len(c.config.Rootfs):]
}
extMnt := &criurpc.ExtMountMap{
Key: proto.String(mountDest),
Val: proto.String(m.Source),
}
req.Opts.ExtMnt = append(req.Opts.ExtMnt, extMnt)
}
func (c *linuxContainer) restoreNetwork(req *criurpc.CriuReq, criuOpts *CriuOpts) {
for _, iface := range c.config.Networks {
switch iface.Type {
case "veth":
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(iface.HostInterfaceName)
veth.IfIn = proto.String(iface.Name)
req.Opts.Veths = append(req.Opts.Veths, veth)
case "loopback":
// Do nothing
}
}
for _, i := range criuOpts.VethPairs {
veth := new(criurpc.CriuVethPair)
veth.IfOut = proto.String(i.HostInterfaceName)
veth.IfIn = proto.String(i.ContainerInterfaceName)
req.Opts.Veths = append(req.Opts.Veths, veth)
}
}
// makeCriuRestoreMountpoints makes the actual mountpoints for the
// restore using CRIU. This function is inspired from the code in
// rootfs_linux.go
func (c *linuxContainer) makeCriuRestoreMountpoints(m *configs.Mount) error {
switch m.Device {
case "cgroup":
// Do nothing for cgroup, CRIU should handle it
case "bind":
// The prepareBindMount() function checks if source
// exists. So it cannot be used for other filesystem types.
if err := prepareBindMount(m, c.config.Rootfs); err != nil {
return err
}
default:
// for all other file-systems just create the mountpoints
dest, err := securejoin.SecureJoin(c.config.Rootfs, m.Destination)
if err != nil {
return err
}
if err := checkProcMount(c.config.Rootfs, dest, ""); err != nil {
return err
}
m.Destination = dest
if err := os.MkdirAll(dest, 0755); err != nil {
return err
}
}
return nil
}
// isPathInPrefixList is a small function for CRIU restore to make sure
// mountpoints, which are on a tmpfs, are not created in the rootfs
func isPathInPrefixList(path string, prefix []string) bool {
for _, p := range prefix {
if strings.HasPrefix(path, p+"/") {
return true
}
}
return false
}
// prepareCriuRestoreMounts tries to set up the rootfs of the
// container to be restored in the same way runc does it for
// initial container creation. Even for a read-only rootfs container
// runc modifies the rootfs to add mountpoints which do not exist.
// This function also creates missing mountpoints as long as they
// are not on top of a tmpfs, as CRIU will restore tmpfs content anyway.
func (c *linuxContainer) prepareCriuRestoreMounts(mounts []*configs.Mount) error {
// First get a list of a all tmpfs mounts
tmpfs := []string{}
for _, m := range mounts {
switch m.Device {
case "tmpfs":
tmpfs = append(tmpfs, m.Destination)
}
}
// Now go through all mounts and create the mountpoints
// if the mountpoints are not on a tmpfs, as CRIU will
// restore the complete tmpfs content from its checkpoint.
for _, m := range mounts {
if !isPathInPrefixList(m.Destination, tmpfs) {
if err := c.makeCriuRestoreMountpoints(m); err != nil {
return err
}
}
}
return nil
}
func (c *linuxContainer) Restore(process *Process, criuOpts *CriuOpts) error {
c.m.Lock()
defer c.m.Unlock()
var extraFiles []*os.File
// Restore is unlikely to work if os.Geteuid() != 0 || system.RunningInUserNS().
// (CLI prints a warning)
// TODO(avagin): Figure out how to make this work nicely. CRIU doesn't have
// support for unprivileged restore at the moment.
// criu 1.5.2 => 10502
if err := c.checkCriuVersion(10502); err != nil {
return err
}
if criuOpts.WorkDirectory == "" {
criuOpts.WorkDirectory = filepath.Join(c.root, "criu.work")
}
// Since a container can be C/R'ed multiple times,
// the work directory may already exist.
if err := os.Mkdir(criuOpts.WorkDirectory, 0700); err != nil && !os.IsExist(err) {
return err
}
workDir, err := os.Open(criuOpts.WorkDirectory)
if err != nil {
return err
}
defer workDir.Close()
if criuOpts.ImagesDirectory == "" {
return fmt.Errorf("invalid directory to restore checkpoint")
}
imageDir, err := os.Open(criuOpts.ImagesDirectory)
if err != nil {
return err
}
defer imageDir.Close()
// CRIU has a few requirements for a root directory:
// * it must be a mount point
// * its parent must not be overmounted
// c.config.Rootfs is bind-mounted to a temporary directory
// to satisfy these requirements.
root := filepath.Join(c.root, "criu-root")
if err := os.Mkdir(root, 0755); err != nil {
return err
}
defer os.Remove(root)
root, err = filepath.EvalSymlinks(root)
if err != nil {
return err
}
err = unix.Mount(c.config.Rootfs, root, "", unix.MS_BIND|unix.MS_REC, "")
if err != nil {
return err
}
defer unix.Unmount(root, unix.MNT_DETACH)
t := criurpc.CriuReqType_RESTORE
req := &criurpc.CriuReq{
Type: &t,
Opts: &criurpc.CriuOpts{
ImagesDirFd: proto.Int32(int32(imageDir.Fd())),
WorkDirFd: proto.Int32(int32(workDir.Fd())),
EvasiveDevices: proto.Bool(true),
LogLevel: proto.Int32(4),
LogFile: proto.String("restore.log"),
RstSibling: proto.Bool(true),
Root: proto.String(root),
ManageCgroups: proto.Bool(true),
NotifyScripts: proto.Bool(true),
ShellJob: proto.Bool(criuOpts.ShellJob),
ExtUnixSk: proto.Bool(criuOpts.ExternalUnixConnections),
TcpEstablished: proto.Bool(criuOpts.TcpEstablished),
FileLocks: proto.Bool(criuOpts.FileLocks),
EmptyNs: proto.Uint32(criuOpts.EmptyNs),
OrphanPtsMaster: proto.Bool(true),
AutoDedup: proto.Bool(criuOpts.AutoDedup),
LazyPages: proto.Bool(criuOpts.LazyPages),
},
}
c.handleCriuConfigurationFile(req.Opts)
// Same as during checkpointing. If the container has a specific network namespace
	// assigned to it, this now expects that the checkpoint will be restored in an
// already created network namespace.
nsPath := c.config.Namespaces.PathOf(configs.NEWNET)
if nsPath != "" {
// For this to work we need at least criu 3.11.0 => 31100.
// As there was already a successful version check we will
// not error out if it fails. runc will just behave as it used
// to do and ignore external network namespaces.
err := c.checkCriuVersion(31100)
if err == nil {
// CRIU wants the information about an existing network namespace
// like this: --inherit-fd fd[<fd>]:<key>
// The <key> needs to be the same as during checkpointing.
			// We always use 'extRootNetNS' as the key.
			netns, err := os.Open(nsPath)
			if err != nil {
				logrus.Errorf("If a specific network namespace is defined it must exist: %s", err)
				return fmt.Errorf("Requested network namespace %v does not exist", nsPath)
			}
			defer netns.Close()
inheritFd := new(criurpc.InheritFd)
inheritFd.Key = proto.String("extRootNetNS")
			// The offset of four is necessary because 0, 1, 2 and 3 are already
			// used by stdin, stdout, stderr and the 'criu swrk' socket.
inheritFd.Fd = proto.Int32(int32(4 + len(extraFiles)))
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
// All open FDs need to be transferred to CRIU via extraFiles
extraFiles = append(extraFiles, netns)
}
}
// This will modify the rootfs of the container in the same way runc
// modifies the container during initial creation.
if err := c.prepareCriuRestoreMounts(c.config.Mounts); err != nil {
return err
}
for _, m := range c.config.Mounts {
switch m.Device {
case "bind":
c.addCriuRestoreMount(req, m)
case "cgroup":
if cgroups.IsCgroup2UnifiedMode() {
c.addCriuRestoreMount(req, m)
continue
}
// cgroup v1
binds, err := getCgroupMounts(m)
if err != nil {
return err
}
for _, b := range binds {
c.addCriuRestoreMount(req, b)
}
}
}
if len(c.config.MaskPaths) > 0 {
m := &configs.Mount{Destination: "/dev/null", Source: "/dev/null"}
c.addCriuRestoreMount(req, m)
}
for _, node := range c.config.Devices {
m := &configs.Mount{Destination: node.Path, Source: node.Path}
c.addCriuRestoreMount(req, m)
}
if criuOpts.EmptyNs&unix.CLONE_NEWNET == 0 {
c.restoreNetwork(req, criuOpts)
}
// append optional manage cgroups mode
if criuOpts.ManageCgroupsMode != 0 {
// criu 1.7 => 10700
if err := c.checkCriuVersion(10700); err != nil {
return err
}
mode := criurpc.CriuCgMode(criuOpts.ManageCgroupsMode)
req.Opts.ManageCgroupsMode = &mode
}
var (
fds []string
fdJSON []byte
)
if fdJSON, err = ioutil.ReadFile(filepath.Join(criuOpts.ImagesDirectory, descriptorsFilename)); err != nil {
return err
}
if err := json.Unmarshal(fdJSON, &fds); err != nil {
return err
}
for i := range fds {
if s := fds[i]; strings.Contains(s, "pipe:") {
inheritFd := new(criurpc.InheritFd)
inheritFd.Key = proto.String(s)
inheritFd.Fd = proto.Int32(int32(i))
req.Opts.InheritFd = append(req.Opts.InheritFd, inheritFd)
}
}
return c.criuSwrk(process, req, criuOpts, true, extraFiles)
}
func (c *linuxContainer) criuApplyCgroups(pid int, req *criurpc.CriuReq) error {
// XXX: Do we need to deal with this case? AFAIK criu still requires root.
if err := c.cgroupManager.Apply(pid); err != nil {
return err
}
if err := c.cgroupManager.Set(c.config); err != nil {
return newSystemError(err)
}
path := fmt.Sprintf("/proc/%d/cgroup", pid)
cgroupsPaths, err := cgroups.ParseCgroupFile(path)
if err != nil {
return err
}
for c, p := range cgroupsPaths {
cgroupRoot := &criurpc.CgroupRoot{
Ctrl: proto.String(c),
Path: proto.String(p),
}
req.Opts.CgRoot = append(req.Opts.CgRoot, cgroupRoot)
}
return nil
}
func (c *linuxContainer) criuSwrk(process *Process, req *criurpc.CriuReq, opts *CriuOpts, applyCgroups bool, extraFiles []*os.File) error {
fds, err := unix.Socketpair(unix.AF_LOCAL, unix.SOCK_SEQPACKET|unix.SOCK_CLOEXEC, 0)
if err != nil {
return err
}
var logPath string
if opts != nil {
logPath = filepath.Join(opts.WorkDirectory, req.GetOpts().GetLogFile())
} else {
// For the VERSION RPC 'opts' is set to 'nil' and therefore
// opts.WorkDirectory does not exist. Set logPath to "".
logPath = ""
}
criuClient := os.NewFile(uintptr(fds[0]), "criu-transport-client")
criuClientFileCon, err := net.FileConn(criuClient)
criuClient.Close()
if err != nil {
return err
}
criuClientCon := criuClientFileCon.(*net.UnixConn)
defer criuClientCon.Close()
criuServer := os.NewFile(uintptr(fds[1]), "criu-transport-server")
defer criuServer.Close()
args := []string{"swrk", "3"}
if c.criuVersion != 0 {
// If the CRIU Version is still '0' then this is probably
// the initial CRIU run to detect the version. Skip it.
logrus.Debugf("Using CRIU %d at: %s", c.criuVersion, c.criuPath)
}
logrus.Debugf("Using CRIU with following args: %s", args)
cmd := exec.Command(c.criuPath, args...)
if process != nil {
cmd.Stdin = process.Stdin
cmd.Stdout = process.Stdout
cmd.Stderr = process.Stderr
}
cmd.ExtraFiles = append(cmd.ExtraFiles, criuServer)
if extraFiles != nil {
cmd.ExtraFiles = append(cmd.ExtraFiles, extraFiles...)
}
if err := cmd.Start(); err != nil {
return err
}
// cmd.Process will be replaced by a restored init.
criuProcess := cmd.Process
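	// Make sure the CRIU process is always reaped and the client connection
	// closed, even if we return early with an error.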
defer func() {
criuClientCon.Close()
_, err := criuProcess.Wait()
if err != nil {
return
}
}()
if applyCgroups {
err := c.criuApplyCgroups(criuProcess.Pid, req)
if err != nil {
return err
}
}
var extFds []string
if process != nil {
extFds, err = getPipeFds(criuProcess.Pid)
if err != nil {
return err
}
}
logrus.Debugf("Using CRIU in %s mode", req.GetType().String())
// In the case of criurpc.CriuReqType_FEATURE_CHECK req.GetOpts()
// should be empty. For older CRIU versions it still will be
// available but empty. criurpc.CriuReqType_VERSION actually
// has no req.GetOpts().
if !(req.GetType() == criurpc.CriuReqType_FEATURE_CHECK ||
req.GetType() == criurpc.CriuReqType_VERSION) {
val := reflect.ValueOf(req.GetOpts())
v := reflect.Indirect(val)
for i := 0; i < v.NumField(); i++ {
st := v.Type()
name := st.Field(i).Name
if strings.HasPrefix(name, "XXX_") {
continue
}
value := val.MethodByName("Get" + name).Call([]reflect.Value{})
logrus.Debugf("CRIU option %s with value %v", name, value[0])
}
}
data, err := proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
buf := make([]byte, 10*4096)
oob := make([]byte, 4096)
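	// Read CRIU responses until the final answer for our request arrives;
	// NOTIFY messages are handled and acknowledged inline, which keeps the
	// loop running.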
	for {
n, oobn, _, _, err := criuClientCon.ReadMsgUnix(buf, oob)
if err != nil {
return err
}
if n == 0 {
return fmt.Errorf("unexpected EOF")
}
if n == len(buf) {
return fmt.Errorf("buffer is too small")
}
resp := new(criurpc.CriuResp)
err = proto.Unmarshal(buf[:n], resp)
if err != nil {
return err
}
if !resp.GetSuccess() {
typeString := req.GetType().String()
if typeString == "VERSION" {
// If the VERSION RPC fails this probably means that the CRIU
// version is too old for this RPC. Just return 'nil'.
return nil
}
return fmt.Errorf("criu failed: type %s errno %d\nlog file: %s", typeString, resp.GetCrErrno(), logPath)
}
t := resp.GetType()
switch {
case t == criurpc.CriuReqType_VERSION:
logrus.Debugf("CRIU version: %s", resp)
criuVersionRPC = resp.GetVersion()
break
case t == criurpc.CriuReqType_FEATURE_CHECK:
logrus.Debugf("Feature check says: %s", resp)
criuFeatures = resp.GetFeatures()
case t == criurpc.CriuReqType_NOTIFY:
if err := c.criuNotifications(resp, process, cmd, opts, extFds, oob[:oobn]); err != nil {
return err
}
t = criurpc.CriuReqType_NOTIFY
req = &criurpc.CriuReq{
Type: &t,
NotifySuccess: proto.Bool(true),
}
data, err = proto.Marshal(req)
if err != nil {
return err
}
_, err = criuClientCon.Write(data)
if err != nil {
return err
}
continue
case t == criurpc.CriuReqType_RESTORE:
case t == criurpc.CriuReqType_DUMP:
case t == criurpc.CriuReqType_PRE_DUMP:
default:
return fmt.Errorf("unable to parse the response %s", resp.String())
}
break
}
criuClientCon.CloseWrite()
// cmd.Wait() waits for cmd's goroutines, which are used for proxying file descriptors.
// Here we only want to wait for the CRIU process.
st, err := criuProcess.Wait()
if err != nil {
return err
}
// In pre-dump mode CRIU is in a loop and waits for
// the final DUMP command.
// The current runc pre-dump approach, however, is to
// start criu in PRE_DUMP once for a single pre-dump
// and not for the whole series of pre-dump, pre-dump, ..., dump.
// If we got the message CriuReqType_PRE_DUMP it means
// CRIU was successful and we need to forcefully stop CRIU.
if !st.Success() && *req.Type != criurpc.CriuReqType_PRE_DUMP {
return fmt.Errorf("criu failed: %s\nlog file: %s", st.String(), logPath)
}
return nil
}
// block any external network activity
func lockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err := strategy.detach(config); err != nil {
return err
}
}
return nil
}
func unlockNetwork(config *configs.Config) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err = strategy.attach(config); err != nil {
return err
}
}
return nil
}
func (c *linuxContainer) criuNotifications(resp *criurpc.CriuResp, process *Process, cmd *exec.Cmd, opts *CriuOpts, fds []string, oob []byte) error {
notify := resp.GetNotify()
if notify == nil {
return fmt.Errorf("invalid response: %s", resp.String())
}
logrus.Debugf("notify: %s\n", notify.GetScript())
switch {
case notify.GetScript() == "post-dump":
f, err := os.Create(filepath.Join(c.root, "checkpoint"))
if err != nil {
return err
}
f.Close()
case notify.GetScript() == "network-unlock":
if err := unlockNetwork(c.config); err != nil {
return err
}
case notify.GetScript() == "network-lock":
if err := lockNetwork(c.config); err != nil {
return err
}
case notify.GetScript() == "setup-namespaces":
if c.config.Hooks != nil {
s, err := c.currentOCIState()
if err != nil {
return nil
}
s.Pid = int(notify.GetPid())
for i, hook := range c.config.Hooks.Prestart {
if err := hook.Run(s); err != nil {
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
}
}
}
case notify.GetScript() == "post-restore":
pid := notify.GetPid()
p, err := os.FindProcess(int(pid))
if err != nil {
return err
}
cmd.Process = p
r, err := newRestoredProcess(cmd, fds)
if err != nil {
return err
}
process.ops = r
if err := c.state.transition(&restoredState{
imageDir: opts.ImagesDirectory,
c: c,
}); err != nil {
return err
}
// create a timestamp indicating when the restored checkpoint was started
c.created = time.Now().UTC()
if _, err := c.updateState(r); err != nil {
return err
}
if err := os.Remove(filepath.Join(c.root, "checkpoint")); err != nil {
if !os.IsNotExist(err) {
logrus.Error(err)
}
}
case notify.GetScript() == "orphan-pts-master":
scm, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return err
}
fds, err := unix.ParseUnixRights(&scm[0])
if err != nil {
return err
}
master := os.NewFile(uintptr(fds[0]), "orphan-pts-master")
defer master.Close()
// While we can access console.master, using the API is a good idea.
if err := utils.SendFd(process.ConsoleSocket, master.Name(), master.Fd()); err != nil {
return err
}
}
return nil
}
func (c *linuxContainer) updateState(process parentProcess) (*State, error) {
if process != nil {
c.initProcess = process
}
state, err := c.currentState()
if err != nil {
return nil, err
}
err = c.saveState(state)
if err != nil {
return nil, err
}
return state, nil
}
func (c *linuxContainer) saveState(s *State) error {
f, err := os.Create(filepath.Join(c.root, stateFilename))
if err != nil {
return err
}
defer f.Close()
return utils.WriteJSON(f, s)
}
func (c *linuxContainer) deleteState() error {
return os.Remove(filepath.Join(c.root, stateFilename))
}
func (c *linuxContainer) currentStatus() (Status, error) {
if err := c.refreshState(); err != nil {
return -1, err
}
return c.state.status(), nil
}
// refreshState needs to be called to verify that the current state on the
// container is what is true. Because consumers of libcontainer can use it
// out of process we need to verify the container's status based on runtime
// information and not rely on our in process info.
func (c *linuxContainer) refreshState() error {
paused, err := c.isPaused()
if err != nil {
return err
}
if paused {
return c.state.transition(&pausedState{c: c})
}
t := c.runType()
switch t {
case Created:
return c.state.transition(&createdState{c: c})
case Running:
return c.state.transition(&runningState{c: c})
}
return c.state.transition(&stoppedState{c: c})
}
func (c *linuxContainer) runType() Status {
if c.initProcess == nil {
return Stopped
}
pid := c.initProcess.pid()
stat, err := system.Stat(pid)
if err != nil {
return Stopped
}
if stat.StartTime != c.initProcessStartTime || stat.State == system.Zombie || stat.State == system.Dead {
return Stopped
}
// We'll create an exec fifo and block on it after the container is created,
// and delete it after the container starts.
if _, err := os.Stat(filepath.Join(c.root, execFifoFilename)); err == nil {
return Created
}
return Running
}
func (c *linuxContainer) isPaused() (bool, error) {
var fcg, filename, pausedState string
if !cgroups.IsCgroup2UnifiedMode() {
fcg = c.cgroupManager.GetPaths()["freezer"]
if fcg == "" {
// A container doesn't have a freezer cgroup
return false, nil
}
filename = "freezer.state"
pausedState = "FROZEN"
} else {
var err error
fcg, err = c.cgroupManager.GetUnifiedPath()
if err != nil {
// should not happen
return false, err
}
filename = "cgroup.freeze"
pausedState = "1"
}
data, err := ioutil.ReadFile(filepath.Join(fcg, filename))
if err != nil {
// If freezer cgroup is not mounted, the container would just be not paused.
if os.IsNotExist(err) || errors.Is(err, syscall.ENODEV) {
return false, nil
}
return false, newSystemErrorWithCause(err, "checking if container is paused")
}
return bytes.Equal(bytes.TrimSpace(data), []byte(pausedState)), nil
}
func (c *linuxContainer) currentState() (*State, error) {
var (
startTime uint64
externalDescriptors []string
pid = -1
)
if c.initProcess != nil {
pid = c.initProcess.pid()
startTime, _ = c.initProcess.startTime()
externalDescriptors = c.initProcess.externalDescriptors()
}
intelRdtPath, err := intelrdt.GetIntelRdtPath(c.ID())
if err != nil {
intelRdtPath = ""
}
state := &State{
BaseState: BaseState{
ID: c.ID(),
Config: *c.config,
InitProcessPid: pid,
InitProcessStartTime: startTime,
Created: c.created,
},
Rootless: c.config.RootlessEUID && c.config.RootlessCgroups,
CgroupPaths: c.cgroupManager.GetPaths(),
IntelRdtPath: intelRdtPath,
NamespacePaths: make(map[configs.NamespaceType]string),
ExternalDescriptors: externalDescriptors,
}
if pid > 0 {
for _, ns := range c.config.Namespaces {
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
for _, nsType := range configs.NamespaceTypes() {
if !configs.IsNamespaceSupported(nsType) {
continue
}
if _, ok := state.NamespacePaths[nsType]; !ok {
ns := configs.Namespace{Type: nsType}
state.NamespacePaths[ns.Type] = ns.GetPath(pid)
}
}
}
return state, nil
}
func (c *linuxContainer) currentOCIState() (*specs.State, error) {
bundle, annotations := utils.Annotations(c.config.Labels)
state := &specs.State{
Version: specs.Version,
ID: c.ID(),
Bundle: bundle,
Annotations: annotations,
}
status, err := c.currentStatus()
if err != nil {
return nil, err
}
state.Status = status.String()
if status != Stopped {
if c.initProcess != nil {
state.Pid = c.initProcess.pid()
}
}
return state, nil
}
// orderNamespacePaths sorts namespace paths into a list of paths that we
// can setns in order.
func (c *linuxContainer) orderNamespacePaths(namespaces map[configs.NamespaceType]string) ([]string, error) {
paths := []string{}
for _, ns := range configs.NamespaceTypes() {
// Remove namespaces that we don't need to join.
if !c.config.Namespaces.Contains(ns) {
continue
}
if p, ok := namespaces[ns]; ok && p != "" {
// check if the requested namespace is supported
if !configs.IsNamespaceSupported(ns) {
return nil, newSystemError(fmt.Errorf("namespace %s is not supported", ns))
}
// only set to join this namespace if it exists
if _, err := os.Lstat(p); err != nil {
return nil, newSystemErrorWithCausef(err, "running lstat on namespace path %q", p)
}
// do not allow namespace path with comma as we use it to separate
// the namespace paths
if strings.ContainsRune(p, ',') {
return nil, newSystemError(fmt.Errorf("invalid path %s", p))
}
paths = append(paths, fmt.Sprintf("%s:%s", configs.NsName(ns), p))
}
}
return paths, nil
}
func encodeIDMapping(idMap []configs.IDMap) ([]byte, error) {
data := bytes.NewBuffer(nil)
for _, im := range idMap {
line := fmt.Sprintf("%d %d %d\n", im.ContainerID, im.HostID, im.Size)
if _, err := data.WriteString(line); err != nil {
return nil, err
}
}
return data.Bytes(), nil
}
// bootstrapData encodes the necessary data in netlink binary format
// as a io.Reader.
// Consumer can write the data to a bootstrap program
// such as one that uses nsenter package to bootstrap the container's
// init process correctly, i.e. with correct namespaces, uid/gid
// mapping etc.
func (c *linuxContainer) bootstrapData(cloneFlags uintptr, nsMaps map[configs.NamespaceType]string) (io.Reader, error) {
// create the netlink message
r := nl.NewNetlinkRequest(int(InitMsg), 0)
// write cloneFlags
r.AddData(&Int32msg{
Type: CloneFlagsAttr,
Value: uint32(cloneFlags),
})
// write custom namespace paths
if len(nsMaps) > 0 {
nsPaths, err := c.orderNamespacePaths(nsMaps)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: NsPathsAttr,
Value: []byte(strings.Join(nsPaths, ",")),
})
}
// write namespace paths only when we are not joining an existing user ns
_, joinExistingUser := nsMaps[configs.NEWUSER]
if !joinExistingUser {
// write uid mappings
if len(c.config.UidMappings) > 0 {
if c.config.RootlessEUID && c.newuidmapPath != "" {
r.AddData(&Bytemsg{
Type: UidmapPathAttr,
Value: []byte(c.newuidmapPath),
})
}
b, err := encodeIDMapping(c.config.UidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: UidmapAttr,
Value: b,
})
}
// write gid mappings
if len(c.config.GidMappings) > 0 {
b, err := encodeIDMapping(c.config.GidMappings)
if err != nil {
return nil, err
}
r.AddData(&Bytemsg{
Type: GidmapAttr,
Value: b,
})
if c.config.RootlessEUID && c.newgidmapPath != "" {
r.AddData(&Bytemsg{
Type: GidmapPathAttr,
Value: []byte(c.newgidmapPath),
})
}
if requiresRootOrMappingTool(c.config) {
r.AddData(&Boolmsg{
Type: SetgroupAttr,
Value: true,
})
}
}
}
if c.config.OomScoreAdj != nil {
// write oom_score_adj
r.AddData(&Bytemsg{
Type: OomScoreAdjAttr,
Value: []byte(fmt.Sprintf("%d", *c.config.OomScoreAdj)),
})
}
// write rootless
r.AddData(&Boolmsg{
Type: RootlessEUIDAttr,
Value: c.config.RootlessEUID,
})
return bytes.NewReader(r.Serialize()), nil
}
// ignoreTerminateErrors returns nil if the given err matches an error known
// to indicate that the terminate occurred successfully or err was nil, otherwise
// err is returned unaltered.
func ignoreTerminateErrors(err error) error {
if err == nil {
return nil
}
s := err.Error()
switch {
case strings.Contains(s, "process already finished"), strings.Contains(s, "Wait was already called"):
return nil
}
return err
}
func requiresRootOrMappingTool(c *configs.Config) bool {
gidMap := []configs.IDMap{
{ContainerID: 0, HostID: os.Getegid(), Size: 1},
}
return !reflect.DeepEqual(c.GidMappings, gidMap)
}
| 1 | 19,029 | This looks to be the only place where we return early before the close below, so if the "double close" is bothering people, closing manually here (and removing the `defer`) could be an option | opencontainers-runc | go |
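A minimal, hypothetical Go sketch of the pattern suggested in the review comment above: close the resource explicitly on the early-return path instead of relying on a defer that would close it a second time later. The file handling and function names here are illustrative only and are not taken from runc.

package main

import (
    "fmt"
    "os"
)

// doWork is responsible for closing f exactly once.
func doWork(f *os.File) error {
    if _, err := f.WriteString("ping"); err != nil {
        // Close explicitly at the early return instead of deferring,
        // so the Close on the success path below is the only other
        // close and nothing is closed twice.
        f.Close()
        return fmt.Errorf("write failed: %w", err)
    }
    return f.Close()
}

func main() {
    f, err := os.CreateTemp("", "close-once-*")
    if err != nil {
        fmt.Println(err)
        return
    }
    defer os.Remove(f.Name())
    if err := doWork(f); err != nil {
        fmt.Println(err)
    }
}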
@@ -91,8 +91,8 @@ gulp.task( 'default', () => {
);
} );
-gulp.task( 'qunit', function() {
- execSync( 'node-qunit-phantomjs ./tests/qunit/index.html', { stdio: [ 0, 1, 2 ] } );
+gulp.task( 'jest', function() {
+ execSync( 'npm run test:js', { stdio: [ 0, 1, 2 ] } );
} );
gulp.task( 'phpunit', function() { | 1 | /**
* Gulp config.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import gulp from 'gulp';
import requireDir from 'require-dir';
import runSequence from 'run-sequence';
import livereload from 'gulp-livereload';
import { execSync } from 'child_process';
const phpunit = require( 'gulp-phpunit' );
requireDir( './gulp-tasks' );
/**
* Gulp task to run all SVG processes in a sequential order.
*/
gulp.task( 'build', () => {
runSequence(
'webpack',
'svg',
'imagemin',
'copy-vendor'
);
} );
/**
* Gulp task to watch for file changes and run the associated processes.
*/
gulp.task( 'watch', () => {
livereload.listen( { basePath: 'dist' } );
gulp.watch( './assets/sass/**/*.scss', [ 'build' ] );
gulp.watch( './assets/svg/**/*.svg', [ 'build' ] );
gulp.watch( './assets/js/*.js', [ 'build' ] );
gulp.watch( './assets/js/modules/**/*.js', [ 'build' ] );
} );
/**
* Gulp task to livereload file changes in browser.
*/
gulp.task( 'local', () => {
runSequence(
'build',
'browser-sync'
);
} );
/**
* Gulp task to minify and combine svg's.
*/
gulp.task( 'svg', () => {
runSequence( 'svgstore' );
runSequence( 'svgmin' );
} );
/**
* Gulp task to run the default release processes in a sequential order.
*/
gulp.task( 'release', () => {
runSequence(
'svg',
'imagemin',
'copy-vendor'
);
} );
/**
* Gulp task to run the default build processes in a sequential order.
*/
gulp.task( 'default', () => {
runSequence(
'webpack',
'phpcs',
'copy-vendor'
);
} );
gulp.task( 'qunit', function() {
execSync( 'node-qunit-phantomjs ./tests/qunit/index.html', { stdio: [ 0, 1, 2 ] } );
} );
gulp.task( 'phpunit', function() {
gulp.src( '' )
.pipe( phpunit( './vendor/bin/phpunit' ) );
} );
/**
* Gulp task to run the default tests.
*/
gulp.task( 'test', () => {
runSequence(
'qunit',
'phpunit'
);
} );
| 1 | 24,765 | This task can probably be deleted since we don't need gulp to run Jest. | google-site-kit-wp | js |
@@ -23,6 +23,9 @@ require 'socket'
module Selenium
module WebDriver
class SocketPoller
+ NOT_CONNECTED_ERRORS = [Errno::ECONNREFUSED, Errno::ENOTCONN, SocketError]
+ NOT_CONNECTED_ERRORS << Errno::EPERM if Platform.cygwin?
+
def initialize(host, port, timeout = 0, interval = 0.25)
@host = host
@port = Integer(port) | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
require 'selenium/webdriver/common/platform'
require 'socket'
module Selenium
module WebDriver
class SocketPoller
def initialize(host, port, timeout = 0, interval = 0.25)
@host = host
@port = Integer(port)
@timeout = Float(timeout)
@interval = interval
end
#
# Returns true if the server is listening within the given timeout,
# false otherwise.
#
# @return [Boolean]
#
def connected?
with_timeout { listening? }
end
#
# Returns true if the server has stopped listening within the given timeout,
# false otherwise.
#
# @return [Boolean]
#
def closed?
with_timeout { !listening? }
end
private
CONNECT_TIMEOUT = 5
NOT_CONNECTED_ERRORS = [Errno::ECONNREFUSED, Errno::ENOTCONN, SocketError]
NOT_CONNECTED_ERRORS << Errno::EPERM if Platform.cygwin?
CONNECTED_ERRORS = [Errno::EISCONN]
CONNECTED_ERRORS << Errno::EINVAL if Platform.windows?
if Platform.jruby?
# we use a plain TCPSocket here since JRuby has issues select()ing on a connecting socket
# see http://jira.codehaus.org/browse/JRUBY-5165
def listening?
TCPSocket.new(@host, @port).close
true
rescue *NOT_CONNECTED_ERRORS
false
end
else
def listening?
addr = Socket.getaddrinfo(@host, @port, Socket::AF_INET, Socket::SOCK_STREAM)
sock = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
sockaddr = Socket.pack_sockaddr_in(@port, addr[0][3])
begin
sock.connect_nonblock sockaddr
rescue Errno::EINPROGRESS
retry if IO.select(nil, [sock], nil, CONNECT_TIMEOUT)
raise Errno::ECONNREFUSED
rescue *CONNECTED_ERRORS
# yay!
end
sock.close
true
rescue *NOT_CONNECTED_ERRORS
sock.close if sock
$stderr.puts [@host, @port].inspect if $DEBUG
false
end
end
def with_timeout
max_time = time_now + @timeout
(
return true if yield
wait
) until time_now > max_time
false
end
def wait
sleep @interval
end
# for testability
def time_now
Time.now
end
end # SocketPoller
end # WebDriver
end # Selenium
| 1 | 13,776 | Doesn't this need to include `Errno::ECONNRESET` to fix the issue? | SeleniumHQ-selenium | js |
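A small standalone Ruby sketch of the change the reviewer is asking about: treating Errno::ECONNRESET as another "server not listening yet" error alongside ECONNREFUSED and ENOTCONN. The constant and the probe below are local to this sketch and are not the actual Selenium patch.

require 'socket'

# Errors that mean the server is not (yet) accepting connections.
NOT_CONNECTED_ERRORS = [
  Errno::ECONNREFUSED,
  Errno::ENOTCONN,
  Errno::ECONNRESET, # the addition the reviewer is suggesting
  SocketError
].freeze

begin
  TCPSocket.new('127.0.0.1', 65_000).close
  puts 'listening'
rescue *NOT_CONNECTED_ERRORS
  puts 'not listening yet'
end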
@@ -28,6 +28,19 @@ module Faker
def quote
fetch('movie.quote')
end
+
+ ##
+ # Produces a qirector from a movie.
+ #
+ # @return [String]
+ #
+ # @example
+ # Faker::Movie.qirector #=> "Quentin Tarantino"
+ #
+ # @faker.version next
+ def director
+ fetch('movie.director')
+ end
end
end
end | 1 | # frozen_string_literal: true
module Faker
class Movie < Base
class << self
##
# Produces a title from a movie.
#
# @return [String]
#
# @example
# Faker::Movie.title #=> "The Lord of the Rings: The Two Towers"
#
# @faker.version 2.13.0
def title
fetch('movie.title')
end
##
# Produces a quote from a movie.
#
# @return [String]
#
# @example
# Faker::Movie.quote #=> "Bumble bee tuna"
#
# @faker.version 1.8.1
def quote
fetch('movie.quote')
end
end
end
end
| 1 | 10,228 | never heard of a qirector... what's their role again? (also should be 'for a movie') | faker-ruby-faker | rb |
@@ -86,6 +86,14 @@ kvstore::ResultCode QueryBoundProcessor::processVertex(PartitionID partId, Verte
}
}
vResp.set_tag_data(std::move(td));
+ } else if (tagContexts_.empty() && onlyVertexProps_) {
+ std::vector<cpp2::TagData> td;
+ auto ret = collectVertexProps(partId, vId, td);
+ if (ret != kvstore::ResultCode::ERR_KEY_NOT_FOUND
+ && ret != kvstore::ResultCode::SUCCEEDED) {
+ return ret;
+ }
+ vResp.set_tag_data(std::move(td));
}
if (onlyVertexProps_) { | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "storage/query/QueryBoundProcessor.h"
#include <algorithm>
#include "time/Duration.h"
#include "dataman/RowReader.h"
#include "dataman/RowWriter.h"
namespace nebula {
namespace storage {
kvstore::ResultCode QueryBoundProcessor::processEdgeImpl(const PartitionID partId,
const VertexID vId,
const EdgeType edgeType,
const std::vector<PropContext>& props,
FilterContext& fcontext,
cpp2::VertexData& vdata) {
RowSetWriter rsWriter;
auto ret = collectEdgeProps(
partId, vId, edgeType, props, &fcontext,
[&, this](RowReader* reader, folly::StringPiece k, const std::vector<PropContext>& p) {
RowWriter writer(rsWriter.schema());
PropsCollector collector(&writer);
this->collectProps(reader, k, p, &fcontext, &collector);
rsWriter.addRow(writer);
});
if (ret != kvstore::ResultCode::SUCCEEDED) {
return ret;
}
if (!rsWriter.data().empty()) {
vdata.edge_data.emplace_back(apache::thrift::FragileConstructor::FRAGILE, edgeType,
std::move(rsWriter.data()));
}
return ret;
}
kvstore::ResultCode QueryBoundProcessor::processEdge(PartitionID partId, VertexID vId,
FilterContext& fcontext,
cpp2::VertexData& vdata) {
for (const auto& ec : edgeContexts_) {
RowSetWriter rsWriter;
auto edgeType = ec.first;
auto& props = ec.second;
if (!props.empty()) {
CHECK(!onlyVertexProps_);
auto ret = processEdgeImpl(partId, vId, edgeType, props, fcontext, vdata);
if (ret != kvstore::ResultCode::SUCCEEDED) {
return ret;
}
}
}
return kvstore::ResultCode::SUCCEEDED;
}
kvstore::ResultCode QueryBoundProcessor::processVertex(PartitionID partId, VertexID vId) {
cpp2::VertexData vResp;
vResp.set_vertex_id(vId);
FilterContext fcontext;
if (!tagContexts_.empty()) {
std::vector<cpp2::TagData> td;
for (auto& tc : tagContexts_) {
RowWriter writer;
PropsCollector collector(&writer);
VLOG(3) << "partId " << partId << ", vId " << vId << ", tagId " << tc.tagId_
<< ", prop size " << tc.props_.size();
auto ret = collectVertexProps(partId, vId, tc.tagId_, tc.props_, &fcontext, &collector);
if (ret == kvstore::ResultCode::ERR_KEY_NOT_FOUND) {
continue;
}
if (ret != kvstore::ResultCode::SUCCEEDED) {
return ret;
}
if (writer.size() > 1) {
td.emplace_back(apache::thrift::FragileConstructor::FRAGILE,
tc.tagId_,
writer.encode());
}
}
vResp.set_tag_data(std::move(td));
}
if (onlyVertexProps_) {
std::lock_guard<std::mutex> lg(this->lock_);
vertices_.emplace_back(std::move(vResp));
return kvstore::ResultCode::SUCCEEDED;
}
kvstore::ResultCode ret;
ret = processEdge(partId, vId, fcontext, vResp);
if (ret != kvstore::ResultCode::SUCCEEDED) {
return ret;
}
if (!vResp.edge_data.empty()) {
// Only return the vertex if edges existed.
std::lock_guard<std::mutex> lg(this->lock_);
vertices_.emplace_back(std::move(vResp));
}
return kvstore::ResultCode::SUCCEEDED;
}
void QueryBoundProcessor::onProcessFinished(int32_t retNum) {
(void)retNum;
resp_.set_vertices(std::move(vertices_));
std::unordered_map<TagID, nebula::cpp2::Schema> vertexSchema;
if (!this->tagContexts_.empty()) {
for (auto& tc : this->tagContexts_) {
nebula::cpp2::Schema respTag;
for (auto& prop : tc.props_) {
if (prop.returned_) {
respTag.columns.emplace_back(
columnDef(std::move(prop.prop_.name), prop.type_.type));
}
}
if (!respTag.columns.empty()) {
auto it = vertexSchema.find(tc.tagId_);
if (it == vertexSchema.end()) {
vertexSchema.emplace(tc.tagId_, respTag);
}
}
}
if (!vertexSchema.empty()) {
resp_.set_vertex_schema(std::move(vertexSchema));
}
}
std::unordered_map<EdgeType, nebula::cpp2::Schema> edgeSchema;
if (!edgeContexts_.empty()) {
for (const auto& ec : edgeContexts_) {
nebula::cpp2::Schema respEdge;
RowSetWriter rsWriter;
auto& props = ec.second;
for (auto& p : props) {
respEdge.columns.emplace_back(columnDef(std::move(p.prop_.name), p.type_.type));
}
if (!respEdge.columns.empty()) {
auto it = edgeSchema.find(ec.first);
if (it == edgeSchema.end()) {
edgeSchema.emplace(ec.first, std::move(respEdge));
}
}
}
if (!edgeSchema.empty()) {
resp_.set_edge_schema(std::move(edgeSchema));
}
}
}
} // namespace storage
} // namespace nebula
| 1 | 26,502 | Implement the "collect all props for one vertex" logic inside QueryVertexPropsProcessor | vesoft-inc-nebula | cpp |
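A toy C++ sketch of the reviewer's suggestion: keep the "collect all props for one vertex" path inside a dedicated vertex-props processor instead of branching on onlyVertexProps_ in the shared bound processor. All class and function names below are placeholders and do not reflect nebula's real interfaces.

#include <iostream>
#include <string>
#include <vector>

// Shared base: owns the traversal, delegates per-vertex collection.
class BoundProcessorBase {
 public:
    virtual ~BoundProcessorBase() = default;
    virtual void processVertex(int vertexId) = 0;
};

// Dedicated processor: the vertex-props-only logic lives here, so the base
// class never needs an onlyVertexProps special case.
class VertexPropsProcessor : public BoundProcessorBase {
 public:
    void processVertex(int vertexId) override {
        std::vector<std::string> tagData = collectVertexProps(vertexId);
        std::cout << "vertex " << vertexId << ": collected "
                  << tagData.size() << " tag blobs\n";
    }

 private:
    static std::vector<std::string> collectVertexProps(int /*vertexId*/) {
        return {"tag1-props", "tag2-props"};  // placeholder payloads
    }
};

int main() {
    VertexPropsProcessor processor;
    processor.processVertex(42);
    return 0;
}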