def read_temple_config():
"""Reads the temple YAML configuration file in the repository"""
with open(temple.constants.TEMPLE_CONFIG_FILE) as temple_config_file:
return yaml.load(temple_config_file, Loader=yaml.SafeLoader) |
def write_temple_config(temple_config, template, version):
"""Writes the temple YAML configuration"""
with open(temple.constants.TEMPLE_CONFIG_FILE, 'w') as temple_config_file:
versioned_config = {
**temple_config,
**{'_version': version, '_template': template},
}
yaml.dump(versioned_config, temple_config_file, Dumper=yaml.SafeDumper) |
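A minimal round-trip sketch of the two helpers above; the template URL and SHA are hypothetical, and `temple.constants.TEMPLE_CONFIG_FILE` is assumed to point at a writable config in the current repo:

config = read_temple_config()
write_temple_config(config,
                    template='[email protected]:org/template.git',  # hypothetical template
                    version='a1b2c3d')                       # hypothetical git SHA
assert read_temple_config()['_version'] == 'a1b2c3d'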
def get_cookiecutter_config(template, default_config=None, version=None):
"""Obtains the configuration used for cookiecutter templating
Args:
template: Path to the template
default_config (dict, optional): The default configuration
version (str, optional): The git SHA or branch to use when
checking out template. Defaults to latest version
Returns:
tuple: The cookiecutter repo directory and the config dict
"""
default_config = default_config or {}
config_dict = cc_config.get_user_config()
repo_dir, _ = cc_repository.determine_repo_dir(
template=template,
abbreviations=config_dict['abbreviations'],
clone_to_dir=config_dict['cookiecutters_dir'],
checkout=version,
no_input=True)
context_file = os.path.join(repo_dir, 'cookiecutter.json')
context = cc_generate.generate_context(
context_file=context_file,
default_context={**config_dict['default_context'], **default_config})
return repo_dir, cc_prompt.prompt_for_config(context) |
def set_cmd_env_var(value):
"""Decorator that sets the temple command env var to value"""
def func_decorator(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
previous_cmd_env_var = os.getenv(temple.constants.TEMPLE_ENV_VAR)
os.environ[temple.constants.TEMPLE_ENV_VAR] = value
try:
ret_val = function(*args, **kwargs)
finally:
if previous_cmd_env_var is None:
del os.environ[temple.constants.TEMPLE_ENV_VAR]
else:
os.environ[temple.constants.TEMPLE_ENV_VAR] = previous_cmd_env_var
return ret_val
return wrapper
return func_decorator |
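A short usage sketch for the decorator above; the command name 'update' is illustrative, not taken from the source:

@set_cmd_env_var('update')
def update_cmd():
    # While this runs, the temple env var is set to 'update'
    assert os.environ[temple.constants.TEMPLE_ENV_VAR] == 'update'

update_cmd()
# Assuming the variable was unset beforehand, it is removed again here
assert os.getenv(temple.constants.TEMPLE_ENV_VAR) is None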
def _call_api(self, verb, url, **request_kwargs):
"""Perform a github API call
Args:
verb (str): Can be "post", "put", or "get"
        url (str): The URL path, with a leading slash, appended to the Github API (v3)
            base URL. Authentication uses ``self.api_token`` sent as a token header.
"""
api = 'https://api.github.com{}'.format(url)
auth_headers = {'Authorization': 'token {}'.format(self.api_token)}
headers = {**auth_headers, **request_kwargs.pop('headers', {})}
return getattr(requests, verb)(api, headers=headers, **request_kwargs) |
def deploy(target):
"""Deploys the package and documentation.
Proceeds in the following steps:
1. Ensures proper environment variables are set and checks that we are on Circle CI
2. Tags the repository with the new version
    3. Updates version.py to have the proper version
    4. Creates a standard distribution and a wheel
5. Commits the ChangeLog, AUTHORS, and version.py file
6. Pushes to PyPI
7. Pushes the tags and newly committed files
Raises:
`EnvironmentError`:
- Not running on CircleCI
- `*_PYPI_USERNAME` and/or `*_PYPI_PASSWORD` environment variables
are missing
- Attempting to deploy to production from a branch that isn't master
"""
# Ensure proper environment
if not os.getenv(CIRCLECI_ENV_VAR): # pragma: no cover
raise EnvironmentError('Must be on CircleCI to run this script')
current_branch = os.getenv('CIRCLE_BRANCH')
if (target == 'PROD') and (current_branch != 'master'):
raise EnvironmentError((
'Refusing to deploy to production from branch {current_branch!r}. '
'Production deploys can only be made from master.'
).format(current_branch=current_branch))
if target in ('PROD', 'TEST'):
pypi_username = os.getenv('{target}_PYPI_USERNAME'.format(target=target))
pypi_password = os.getenv('{target}_PYPI_PASSWORD'.format(target=target))
else:
raise ValueError(
"Deploy target must be 'PROD' or 'TEST', got {target!r}.".format(target=target))
if not (pypi_username and pypi_password): # pragma: no cover
raise EnvironmentError((
"Missing '{target}_PYPI_USERNAME' and/or '{target}_PYPI_PASSWORD' "
"environment variables. These are required to push to PyPI."
).format(target=target))
# Twine requires these environment variables to be set. Subprocesses will
# inherit these when we invoke them, so no need to pass them on the command
# line. We want to avoid that in case something's logging each command run.
os.environ['TWINE_USERNAME'] = pypi_username
os.environ['TWINE_PASSWORD'] = pypi_password
# Set up git on circle to push to the current branch
_shell('git config --global user.email "[email protected]"')
_shell('git config --global user.name "Circle CI"')
_shell('git config push.default current')
# Obtain the version to deploy
ret = _shell('make version', stdout=subprocess.PIPE)
version = ret.stdout.decode('utf-8').strip()
print('Deploying version {version!r}...'.format(version=version))
# Tag the version
_shell('git tag -f -a {version} -m "Version {version}"'.format(version=version))
# Update the version
_shell(
'sed -i.bak "s/^__version__ = .*/__version__ = {version!r}/" */version.py'.format(
version=version))
# Create a standard distribution and a wheel
_shell('python setup.py sdist bdist_wheel')
# Add the updated ChangeLog and AUTHORS
_shell('git add ChangeLog AUTHORS */version.py')
# Start the commit message with "Merge" so that PBR will ignore it in the
# ChangeLog. Use [skip ci] to ensure CircleCI doesn't recursively deploy.
_shell('git commit --no-verify -m "Merge autogenerated files [skip ci]"')
# Push the distributions to PyPI.
_pypi_push('dist')
# Push the tag and AUTHORS / ChangeLog after successful PyPI deploy
_shell('git push --follow-tags')
print('Deployment complete. Latest version is {version}.'.format(version=version)) |
def report(func):
"""
Decorator for method run. This method will be execute before the execution
from the method with this decorator.
"""
def execute(self, *args, **kwargs):
try:
print "[>] Executing {n} report. . . ".format(n=self.__class__.NAME)
if hasattr(self, 'test'):
if self.test():
return func(self, *args, **kwargs)
else:
print colored("[!] The initial test for class {c} has not been successful".format(c=self.__class__.__name__), "red")
else:
return func(self, *args, **kwargs)
except Exception as e:
print colored("Error en la ejecución del report {n}: {e}".format(n=self.__class__.NAME, e = e), "red")
return execute |
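A sketch of how a report class might opt into this decorator; the class and base names here are hypothetical, following the NAME/test conventions visible above:

class MyReport(GenericReport):  # hypothetical base class
    NAME = "myreport"

    def test(self):
        # Precondition check; run() only proceeds when this returns True
        return True

    @report
    def run(self):
        print("running report")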
def run(self):
"""
Finds .DS_Store files into path
"""
filename = ".DS_Store"
command = "find {path} -type f -name \"{filename}\" ".format(path = self.path, filename = filename)
cmd = CommandHelper(command)
cmd.execute()
files = cmd.output.split("\n")
for f in files:
if not f.endswith(filename):
continue
# Ignore paths excluded
rel_path = f.replace(self.path, "")
if rel_path.startswith(tuple(self.CONFIG['exclude_paths'])):
continue
issue = Issue()
issue.name = "File .DS_Store detected"
issue.potential = False
issue.severity = Issue.SEVERITY_LOW
# Get only relative path
issue.file = rel_path
self.saveIssue(issue) |
def run(self):
"""
Method executed dynamically by framework. This method will do a http request to
endpoint setted into config file with the issues and other data.
"""
options = {}
if bool(self.config['use_proxy']):
options['proxies'] = {"http": self.config['proxy'], "https": self.config['proxy']}
options["url"] = self.config['url']
options["data"] = {"issues": json.dumps(map(lambda x: x.__todict__(), self.issues))}
if 'get' == self.config['method'].lower():
requests.get(**options)
else:
requests.post(**options) |
def path(self, value):
"""
Setter for 'path' property
Args:
value (str): Absolute path to scan
"""
if not value.endswith('/'):
self._path = '{v}/'.format(v=value)
else:
self._path = value |
def parseConfig(cls, value):
"""
Parse the config values
Args:
value (dict): Dictionary which contains the checker config
Returns:
dict: The checker config with parsed values
"""
if 'enabled' in value:
value['enabled'] = bool(value['enabled'])
if 'exclude_paths' in value:
value['exclude_paths'] = [n.strip() for n in ast.literal_eval(value['exclude_paths'])]
return value |
def isInstalled(value):
"""
Check if a software is installed into machine.
Args:
value (str): Software's name
Returns:
bool: True if the software is installed. False else
"""
function = """
function is_installed {
local return_=1;
type $1 >/dev/null 2>&1 || { local return_=0; };
echo "$return_";
}"""
command = """bash -c '{f}; echo $(is_installed \"{arg}\")'""".format(f = function, arg=value)
cmd = CommandHelper(command)
cmd.execute()
return "1" in cmd.output |
def getOSName(self):
"""
Get the OS name. If OS is linux, returns the Linux distribution name
Returns:
str: OS name
"""
_system = platform.system()
if _system in [self.__class__.OS_WINDOWS, self.__class__.OS_MAC, self.__class__.OS_LINUX]:
        if _system == self.__class__.OS_LINUX:
            # platform.linux_distribution() was removed in Python 3.8; this
            # code targets Python 2 / early Python 3
            _dist = platform.linux_distribution()[0]
            for _distro in (self.__class__.OS_UBUNTU, self.__class__.OS_DEBIAN,
                            self.__class__.OS_CENTOS, self.__class__.OS_REDHAT,
                            self.__class__.OS_KALI):
                if _dist.lower() == _distro.lower():
                    return _distro
return _system
else:
return None |
def execute(self, shell = True):
"""
Executes the command setted into class
Args:
shell (boolean): Set True if command is a shell command. Default: True
"""
process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=shell)
self.output, self.errors = process.communicate() |
def _debug(message, color=None, attrs=None):
"""
Print a message if the class attribute 'verbose' is enabled
Args:
message (str): Message to print
"""
if attrs is None:
attrs = []
    if color is not None:
        print(colored(message, color, attrs=attrs))
    else:
        if len(attrs) > 0:
            print(colored(message, "white", attrs=attrs))
        else:
            print(message) |
def setup():
"""
Creates required directories and copy checkers and reports.
"""
# # Check if dir is writable
# if not os.access(AtomShieldsScanner.HOME, os.W_OK):
# AtomShieldsScanner.HOME = os.path.expanduser("~/.atomshields")
# AtomShieldsScanner.CHECKERS_DIR = os.path.join(AtomShieldsScanner.HOME, "checkers")
# AtomShieldsScanner.REPORTS_DIR = os.path.join(AtomShieldsScanner.HOME, "reports")
if not os.path.isdir(AtomShieldsScanner.CHECKERS_DIR):
os.makedirs(AtomShieldsScanner.CHECKERS_DIR)
if not os.path.isdir(AtomShieldsScanner.REPORTS_DIR):
os.makedirs(AtomShieldsScanner.REPORTS_DIR)
# Copy all checkers
for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), "checkers"), "*.py"):
AtomShieldsScanner.installChecker(f)
# Copy all reports
for f in AtomShieldsScanner._getFiles(os.path.join(os.path.dirname(os.path.realpath(__file__)), "reports"), "*.py"):
AtomShieldsScanner.installReport(f)
AtomShieldsScanner._executeMassiveMethod(path=AtomShieldsScanner.CHECKERS_DIR, method="install", args={})
config_dir = os.path.dirname(AtomShieldsScanner.CONFIG_PATH)
if not os.path.isdir(config_dir):
os.makedirs(config_dir) |
def _addConfig(instance, config, parent_section):
"""
Writes a section for a plugin.
Args:
instance (object): Class instance for plugin
        config (object): ConfigParser object holding the current config
parent_section (str): Parent section for plugin. Usually 'checkers' or 'reports'
"""
try:
section_name = "{p}/{n}".format(p = parent_section, n=instance.NAME.lower())
config.add_section(section_name)
for k in instance.CONFIG.keys():
config.set(section_name, k, instance.CONFIG[k])
except Exception as e:
        print("[!] %s" % e) |
def getConfig(self, section = None):
"""
    Returns a dictionary which contains the current config. If a section is
    given, only that section's config is returned
Args:
section (str): (Optional) Section name.
Returns:
dict: Representation of current config
"""
data = {}
if section is None:
for s in self.config.sections():
            if '/' in s:
                # Subsection: make sure the parent dict exists before assigning
                parent, _s = s.split('/')
                data.setdefault(parent, {})[_s] = dict(self.config.items(s))
else:
data[s] = dict(self.config.items(s))
else:
# Only one section will be returned
data = dict(self.config.items(section))
return data |
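A sketch of the mapping this produces, with hypothetical section names:

# Given an INI config like:
#   [atomshields]
#   hash = sha1
#   [checkers/dsstore]
#   enabled = True
# getConfig() returns nested dicts, splitting 'parent/sub' section names:
#   {'atomshields': {'hash': 'sha1'},
#    'checkers': {'dsstore': {'enabled': 'True'}}}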
def _getClassInstance(path, args=None):
"""
Returns a class instance from a .py file.
Args:
path (str): Absolute path to .py file
args (dict): Arguments passed via class constructor
Returns:
object: Class instance or None
"""
if not path.endswith(".py"):
return None
if args is None:
args = {}
classname = AtomShieldsScanner._getClassName(path)
basename = os.path.basename(path).replace(".py", "")
sys.path.append(os.path.dirname(path))
try:
mod = __import__(basename, globals(), locals(), [classname], -1)
class_ = getattr(mod, classname)
instance = class_(**args)
except Exception as e:
AtomShieldsScanner._debug("[!] %s" % e)
return None
finally:
sys.path.remove(os.path.dirname(path))
return instance |
def _executeMassiveMethod(path, method, args=None, classArgs = None):
"""
    Execute a specific method for each class instance located in path
    Args:
        path (str): Absolute path which contains the .py files
        method (str or callable): Method to execute on each class instance
        args (dict): Keyword arguments passed to the method
        classArgs (dict): Keyword arguments passed to each class constructor
    Returns:
        dict: Dictionary which contains the response for every class instance.
            The dictionary keys are the value of the 'NAME' class variable.
    """
response = {}
if args is None:
args = {}
if classArgs is None:
classArgs = {}
sys.path.append(path)
exclude = ["__init__.py", "base.py"]
for f in AtomShieldsScanner._getFiles(path, "*.py", exclude=exclude):
try:
instance = AtomShieldsScanner._getClassInstance(path = f, args = classArgs)
if instance is not None:
if callable(method):
args["instance"] = instance
output = method(**args)
response[instance.__class__.NAME] = output
else:
if hasattr(instance, method):
output = getattr(instance, method)(**args)
response[instance.__class__.NAME] = output
else:
continue
except Exception as e:
AtomShieldsScanner._debug("[!] %s" % e)
sys.path.remove(path)
return response |
def run(self):
"""
Run a scan in the path setted.
"""
self.checkProperties()
self.debug("[*] Iniciando escaneo de AtomShields con las siguientes propiedades. . . ")
self.showScanProperties()
self.loadConfig()
# Init time counter
init_ts = datetime.now()
# Execute plugins
cwd = os.getcwd()
os.chdir(self.path)
issues = self.executeCheckers()
os.chdir(cwd)
# Finish time counter
end_ts = datetime.now()
duration = '{}'.format(end_ts - init_ts)
# Process and set issues
for plugin in issues.keys():
value = issues[plugin]
            if isinstance(value, list):
                # Iterate explicitly; map() is lazy on Python 3
                for issue in value:
                    self.saveIssue(issue)
            else:
                self.saveIssue(value)
        # Execute reports
        print("")
self.executeReports()
# Print summary output.
self.debug("")
self.debug("Duration: {t}".format(t=duration))
self.showSummary()
return self.issues |
def install():
"""
Install all the dependences
"""
cmd = CommandHelper()
cmd.install("npm")
cmd = CommandHelper()
cmd.install("nodejs-legacy")
# Install retre with npm
cmd = CommandHelper()
cmd.command = "npm install -g retire"
cmd.execute()
if cmd.errors:
from termcolor import colored
print colored(cmd.errors, "red")
else:
print cmd.output |
def potential(self, value):
"""
Setter for 'potential' property
Args:
value (bool): True if a potential is required. False else
"""
if value:
self._potential = True
else:
self._potential = False |
def get(name, default=None, allow_default=True):
""" Shortcut method for getting a setting value.
:param str name: Setting key name.
:param default: Default value of setting if it's not explicitly
set. Defaults to `None`
    :param bool allow_default: If true, use the parameter default as
                               default if the key is not set, else raise
                               :exc:`KeyError`. Defaults to `True`
:raises: :exc:`KeyError` if allow_default is false and the setting is
not set.
"""
return Config().get(name, default, allow_default=allow_default) |
def env(key, default):
"""
Helper to try to get a setting from the environment, or pyconfig, or
finally use a provided default.
"""
value = os.environ.get(key, None)
if value is not None:
log.info(' %s = %r', key.lower().replace('_', '.'), value)
return value
key = key.lower().replace('_', '.')
value = get(key)
if value is not None:
return value
return default |
def env_key(key, default):
"""
    Try to get `key` from the environment.
    The lookup name is derived from `key` by replacing dots with underscores
    and uppercasing it:
    my.database.host => MY_DATABASE_HOST
"""
env = key.upper().replace('.', '_')
return os.environ.get(env, default) |
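For example:

os.environ['MY_DATABASE_HOST'] = 'db1'
env_key('my.database.host', 'localhost')   # 'db1'
env_key('my.database.port', 5432)          # 5432 (falls back to the default)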
def set(self, name, value):
""" Changes a setting value.
This implements a locking mechanism to ensure some level of thread
safety.
:param str name: Setting key name.
:param value: Setting value.
"""
if not self.settings.get('pyconfig.case_sensitive', False):
name = name.lower()
log.info(" %s = %s", name, repr(value))
# Acquire our lock to change the config
with self.mut_lock:
self.settings[name] = value |
def _update(self, conf_dict, base_name=None):
""" Updates the current configuration with the values in `conf_dict`.
:param dict conf_dict: Dictionary of key value settings.
:param str base_name: Base namespace for setting keys.
"""
for name in conf_dict:
# Skip private names
if name.startswith('_'):
continue
value = conf_dict[name]
# Skip Namespace if it's imported
if value is Namespace:
continue
# Use a base namespace
if base_name:
name = base_name + '.' + name
if isinstance(value, Namespace):
for name, value in value.iteritems(name):
self.set(name, value)
# Automatically call any functions in the settings module, and if
# they return a value other than None, that value becomes a setting
elif callable(value):
value = value()
if value is not None:
self.set(name, value)
else:
self.set(name, value) |
def load(self, clear=False):
"""
Loads all the config plugin modules to build a working configuration.
If there is a ``localconfig`` module on the python path, it will be
loaded last, overriding other settings.
:param bool clear: Clear out the previous settings before loading
"""
if clear:
self.settings = {}
defer = []
# Load all config plugins
for conf in pkg_resources.iter_entry_points('pyconfig'):
if conf.attrs:
raise RuntimeError("config must be a module")
mod_name = conf.module_name
base_name = conf.name if conf.name != 'any' else None
log.info("Loading module '%s'", mod_name)
mod_dict = runpy.run_module(mod_name)
# If this module wants to be deferred, save it for later
if mod_dict.get('deferred', None) is deferred:
log.info("Deferring module '%s'", mod_name)
mod_dict.pop('deferred')
defer.append((mod_name, base_name, mod_dict))
continue
self._update(mod_dict, base_name)
# Load deferred modules
for mod_name, base_name, mod_dict in defer:
log.info("Loading deferred module '%s'", mod_name)
self._update(mod_dict, base_name)
if etcd().configured:
# Load etcd stuff
mod_dict = etcd().load()
if mod_dict:
self._update(mod_dict)
# Allow localconfig overrides
mod_dict = None
try:
mod_dict = runpy.run_module('localconfig')
except ImportError:
pass
except ValueError as err:
        if getattr(err, 'message', None) != '__package__ set to non-string':
raise
# This is a bad work-around to make this work transparently...
# shouldn't really access core stuff like this, but Fuck It[tm]
mod_name = 'localconfig'
if sys.version_info < (2, 7):
loader, code, fname = runpy._get_module_details(mod_name)
else:
_, loader, code, fname = runpy._get_module_details(mod_name)
mod_dict = runpy._run_code(code, {}, {}, mod_name, fname, loader,
pkg_name=None)
if mod_dict:
log.info("Loading module 'localconfig'")
self._update(mod_dict)
self.call_reload_hooks() |
def get(self, name, default, allow_default=True):
""" Return a setting value.
:param str name: Setting key name.
:param default: Default value of setting if it's not explicitly
set.
:param bool allow_default: If true, use the parameter default as
default if the key is not set, else raise
:exc:`LookupError`
:raises: :exc:`LookupError` if allow_default is false and the setting is
not set.
"""
if not self.settings.get('pyconfig.case_sensitive', False):
name = name.lower()
if name not in self.settings:
if not allow_default:
raise LookupError('No setting "{name}"'.format(name=name))
self.settings[name] = default
return self.settings[name] |
def init(self, hosts=None, cacert=None, client_cert=None, client_key=None):
"""
Handle creating the new etcd client instance and other business.
:param hosts: Host string or list of hosts (default: `'127.0.0.1:2379'`)
:param cacert: CA cert filename (optional)
:param client_cert: Client cert filename (optional)
:param client_key: Client key filename (optional)
:type ca: str
:type cert: str
:type key: str
"""
# Try to get the etcd module
try:
import etcd
self.module = etcd
except ImportError:
pass
if not self.module:
return
self._parse_jetconfig()
# Check env for overriding configuration or pyconfig setting
hosts = env('PYCONFIG_ETCD_HOSTS', hosts)
protocol = env('PYCONFIG_ETCD_PROTOCOL', None)
cacert = env('PYCONFIG_ETCD_CACERT', cacert)
client_cert = env('PYCONFIG_ETCD_CERT', client_cert)
client_key = env('PYCONFIG_ETCD_KEY', client_key)
# Parse auth string if there is one
username = None
password = None
auth = env('PYCONFIG_ETCD_AUTH', None)
if auth:
auth = auth.split(':')
auth.append('')
username = auth[0]
password = auth[1]
# Create new etcd instance
hosts = self._parse_hosts(hosts)
if hosts is None:
return
kw = {}
# Need this when passing a list of hosts to python-etcd, which we
# always do, even if it's a list of one
kw['allow_reconnect'] = True
# Grab optional protocol argument
if protocol:
kw['protocol'] = protocol
# Add auth to constructor if we got it
if username:
kw['username'] = username
if password:
kw['password'] = password
# Assign the SSL args if we have 'em
if cacert:
kw['ca_cert'] = os.path.abspath(cacert)
if client_cert and client_key:
kw['cert'] = ((os.path.abspath(client_cert),
os.path.abspath(client_key)))
elif client_cert:
kw['cert'] = os.path.abspath(client_cert)
if cacert or client_cert or client_key:
kw['protocol'] = 'https'
self.client = self.module.Client(hosts, **kw) |
def load(self, prefix=None, depth=None):
"""
Return a dictionary of settings loaded from etcd.
"""
prefix = prefix or self.prefix
prefix = '/' + prefix.strip('/') + '/'
if depth is None:
depth = self.inherit_depth
if not self.configured:
log.debug("etcd not available")
return
if self.watching:
log.info("Starting watcher for %r", prefix)
self.start_watching()
log.info("Loading from etcd %r", prefix)
try:
result = self.client.get(prefix)
except self.module.EtcdKeyNotFound:
result = None
if not result:
log.info("No configuration found")
return {}
# Iterate over the returned keys from etcd
update = {}
for item in result.children:
key = item.key
value = item.value
# Try to parse them as JSON strings, just in case it works
try:
value = pytool.json.from_json(value)
except:
pass
# Make the key lower-case if we're not case-sensitive
if not self.case_sensitive:
key = key.lower()
# Strip off the prefix that we're using
if key.startswith(prefix):
key = key[len(prefix):]
# Store the key/value to update the config
update[key] = value
# Access cached settings directly to avoid recursion
inherited = Config().settings.get(self.inherit_key,
update.get(self.inherit_key, None))
if depth > 0 and inherited:
log.info(" ... inheriting ...")
inherited = self.load(inherited, depth - 1) or {}
inherited.update(update)
update = inherited
return update |
def get_watcher(self):
"""
Return a etcd watching generator which yields events as they happen.
"""
if not self.watching:
raise StopIteration()
return self.client.eternal_watch(self.prefix, recursive=True) |
def start_watching(self):
""" Begins watching etcd for changes. """
# Don't create a new watcher thread if we already have one running
if self.watcher and self.watcher.is_alive():
return
# Create a new watcher thread and start it
self.watcher = Watcher()
self.watcher.start() |
def _parse_hosts(self, hosts):
"""
Return hosts parsed into a tuple of tuples.
:param hosts: String or list of hosts
"""
# Default host
if hosts is None:
return
# If it's a string, we allow comma separated strings
if isinstance(hosts, six.string_types):
# Split comma-separated list
hosts = [host.strip() for host in hosts.split(',')]
# Split host and port
hosts = [host.split(':') for host in hosts]
# Coerce ports to int
hosts = [(host[0], int(host[1])) for host in hosts]
# The python-etcd client explicitly checks for a tuple type
return tuple(hosts) |
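A quick illustration of the accepted formats (shown as bare calls for brevity; this is a method on the etcd wrapper). Note every host must carry an explicit port, since the split assumes 'host:port' pairs:

_parse_hosts('127.0.0.1:2379')
# (('127.0.0.1', 2379),)
_parse_hosts('10.0.0.1:2379, 10.0.0.2:2379')
# (('10.0.0.1', 2379), ('10.0.0.2', 2379))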
def _parse_jetconfig(self):
"""
    Undocumented cross-compatibility functionality with jetconfig
(https://github.com/shakefu/jetconfig) that is very sloppy.
"""
conf = env('JETCONFIG_ETCD', None)
if not conf:
return
    # six is already used in this module; its urllib shim keeps this working
    # on both Python 2 and 3
    from six.moves.urllib import parse as urlparse
auth = None
port = None
conf = conf.split(',').pop()
entry = urlparse.urlparse(conf)
scheme = entry.scheme
host = entry.netloc or entry.path # Path is where it goes if there's no
# scheme on the URL
if '@' in host:
auth, host = host.split('@')
if ':' in host:
host, port = host.split(':')
if not port and scheme == 'https':
port = '443'
if scheme:
os.environ['PYCONFIG_ETCD_PROTOCOL'] = scheme
if auth:
os.environ['PYCONFIG_ETCD_AUTH'] = auth
if port:
host = host + ":" + port
os.environ['PYCONFIG_ETCD_HOSTS'] = host |
def main():
"""
Main script for `pyconfig` command.
"""
parser = argparse.ArgumentParser(description="Helper for working with "
"pyconfigs")
target_group = parser.add_mutually_exclusive_group()
target_group.add_argument('-f', '--filename',
help="parse an individual file or directory",
metavar='F')
target_group.add_argument('-m', '--module',
help="parse a package or module, recursively looking inside it",
metavar='M')
parser.add_argument('-v', '--view-call',
help="show the actual pyconfig call made (default: show namespace)",
action='store_true')
parser.add_argument('-l', '--load-configs',
help="query the currently set value for each key found",
action='store_true')
key_group = parser.add_mutually_exclusive_group()
key_group.add_argument('-a', '--all',
help="show keys which don't have defaults set",
action='store_true')
key_group.add_argument('-k', '--only-keys',
help="show a list of discovered keys without values",
action='store_true')
parser.add_argument('-n', '--natural-sort',
help="sort by filename and line (default: alphabetical by key)",
action='store_true')
parser.add_argument('-s', '--source',
help="show source annotations (implies --natural-sort)",
action='store_true')
parser.add_argument('-c', '--color',
help="toggle output colors (default: %s)" % bool(pygments),
action='store_const', default=bool(pygments),
const=(not bool(pygments)))
args = parser.parse_args()
if args.color and not pygments:
_error("Pygments is required for color output.\n"
" pip install pygments")
if args.module:
_handle_module(args)
if args.filename:
_handle_file(args) |
def _handle_module(args):
"""
Handles the -m argument.
"""
module = _get_module_filename(args.module)
if not module:
_error("Could not load module or package: %r", args.module)
elif isinstance(module, Unparseable):
_error("Could not determine module source: %r", args.module)
_parse_and_output(module, args) |
def _error(msg, *args):
"""
Print an error message and exit.
:param msg: A message to print
:type msg: str
"""
print(msg % args, file=sys.stderr)
sys.exit(1) |
def _get_module_filename(module):
"""
Return the filename of `module` if it can be imported.
If `module` is a package, its directory will be returned.
If it cannot be imported ``None`` is returned.
If the ``__file__`` attribute is missing, or the module or package is a
compiled egg, then an :class:`Unparseable` instance is returned, since the
source can't be retrieved.
:param module: A module name, such as ``'test.test_config'``
:type module: str
"""
# Split up the module and its containing package, if it has one
module = module.split('.')
package = '.'.join(module[:-1])
module = module[-1]
try:
if not package:
# We aren't accessing a module within a package, but rather a top
# level package, so it's a straight up import
module = __import__(module)
else:
# Import the package containing our desired module
package = __import__(package, fromlist=[module])
# Get the module from that package
module = getattr(package, module, None)
filename = getattr(module, '__file__', None)
if not filename:
# No filename? Nothing to do here
return Unparseable()
# If we get a .pyc, strip the c to get .py so we can parse the source
if filename.endswith('.pyc'):
filename = filename[:-1]
        if not os.path.exists(filename):
            # If there's only a .pyc and no .py it's a compiled package or
            # egg and we can't get at the source for parsing
return Unparseable()
# If we have a package, we want the directory not the init file
if filename.endswith('__init__.py'):
filename = filename[:-11]
# Yey, we found it
return filename
except ImportError:
# Definitely not a valid module or package
return |
def _parse_and_output(filename, args):
"""
Parse `filename` appropriately and then output calls according to the
`args` specified.
:param filename: A file or directory
:param args: Command arguments
:type filename: str
"""
relpath = os.path.dirname(filename)
if os.path.isfile(filename):
calls = _parse_file(filename, relpath)
elif os.path.isdir(filename):
calls = _parse_dir(filename, relpath)
else:
# XXX(shakefu): This is an error of some sort, maybe symlinks?
# Probably need some thorough testing
_error("Could not determine file type: %r", filename)
if not calls:
# XXX(shakefu): Probably want to change this to not be an error and
# just be a normal fail (e.g. command runs, no output).
_error("No pyconfig calls.")
if args.load_configs:
# We want to iterate over the configs and add any keys which haven't
# already been found
keys = set()
for call in calls:
keys.add(call.key)
# Iterate the loaded keys and make _PyconfigCall instances
conf = pyconfig.Config()
for key, value in conf.settings.items():
if key in keys:
continue
calls.append(_PyconfigCall('set', key, value, [None]*4))
_output(calls, args) |
def _output(calls, args):
"""
Outputs `calls`.
:param calls: List of :class:`_PyconfigCall` instances
:param args: :class:`~argparse.ArgumentParser` instance
:type calls: list
:type args: argparse.ArgumentParser
"""
# Sort the keys appropriately
if args.natural_sort or args.source:
calls = sorted(calls, key=lambda c: (c.filename, c.lineno))
else:
calls = sorted(calls, key=lambda c: c.key)
out = []
# Handle displaying only the list of keys
if args.only_keys:
keys = set()
for call in calls:
if call.key in keys:
continue
out.append(_format_call(call, args))
keys.add(call.key)
out = '\n'.join(out)
if args.color:
out = _colorize(out)
print(out, end=' ')
# We're done here
return
# Build a list of keys which have default values available, so that we can
# toggle between displaying only those keys with defaults and all keys
keys = set()
for call in calls:
if call.default:
keys.add(call.key)
for call in calls:
if not args.all and not call.default and call.key in keys:
continue
out.append(_format_call(call, args))
out = '\n'.join(out)
if args.color:
out = _colorize(out)
print(out, end=' ') |
def _format_call(call, args):
"""
Return `call` formatted appropriately for `args`.
:param call: A pyconfig call object
:param args: Arguments from the command
:type call: :class:`_PyconfigCall`
"""
out = ''
if args.source:
out += call.annotation() + '\n'
if args.only_keys:
out += call.get_key()
return out
if args.view_call:
out += call.as_call()
elif args.load_configs:
out += call.as_live()
else:
out += call.as_namespace()
return out |
def _colorize(output):
"""
Return `output` colorized with Pygments, if available.
"""
if not pygments:
return output
# Available styles
# ['monokai', 'manni', 'rrt', 'perldoc', 'borland', 'colorful', 'default',
# 'murphy', 'vs', 'trac', 'tango', 'fruity', 'autumn', 'bw', 'emacs',
# 'vim', 'pastie', 'friendly', 'native']
return pygments.highlight(output,
pygments.lexers.PythonLexer(),
pygments.formatters.Terminal256Formatter(style='monokai')) |
def _parse_dir(directory, relpath):
"""
Return a list of :class:`_PyconfigCall` from recursively parsing
`directory`.
:param directory: Directory to walk looking for python files
:param relpath: Path to make filenames relative to
:type directory: str
:type relpath: str
"""
relpath = os.path.dirname(relpath)
pyconfig_calls = []
for root, dirs, files in os.walk(directory):
for filename in files:
if not filename.endswith('.py'):
continue
filename = os.path.join(root, filename)
pyconfig_calls.extend(_parse_file(filename, relpath))
return pyconfig_calls |
def _parse_file(filename, relpath=None):
"""
Return a list of :class:`_PyconfigCall` from parsing `filename`.
:param filename: A file to parse
:param relpath: Relative directory to strip (optional)
:type filename: str
:type relpath: str
"""
with open(filename, 'r') as source:
source = source.read()
pyconfig_calls = []
try:
nodes = ast.parse(source, filename=filename)
except SyntaxError:
# XXX(Jake): We might want to handle this differently
return []
# Look for UTF-8 encoding
first_lines = source[0:200]
match = re.match('^#.*coding[:=].?([a-zA-Z0-9-_]+).*', first_lines)
if match:
try:
coding = match.group(1)
source = source.decode(coding)
except:
print("# Error decoding file, may not parse correctly:", filename)
try:
# Split the source into lines so we can reference it easily
source = source.split('\n')
except:
        print("# Error parsing file, ignoring:", filename)
return []
# Make the filename relative to the given path, if needed
if relpath:
filename = os.path.relpath(filename, relpath)
for call in ast.walk(nodes):
if not isinstance(call, _ast.Call):
# Skip any node that isn't a Call
continue
func = call.func
if not isinstance(call.func, _ast.Attribute):
# We're looking for calls to pyconfig.*, so the function has to be
# an Attribute node, otherwise skip it
continue
if getattr(func.value, 'id', None) != 'pyconfig':
# If the Attribute value isn't a Name (doesn't have an `id`) or it
# isn't 'pyconfig', then we skip
continue
if func.attr not in ['get', 'set', 'setting']:
# If the Attribute attr isn't one of the pyconfig API methods, then
# we skip
continue
# Now we parse the call arguments as best we can
args = []
if call.args:
arg = call.args[0]
if isinstance(arg, _ast.Str):
args.append(arg.s)
else:
args.append(_map_arg(arg))
for arg in call.args[1:]:
args.append(_map_arg(arg))
line = (filename, source[call.lineno-1], call.lineno, call.col_offset)
call = _PyconfigCall(func.attr, args[0], args[1:], line)
pyconfig_calls.append(call)
return pyconfig_calls |
def _map_arg(arg):
"""
Return `arg` appropriately parsed or mapped to a usable value.
"""
# Grab the easy to parse values
if isinstance(arg, _ast.Str):
return repr(arg.s)
elif isinstance(arg, _ast.Num):
return arg.n
elif isinstance(arg, _ast.Name):
name = arg.id
if name == 'True':
return True
elif name == 'False':
return False
elif name == 'None':
return None
return name
else:
# Everything else we don't bother with
return Unparseable() |
def as_namespace(self, namespace=None):
"""
Return this call as if it were being assigned in a pyconfig namespace.
If `namespace` is specified and matches the top level of this call's
:attr:`key`, then that section of the key will be removed.
"""
        key = self.get_key()
        if namespace and key.startswith(namespace):
            key = key[len(namespace) + 1:]
        return "%s = %s" % (key, self._default() or NotSet()) |
def as_live(self):
"""
Return this call as if it were being assigned in a pyconfig namespace,
but load the actual value currently available in pyconfig.
"""
key = self.get_key()
default = pyconfig.get(key)
if default:
default = repr(default)
else:
default = self._default() or NotSet()
return "%s = %s" % (key, default) |
def as_call(self):
"""
Return this call as it is called in its source.
"""
default = self._default()
default = ', ' + default if default else ''
return "pyconfig.%s(%r%s)" % (self.method, self.get_key(), default) |
def get_key(self):
"""
Return the call key, even if it has to be parsed from the source.
"""
if not isinstance(self.key, Unparseable):
return self.key
line = self.source[self.col_offset:]
        regex = re.compile(r'''pyconfig\.[eginst]+\(([^,]+).*?\)''')
match = regex.match(line)
if not match:
return Unparseable()
return "<%s>" % match.group(1) |
def _default_value_only(self):
"""
Return only the default value, if there is one.
"""
line = self.source[self.col_offset:]
        regex = re.compile(r'''pyconfig\.[eginst]+\(['"][^)]+?['"], ?(.*?)\)''')
match = regex.match(line)
if not match:
return ''
return match.group(1) |
def _default(self):
"""
Return the default argument, formatted nicely.
"""
try:
# Check if it's iterable
iter(self.default)
except TypeError:
return repr(self.default)
# This is to look for unparsable values, and if we find one, we try to
# directly parse the string
for v in self.default:
if isinstance(v, Unparseable):
default = self._default_value_only()
if default:
return default
# Otherwise just make it a string and go
return ', '.join(str(v) for v in self.default) |
def _get_param_names(self):
"""
Get mappable parameters from YAML.
"""
template = Template(self.yaml_string)
names = ['yaml_string'] # always include the template
for match in re.finditer(template.pattern, template.template):
name = match.group('named') or match.group('braced')
assert name is not None
names.append(name)
return names |
def _get_dataset(self, X, y=None):
"""
Construct a pylearn2 dataset.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels.
"""
from pylearn2.datasets import DenseDesignMatrix
X = np.asarray(X)
assert X.ndim > 1
if y is not None:
y = self._get_labels(y)
if X.ndim == 2:
return DenseDesignMatrix(X=X, y=y)
return DenseDesignMatrix(topo_view=X, y=y) |
def _get_labels(self, y):
"""
Construct pylearn2 dataset labels.
Parameters
----------
y : array_like, optional
Labels.
"""
y = np.asarray(y)
if y.ndim == 1:
return y.reshape((y.size, 1))
assert y.ndim == 2
return y |
def fit(self, X, y=None):
"""
Build a trainer and run main_loop.
Parameters
----------
X : array_like
Training examples.
y : array_like, optional
Labels.
"""
from pylearn2.config import yaml_parse
from pylearn2.train import Train
# build trainer
params = self.get_params()
yaml_string = Template(self.yaml_string).substitute(params)
self.trainer = yaml_parse.load(yaml_string)
assert isinstance(self.trainer, Train)
if self.trainer.dataset is not None:
raise ValueError('Train YAML database must evaluate to None.')
self.trainer.dataset = self._get_dataset(X, y)
# update monitoring dataset(s)
if (hasattr(self.trainer.algorithm, 'monitoring_dataset') and
self.trainer.algorithm.monitoring_dataset is not None):
monitoring_dataset = self.trainer.algorithm.monitoring_dataset
if len(monitoring_dataset) == 1 and '' in monitoring_dataset:
monitoring_dataset[''] = self.trainer.dataset
else:
monitoring_dataset['train'] = self.trainer.dataset
self.trainer.algorithm._set_monitoring_dataset(monitoring_dataset)
else:
self.trainer.algorithm._set_monitoring_dataset(
self.trainer.dataset)
# run main loop
self.trainer.main_loop() |
def _predict(self, X, method='fprop'):
"""
Get model predictions.
See pylearn2.scripts.mlp.predict_csv and
http://fastml.com/how-to-get-predictions-from-pylearn2/.
Parameters
----------
X : array_like
Test dataset.
method : str
Model method to call for prediction.
"""
import theano
X_sym = self.trainer.model.get_input_space().make_theano_batch()
y_sym = getattr(self.trainer.model, method)(X_sym)
f = theano.function([X_sym], y_sym, allow_input_downcast=True)
return f(X) |
def _get_labels(self, y):
"""
Construct pylearn2 dataset labels.
Parameters
----------
y : array_like, optional
Labels.
"""
y = np.asarray(y)
assert y.ndim == 1
# convert to one-hot
labels = np.unique(y).tolist()
oh = np.zeros((y.size, len(labels)), dtype=float)
for i, label in enumerate(y):
oh[i, labels.index(label)] = 1.
return oh |
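A small worked example of the one-hot conversion above:

y = np.array(['cat', 'dog', 'cat'])
# np.unique(y) -> ['cat', 'dog'], so the columns are [cat, dog]:
# oh == [[1., 0.],
#        [0., 1.],
#        [1., 0.]]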
def load(self):
"""
Load the dataset using pylearn2.config.yaml_parse.
"""
from pylearn2.config import yaml_parse
from pylearn2.datasets import Dataset
dataset = yaml_parse.load(self.yaml_string)
assert isinstance(dataset, Dataset)
    data = next(dataset.iterator(mode='sequential', num_batches=1,
                                 data_specs=dataset.data_specs,
                                 return_tuple=True))
if len(data) == 2:
X, y = data
y = np.squeeze(y)
if self.one_hot:
y = np.argmax(y, axis=1)
else:
X = data
y = None
return X, y |
def fit(self):
"""
Fits the model with random restarts.
:return:
"""
self.model.optimize_restarts(num_restarts=self.num_restarts, verbose=False) |
def _create_kernel(self):
"""
creates an additive kernel
"""
# Check kernels
kernels = self.kernel_params
if not isinstance(kernels, list):
raise RuntimeError('Must provide enumeration of kernels')
for kernel in kernels:
if sorted(list(kernel.keys())) != ['name', 'options', 'params']:
raise RuntimeError(
'strategy/params/kernels must contain keys: "name", "options", "params"')
# Turn into entry points.
# TODO use eval to allow user to specify internal variables for kernels (e.g. V) in config file.
kernels = []
for kern in self.kernel_params:
params = kern['params']
options = kern['options']
name = kern['name']
kernel_ep = load_entry_point(name, 'strategy/params/kernels')
if issubclass(kernel_ep, KERNEL_BASE_CLASS):
if options['independent']:
# TODO Catch errors here? Estimator entry points don't catch instantiation errors
kernel = np.sum([kernel_ep(1, active_dims=[i], **params) for i in range(self.n_dims)])
else:
kernel = kernel_ep(self.n_dims, **params)
if not isinstance(kernel, KERNEL_BASE_CLASS):
                raise RuntimeError('strategy/params/kernel must load a '
                                   'GPy-derived Kernel')
kernels.append(kernel)
self.kernel = np.sum(kernels) |
def fit_and_score_estimator(estimator, parameters, cv, X, y=None, scoring=None,
iid=True, n_jobs=1, verbose=1,
pre_dispatch='2*n_jobs'):
"""Fit and score an estimator with cross-validation
This function is basically a copy of sklearn's
model_selection._BaseSearchCV._fit(), which is the core of the GridSearchCV
fit() method. Unfortunately, that class does _not_ return the training
set scores, which we want to save in the database, and because of the
way it's written, you can't change it by subclassing or monkeypatching.
    This function uses some undocumented internal sklearn APIs (non-public).
    It was written against sklearn version 0.16.1. Prior versions are likely
    to fail due to changes in the design of the cross_validation module.
Returns
-------
out : dict, with keys 'mean_test_score' 'test_scores', 'train_scores'
The scores on the training and test sets, as well as the mean test set
score.
"""
scorer = check_scoring(estimator, scoring=scoring)
n_samples = num_samples(X)
X, y = check_arrays(X, y, allow_lists=True, sparse_format='csr',
allow_nans=True)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv=cv, y=y, classifier=is_classifier(estimator))
out = Parallel(
n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, parameters,
fit_params=None)
for train, test in cv.split(X, y))
assert len(out) == cv.n_splits
train_scores, test_scores = [], []
n_train_samples, n_test_samples = [], []
for test_score, n_test, train_score, n_train, _ in out:
train_scores.append(train_score)
test_scores.append(test_score)
n_test_samples.append(n_test)
n_train_samples.append(n_train)
train_scores, test_scores = map(list, check_arrays(train_scores,
test_scores,
warn_nans=True,
replace_nans=True))
if iid:
if verbose > 0 and is_msmbuilder_estimator(estimator):
print('[CV] Using MSMBuilder API n_samples averaging')
print('[CV] n_train_samples: %s' % str(n_train_samples))
print('[CV] n_test_samples: %s' % str(n_test_samples))
mean_test_score = np.average(test_scores, weights=n_test_samples)
mean_train_score = np.average(train_scores, weights=n_train_samples)
else:
mean_test_score = np.average(test_scores)
mean_train_score = np.average(train_scores)
grid_scores = {
'mean_test_score': mean_test_score, 'test_scores': test_scores,
'mean_train_score': mean_train_score, 'train_scores': train_scores,
'n_test_samples': n_test_samples, 'n_train_samples': n_train_samples}
return grid_scores |
def init_subclass_by_name(baseclass, short_name, params):
"""
Find the subclass, `kls` of baseclass with class attribute `short_name`
that matches the supplied `short_name`, and then instantiate and return
that class with:
return kls(**params)
This function also tries its best to catch any possible TypeErrors due
to binding of the arguments, and rethrows them as nicely formatted
RuntimeErrors that are suitable for showing to users.
"""
sc = baseclass.__subclasses__()
for kls in sc:
if kls.short_name == short_name or \
(_is_collection(kls.short_name) and short_name in kls.short_name):
try:
return kls(**params)
except TypeError as e:
spec = inspect.getargspec(kls.__init__)
# try to give nice errors to the user if the params failed
# to bind
if 'unexpected' in str(e):
avail = join_quoted(spec.args[1:])
raise RuntimeError(
"%s's %s. Available params for this subclass are: %s."
% (short_name, str(e), avail))
elif 'takes exactly' in str(e):
required = join_quoted(spec.args[1:-len(spec.defaults)])
raise RuntimeError(
"%s's %s. Required params for this subclass are %s."
% (short_name, str(e), required))
elif 'takes at least' in str(e):
required = join_quoted(spec.args[1:-len(spec.defaults)])
optional = join_quoted(spec.args[-len(spec.defaults):])
raise RuntimeError(
"%s's %s. Required params for this subclass are: %s. "
"Optional params are: %s" % (
short_name, str(e), required, optional))
# :(
raise
chain = itertools.chain.from_iterable(
e.short_name if _is_collection(e.short_name) else [e.short_name]
for e in sc)
avail_names = ', '.join(str(n) for n in chain)
raise ValueError('"%s" is not a recognized subclass. available names '
'are: %s' % (short_name, avail_names)) |
def dict_merge(base, top):
"""Recursively merge two dictionaries, with the elements from `top`
taking precedence over elements from `top`.
Returns
-------
out : dict
A new dict, containing the merged records.
"""
out = dict(top)
    for key in base:
        if key in top:
            if isinstance(base[key], dict) and isinstance(top[key], dict):
                out[key] = dict_merge(base[key], top[key])
        else:
            out[key] = base[key]
return out |
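A worked example of the precedence rules:

base = {'a': 1, 'b': {'x': 1, 'y': 2}}
top = {'b': {'y': 20, 'z': 30}, 'c': 3}
dict_merge(base, top)
# {'a': 1, 'b': {'x': 1, 'y': 20, 'z': 30}, 'c': 3}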
def in_directory(path):
    """Context manager (with statement) that changes the current directory
    during the context. (Meant to be wrapped with contextlib.contextmanager.)
    """
    curdir = os.path.abspath(os.curdir)
    os.chdir(path)
    try:
        yield
    finally:
        # Restore the previous working directory even if the body raises
        os.chdir(curdir) |
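Typical use, assuming the contextlib.contextmanager wrapping noted above:

with in_directory('/tmp'):
    pass  # cwd is /tmp inside the block
# cwd is restored here, even if the block raised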
def format_timedelta(td_object):
"""Format a timedelta object for display to users
Returns
-------
str
"""
def get_total_seconds(td):
# timedelta.total_seconds not in py2.6
return (td.microseconds +
(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
seconds = int(get_total_seconds(td_object))
periods = [('year', 60*60*24*365),
('month', 60*60*24*30),
('day', 60*60*24),
('hour', 60*60),
('minute', 60),
('second', 1)]
strings = []
for period_name, period_seconds in periods:
        if seconds >= period_seconds:  # >= so exact boundaries (e.g. 1 hour) count
period_value, seconds = divmod(seconds, period_seconds)
if period_value == 1:
strings.append("%s %s" % (period_value, period_name))
else:
strings.append("%s %ss" % (period_value, period_name))
return ", ".join(strings) |
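For instance:

from datetime import timedelta
format_timedelta(timedelta(days=1, hours=2, seconds=5))
# '1 day, 2 hours, 5 seconds'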
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method
if (X.dtype.char in np.typecodes['AllFloat'] and
not np.isfinite(X.sum()) and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype) |
def _warn_if_not_finite(X):
"""UserWarning if array contains non-finite elements"""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method
if (X.dtype.char in np.typecodes['AllFloat'] and
not np.isfinite(X.sum()) and not np.isfinite(X).all()):
warnings.warn("Result contains NaN, infinity"
" or a value too large for %r." % X.dtype,
category=UserWarning) |
def num_samples(x, is_nested=False):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if is_nested:
return sum(num_samples(xx, is_nested=False) for xx in x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x) |
def check_arrays(*arrays, **options):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
By default lists and tuples are converted to numpy arrays.
It is possible to enforce certain properties, such as dtype, continguity
and sparse matrix format (if a sparse matrix is passed).
Converting lists to arrays can be disabled by setting ``allow_lists=True``.
Lists can then contain arbitrary objects and are not checked for dtype,
finiteness or anything else but length. Arrays are still checked
and possibly converted.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays, unless allow_lists is specified.
sparse_format : 'csr', 'csc' or 'dense', None by default
If not None, any scipy.sparse matrix is converted to
Compressed Sparse Rows or Compressed Sparse Columns representations.
If 'dense', an error is raised when a sparse array is
passed.
copy : boolean, False by default
If copy is True, ensure that returned arrays are copies of the original
(if not already converted to another format earlier in the process).
check_ccontiguous : boolean, False by default
Check that the arrays are C contiguous
dtype : a numpy dtype instance, None by default
Enforce a specific dtype.
warn_nans : boolean, False by default
Prints warning if nans in the arrays
Disables allow_nans
replace_nans : boolean, False by default
Replace nans in the arrays with zeros
allow_lists : bool
Allow lists of arbitrary objects as input, just check their length.
Disables
allow_nans : boolean, False by default
Allows nans in the arrays
allow_nd : boolean, False by default
Allows arrays of more than 2 dimensions.
"""
sparse_format = options.pop('sparse_format', None)
if sparse_format not in (None, 'csr', 'csc', 'dense'):
raise ValueError('Unexpected sparse format: %r' % sparse_format)
copy = options.pop('copy', False)
check_ccontiguous = options.pop('check_ccontiguous', False)
dtype = options.pop('dtype', None)
warn_nans = options.pop('warn_nans', False)
replace_nans = options.pop('replace_nans', False)
allow_lists = options.pop('allow_lists', False)
allow_nans = options.pop('allow_nans', False)
allow_nd = options.pop('allow_nd', False)
if options:
raise TypeError("Unexpected keyword arguments: %r" % options.keys())
if len(arrays) == 0:
return None
n_samples = num_samples(arrays[0])
checked_arrays = []
for array in arrays:
array_orig = array
if array is None:
# special case: ignore optional y=None kwarg pattern
checked_arrays.append(array)
continue
size = num_samples(array)
if size != n_samples:
raise ValueError("Found array with dim %d. Expected %d"
% (size, n_samples))
if not allow_lists or hasattr(array, "shape"):
if sp.issparse(array):
if sparse_format == 'csr':
array = array.tocsr()
elif sparse_format == 'csc':
array = array.tocsc()
elif sparse_format == 'dense':
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if check_ccontiguous:
array.data = np.ascontiguousarray(array.data, dtype=dtype)
elif hasattr(array, 'data'):
array.data = np.asarray(array.data, dtype=dtype)
elif array.dtype != dtype:
array = array.astype(dtype)
if not allow_nans:
if hasattr(array, 'data'):
_assert_all_finite(array.data)
else:
_assert_all_finite(array.values())
else:
if check_ccontiguous:
array = np.ascontiguousarray(array, dtype=dtype)
else:
array = np.asarray(array, dtype=dtype)
if warn_nans:
allow_nans = True
_warn_if_not_finite(array)
if replace_nans:
array = np.nan_to_num(array)
if not allow_nans:
_assert_all_finite(array)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if copy and array is array_orig:
array = array.copy()
checked_arrays.append(array)
return checked_arrays |
def is_repeated_suggestion(params, history):
"""
Parameters
----------
params : dict
Trial param set
history : list of 3-tuples
History of past function evaluations. Each element in history
should be a tuple `(params, score, status)`, where `params` is a
dict mapping parameter names to values
Returns
-------
is_repeated_suggestion : bool
"""
if any(params == hparams and hstatus == 'SUCCEEDED'
for hparams, hscore, hstatus in history):
return True
else:
return False |
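A small illustration of the history format this expects (values are hypothetical):

history = [({'C': 1.0}, 0.8, 'SUCCEEDED'),
           ({'C': 10.0}, None, 'FAILED')]
is_repeated_suggestion({'C': 1.0}, history)    # True
is_repeated_suggestion({'C': 10.0}, history)   # False: that trial did not succeed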
def suggest(self, history, searchspace):
"""
Suggest params to maximize an objective function based on the
function evaluation history using a tree of Parzen estimators (TPE),
as implemented in the hyperopt package.
Use of this function requires that hyperopt be installed.
"""
        # This function is very odd, because as far as I can tell there's
        # no real documented API for any of the internals of hyperopt. Its
        # execution model is that hyperopt calls your objective function
        # (instead of merely providing you with suggested points, and then
        # you calling the function yourself), and it's very tricky (for me)
        # to use the internal hyperopt data structures to get these predictions
        # out directly.
        # So the path we take in this function is to construct a synthetic
        # hyperopt.Trials database from the `history`, and then call
        # hyperopt.fmin with a dummy objective function that logs the value
        # used, and then return that value to our client.
        # The form of the hyperopt.Trials database isn't really documented in
        # the code -- most of this comes from reverse engineering it, by
        # running fmin() on a simple function and then inspecting the form of
        # the resulting trials object.
if 'hyperopt' not in sys.modules:
raise ImportError('No module named hyperopt')
random = check_random_state(self.seed)
hp_searchspace = searchspace.to_hyperopt()
trials = Trials()
for i, (params, scores, status) in enumerate(history):
if status == 'SUCCEEDED':
# we're doing maximization, hyperopt.fmin() does minimization,
# so we need to swap the sign
result = {'loss': -np.mean(scores), 'status': STATUS_OK}
elif status == 'PENDING':
result = {'status': STATUS_RUNNING}
elif status == 'FAILED':
result = {'status': STATUS_FAIL}
else:
raise RuntimeError('unrecognized status: %s' % status)
# the vals key in the trials dict is basically just the params
# dict, but enum variables (hyperopt hp.choice() nodes) are
# different, because the index of the parameter is specified
# in vals, not the parameter itself.
vals = {}
for var in searchspace:
if isinstance(var, EnumVariable):
# get the index in the choices of the parameter, and use
# that.
matches = [
i for i, c in enumerate(var.choices)
if c == params[var.name]
]
assert len(matches) == 1
vals[var.name] = matches
else:
# the other big difference is that all of the param values
# are wrapped in length-1 lists.
vals[var.name] = [params[var.name]]
trials.insert_trial_doc({
'misc': {
'cmd': ('domain_attachment', 'FMinIter_Domain'),
'idxs': dict((k, [i]) for k in hp_searchspace.keys()),
'tid': i,
'vals': vals,
'workdir': None
},
'result': result,
'tid': i,
# bunch of fixed fields that hyperopt seems to require
'owner': None,
'spec': None,
'state': 2,
'book_time': None,
'exp_key': None,
'refresh_time': None,
'version': 0
})
trials.refresh()
chosen_params_container = []
        def suggest(*args, **kwargs):
            # Keyword arguments must come before **kwargs for Python 2 syntax
            return tpe.suggest(*args, gamma=self.gamma,
                               n_startup_jobs=self.seeds, **kwargs)
        def mock_fn(x):
            # http://stackoverflow.com/a/3190783/1079728
            # to get around there being no nonlocal keyword in python2
            chosen_params_container.append(x)
return 0
        fmin(fn=mock_fn,
             algo=suggest,  # the wrapper above, so gamma/n_startup_jobs take effect
space=hp_searchspace,
trials=trials,
max_evals=len(trials.trials) + 1,
**self._hyperopt_fmin_random_kwarg(random))
chosen_params = chosen_params_container[0]
return chosen_params |
def _merge_defaults(self, config):
"""The config object loads its values from two sources, with the
following precedence:
1. data/default_config.yaml
2. The config file itself, passed in to this object in the
constructor as `path`.
in case of conflict, the config file dominates.
"""
fn = resource_filename('osprey', join('data', 'default_config.yaml'))
with open(fn) as f:
default = parse(f)
return reduce(dict_merge, [default, config]) |
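# Hedged sketch (not from the source): `dict_merge` is used via reduce()
# above but not defined here. A recursive merge in which the right-hand
# dict wins on conflicts matches the documented precedence; this exact
# implementation is an assumption.
def dict_merge(base, overrides):
    """Return a new dict with `overrides` recursively merged onto `base`."""
    merged = dict(base)
    for key, value in overrides.items():
        if (key in merged and isinstance(merged[key], dict)
                and isinstance(value, dict)):
            merged[key] = dict_merge(merged[key], value)
        else:
            merged[key] = value
    return merged |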
def fromdict(cls, config, check_fields=True):
"""Create a Config object from config dict directly."""
m = super(Config, cls).__new__(cls)
m.path = '.'
m.verbose = False
m.config = m._merge_defaults(config)
if check_fields:
m._check_fields()
return m |
def get_value(self, field, default=None):
"""Get an entry from within a section, using a '/' delimiter"""
section, key = field.split('/')
return self.get_section(section).get(key, default) |
def estimator(self):
"""Get the estimator, an instance of a (subclass of)
sklearn.base.BaseEstimator
It can be loaded either from a pickle, from a string using eval(),
or from an entry point.
e.g.
estimator:
# only one of the following can actually be active in a given
# config file.
pickle: path-to-pickle-file.pkl
eval: "Pipeline([('cluster': KMeans())])"
entry_point: sklearn.linear_model.LogisticRegression
module: myestimator
"""
module_path = self.get_value('estimator/module')
if module_path is not None:
with prepend_syspath(dirname(abspath(self.path))):
estimator_module = importlib.import_module(module_path)
estimator = estimator_module.estimator()
if not isinstance(estimator, sklearn.base.BaseEstimator):
            raise RuntimeError('estimator/module must load a '
                               'sklearn-derived Estimator')
return estimator
evalstring = self.get_value('estimator/eval')
if evalstring is not None:
got = self.get_value('estimator/eval_scope')
if isinstance(got, six.string_types):
got = [got]
elif isinstance(got, list):
pass
else:
raise RuntimeError('unexpected type for estimator/eval_scope')
scope = {}
for pkg_name in got:
if pkg_name in eval_scopes.__all__:
scope.update(getattr(eval_scopes, pkg_name)())
else:
try:
pkg = importlib.import_module(pkg_name)
except ImportError as e:
raise RuntimeError(str(e))
scope.update(eval_scopes.import_all_estimators(pkg))
try:
estimator = eval(evalstring, {}, scope)
if not isinstance(estimator, sklearn.base.BaseEstimator):
                raise RuntimeError('estimator/eval must evaluate to a '
                                   'sklearn-derived Estimator')
return estimator
        except Exception:
print('-'*78, file=sys.stderr)
print('Error parsing estimator/eval', file=sys.stderr)
print('-'*78, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
print('-'*78, file=sys.stderr)
sys.exit(1)
entry_point = self.get_value('estimator/entry_point')
if entry_point is not None:
estimator = load_entry_point(entry_point, 'estimator/entry_point')
if issubclass(estimator, sklearn.base.BaseEstimator):
estimator = estimator(
**self.get_value('estimator/params', default={}))
if not isinstance(estimator, sklearn.base.BaseEstimator):
            raise RuntimeError('estimator/entry_point must load a '
                               'sklearn-derived Estimator')
return estimator
# load estimator from pickle field
pkl = self.get_value('estimator/pickle')
if pkl is not None:
pickl_dir = dirname(abspath(self.path))
path = join(pickl_dir, pkl)
if not isfile(path):
raise RuntimeError('estimator/pickle %s is not a file' % pkl)
with open(path, 'rb') as f:
with prepend_syspath(pickl_dir):
estimator = cPickle.load(f)
if not isinstance(estimator, sklearn.base.BaseEstimator):
raise RuntimeError('estimator/pickle must load a '
'sklearn-derived Estimator')
return estimator
raise RuntimeError('no estimator field') |
def sha1(self):
"""SHA1 hash of the config file itself."""
with open(self.path, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest() |
def get_best_candidate(self):
"""
Returns
    -------
    best_candidate : the best candidate hyper-parameters as defined by
        the acquisition function.
    """
# TODO make this best mean response
self.incumbent = self.surrogate.Y.max()
# Objective function
def z(x):
# TODO make spread of points around x and take mean value.
x = x.copy().reshape(-1, self.n_dims)
y_mean, y_var = self.surrogate.predict(x)
af = self._acquisition_function(y_mean=y_mean, y_var=y_var)
# TODO make -1 dependent on flag in inputs for either max or minimization
return (-1) * af
# Optimization loop
af_values = []
af_args = []
for i in range(self.n_iter):
init = self._get_random_point()
res = minimize(z, init, bounds=self.n_dims * [(0., 1.)],
options={'maxiter': int(self.max_iter), 'disp': 0})
af_args.append(res.x)
af_values.append(res.fun)
# Choose the best
af_values = np.array(af_values).flatten()
af_args = np.array(af_args)
best_index = int(np.argmin(af_values))
best_candidate = af_args[best_index]
return best_candidate |
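# Hedged sketch (not from the source): `_acquisition_function` is called
# in get_best_candidate() but not defined here. Expected improvement (EI)
# over the incumbent is one standard acquisition for a GP surrogate; the
# source may use a different one. `incumbent` plays the role of
# `self.incumbent` above.
import numpy as np
from scipy.stats import norm
def expected_improvement(y_mean, y_var, incumbent):
    """EI for maximization: E[max(y - incumbent, 0)] under N(y_mean, y_var)."""
    std = np.sqrt(np.maximum(y_var, 1e-12))  # guard against zero variance
    z = (y_mean - incumbent) / std
    return (y_mean - incumbent) * norm.cdf(z) + std * norm.pdf(z) |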
def plot_1(data, *args):
"""Plot 1. All iterations (scatter plot)"""
df_all = pd.DataFrame(data)
df_params = nonconstant_parameters(data)
return build_scatter_tooltip(
x=df_all['id'], y=df_all['mean_test_score'], tt=df_params,
title='All Iterations') |
def plot_2(data, *args):
"""Plot 2. Running best score (scatter plot)"""
df_all = pd.DataFrame(data)
df_params = nonconstant_parameters(data)
x = [df_all['id'][0]]
y = [df_all['mean_test_score'][0]]
params = [df_params.loc[0]]
for i in range(len(df_all)):
if df_all['mean_test_score'][i] > y[-1]:
x.append(df_all['id'][i])
y.append(df_all['mean_test_score'][i])
params.append(df_params.loc[i])
return build_scatter_tooltip(
x=x, y=y, tt=pd.DataFrame(params), title='Running best') |
def plot_3(data, ss, *args):
"""t-SNE embedding of the parameters, colored by score
"""
if len(data) <= 1:
warnings.warn("Only one datapoint. Could not compute t-SNE embedding.")
return None
scores = np.array([d['mean_test_score'] for d in data])
# maps each parameters to a vector of floats
warped = np.array([ss.point_to_unit(d['parameters']) for d in data])
# Embed into 2 dimensions with t-SNE
X = TSNE(n_components=2).fit_transform(warped)
e_scores = np.exp(scores)
    mine, maxe = np.min(e_scores), np.max(e_scores)
    if maxe > mine:
        color = (e_scores - mine) / (maxe - mine)
    else:
        # all scores identical: avoid division by zero
        color = np.zeros_like(e_scores)
mapped_colors = list(map(rgb2hex, cm.get_cmap('RdBu_r')(color)))
p = bk.figure(title='t-SNE (unsupervised)', tools=TOOLS)
df_params = nonconstant_parameters(data)
df_params['score'] = scores
df_params['x'] = X[:, 0]
df_params['y'] = X[:, 1]
df_params['color'] = mapped_colors
df_params['radius'] = 1
p.circle(
x='x', y='y', color='color', radius='radius',
source=ColumnDataSource(data=df_params), fill_alpha=0.6,
line_color=None)
cp = p
hover = cp.select(dict(type=HoverTool))
format_tt = [(s, '@%s' % s) for s in df_params.columns]
hover.tooltips = OrderedDict([("index", "$index")] + format_tt)
xax, yax = p.axis
xax.axis_label = 't-SNE coord 1'
yax.axis_label = 't-SNE coord 2'
return p |
def plot_4(data, *args):
"""Scatter plot of score vs each param
"""
params = nonconstant_parameters(data)
scores = np.array([d['mean_test_score'] for d in data])
order = np.argsort(scores)
    for key in params.keys():
        if params[key].dtype == np.dtype('bool'):
            params[key] = params[key].astype(int)  # np.int is deprecated
    # sort once, up front: re-applying `order` inside the loop would
    # scramble the rows on every pass
    params = params.loc[order]
    y = scores[order]
    p_list = []
    for key in params.keys():
        x = params[key]
        try:
            radius = (np.max(x) - np.min(x)) / 100.0
        except Exception:
            print("error making plot4 for '%s'" % key)
            continue
        p_list.append(build_scatter_tooltip(
            x=x, y=y, radius=radius, add_line=False, tt=params,
            xlabel=key, title='Score vs %s' % key))
    return p_list
def add_jump(self, name, min, max, num, warp=None, var_type=float):
    """An integer/float-valued enumerable with `num` items, bounded
    between [`min`, `max`]. Note that the right endpoint of the interval
    includes `max`. This is a wrapper around add_enum. `var_type` can be
    the type int or float, or the string 'int' or 'float'.
    """
    if not isinstance(var_type, type):
        if var_type == 'int':
            var_type = int
        elif var_type == 'float':
            var_type = float
        else:
            raise ValueError('var_type (%s) is not supported. use '
                             '"int" or "float".' % (var_type))
    min, max = map(var_type, (min, max))
    num = int(num)
    if not warp:
        choices = np.linspace(min, max, num=num, dtype=var_type)
    elif (min > 0) and warp == 'log':
        choices = np.logspace(np.log10(min), np.log10(max), num=num,
                              dtype=var_type)
    elif (min <= 0) and warp == 'log':
        raise ValueError('variable %s: log-warping requires min > 0'
                         % name)
    else:
        raise ValueError('variable %s: warp=%s is not supported. use '
                         'None or "log".' % (name, warp))
    self.variables[name] = EnumVariable(name, choices.tolist())
def add_int(self, name, min, max, warp=None):
"""An integer-valued dimension bounded between `min` <= x <= `max`.
Note that the right endpoint of the interval includes `max`.
When `warp` is None, the base measure associated with this dimension
is a categorical distribution with each weight on each of the integers
in [min, max]. With `warp == 'log'`, the base measure is a uniform
distribution on the log of the variable, with bounds at `log(min)` and
`log(max)`. This is appropriate for variables that are "naturally" in
log-space. Other `warp` functions are not supported (yet), but may be
at a later time. Please note that this functionality is not supported
for `hyperopt_tpe`.
"""
min, max = map(int, (min, max))
    if max < min:
        raise ValueError('variable %s: max must be >= min' % name)
    if warp not in (None, 'log'):
        raise ValueError('variable %s: warp=%s is not supported. use '
                         'None or "log".' % (name, warp))
    if min <= 0 and warp == 'log':
        raise ValueError('variable %s: log-warping requires min > 0'
                         % name)
    self.variables[name] = IntVariable(name, min, max, warp)
def add_float(self, name, min, max, warp=None):
"""A floating point-valued dimension bounded `min` <= x < `max`
When `warp` is None, the base measure associated with this dimension
is a uniform distribution on [min, max). With `warp == 'log'`, the
base measure is a uniform distribution on the log of the variable,
with bounds at `log(min)` and `log(max)`. This is appropriate for
variables that are "naturally" in log-space. Other `warp` functions
are not supported (yet), but may be at a later time.
"""
min, max = map(float, (min, max))
    if not min < max:
        raise ValueError('variable %s: min must be < max' % name)
    if warp not in (None, 'log'):
        raise ValueError('variable %s: warp=%s is not supported. use '
                         'None or "log".' % (name, warp))
    if min <= 0 and warp == 'log':
        raise ValueError('variable %s: log-warping requires min > 0'
                         % name)
    self.variables[name] = FloatVariable(name, min, max, warp)
def add_enum(self, name, choices):
"""An enumeration-valued dimension.
The base measure associated with this dimension is a categorical
distribution with equal weight on each element in `choices`.
"""
if not isinstance(choices, Iterable):
raise ValueError('variable %s: choices must be iterable' % name)
self.variables[name] = EnumVariable(name, choices) |
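# Hypothetical usage sketch: assuming the add_* methods above live on a
# SearchSpace-like object holding a `variables` dict, a space could be
# declared like this. The `SearchSpace` name is an assumption.
space = SearchSpace()
space.add_float('learning_rate', min=1e-4, max=1e-1, warp='log')
space.add_int('n_estimators', min=10, max=500)
space.add_enum('kernel', ['linear', 'rbf', 'poly'])
# add_jump: 7 log-spaced candidate values between 1e-3 and 1e3
space.add_jump('C', min=1e-3, max=1e3, num=7, warp='log', var_type=float) |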
def bresenham(x0, y0, x1, y1):
"""Yield integer coordinates on the line from (x0, y0) to (x1, y1).
Input coordinates should be integers.
The result will contain both the start and the end point.
"""
dx = x1 - x0
dy = y1 - y0
xsign = 1 if dx > 0 else -1
ysign = 1 if dy > 0 else -1
dx = abs(dx)
dy = abs(dy)
if dx > dy:
xx, xy, yx, yy = xsign, 0, 0, ysign
else:
dx, dy = dy, dx
xx, xy, yx, yy = 0, ysign, xsign, 0
D = 2*dy - dx
y = 0
for x in range(dx + 1):
yield x0 + x*xx + y*yx, y0 + x*xy + y*yy
if D >= 0:
y += 1
D -= 2*dx
D += 2*dy |
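# Usage example: the generator yields every integer point on the segment,
# in order, including both endpoints, and works in all octants.
assert list(bresenham(0, 0, 3, 5)) == \
    [(0, 0), (1, 1), (1, 2), (2, 3), (2, 4), (3, 5)]
assert list(bresenham(3, 5, 0, 0)) == \
    [(3, 5), (2, 4), (2, 3), (1, 2), (1, 1), (0, 0)] |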
def log_callback(wrapped_function):
"""Decorator that produces DEBUG level log messages before and after
calling a parser method.
If a callback raises an IgnoredMatchException the log will show 'IGNORED'
instead to indicate that the parser will not create any objects from
the matched string.
Example:
DEBUG:poyo.parser:parse_simple <- 123: 456.789
DEBUG:poyo.parser:parse_int <- 123
DEBUG:poyo.parser:parse_int -> 123
DEBUG:poyo.parser:parse_float <- 456.789
DEBUG:poyo.parser:parse_float -> 456.789
DEBUG:poyo.parser:parse_simple -> <Simple name: 123, value: 456.789>
"""
def debug_log(message):
"""Helper to log an escaped version of the given message to DEBUG"""
logger.debug(message.encode('unicode_escape').decode())
@functools.wraps(wrapped_function)
def _wrapper(parser, match, **kwargs):
func_name = wrapped_function.__name__
debug_log(u'{func_name} <- {matched_string}'.format(
func_name=func_name,
matched_string=match.group(),
))
try:
result = wrapped_function(parser, match, **kwargs)
except IgnoredMatchException:
debug_log(u'{func_name} -> IGNORED'.format(func_name=func_name))
raise
debug_log(u'{func_name} -> {result}'.format(
func_name=func_name,
result=result,
))
return result
return _wrapper |
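# Hypothetical usage sketch: applying log_callback to a parser method.
# The `Parser` class and `parse_int` method are illustrative, not from
# the source; the wrapper receives the parser instance and the regex
# match object, as in the module above.
import re
class Parser(object):
    @log_callback
    def parse_int(self, match):
        # logged as 'parse_int <- 123' on entry and 'parse_int -> 123' on exit
        return int(match.group())
Parser().parse_int(re.match(r'\d+', '123'))  # returns 123 |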
def find_match(self):
"""Try to find a pattern that matches the source and calll a parser
method to create Python objects.
A callback that raises an IgnoredMatchException indicates that the
given string data is ignored by the parser and no objects are created.
If none of the pattern match a NoMatchException is raised.
"""
for pattern, callback in self.rules:
match = pattern.match(self.source, pos=self.pos)
if not match:
continue
try:
node = callback(match)
except IgnoredMatchException:
pass
else:
self.seen.append(node)
return match
raise NoMatchException(
'None of the known patterns match for {}'
''.format(self.source[self.pos:])
) |
def add_child(self, child):
"""If the given object is an instance of Child add it to self and
register self as a parent.
"""
if not isinstance(child, ChildMixin):
        raise TypeError(
            'Requires instance of ChildMixin. '
            'Got {}'.format(type(child))
        )
child.parent = self
self._children.append(child) |
def get_ip_packet(data, client_port, server_port, is_loopback=False):
""" if client_port is 0 any client_port is good """
header = _loopback if is_loopback else _ethernet
try:
header.unpack(data)
except Exception as ex:
raise ValueError('Bad header: %s' % ex)
tcp_p = getattr(header.data, 'data', None)
    if not isinstance(tcp_p, dpkt.tcp.TCP):
raise ValueError('Not a TCP packet')
if tcp_p.dport == server_port:
if client_port != 0 and tcp_p.sport != client_port:
raise ValueError('Request from different client')
elif tcp_p.sport == server_port:
if client_port != 0 and tcp_p.dport != client_port:
raise ValueError('Reply for different client')
else:
raise ValueError('Packet not for/from client/server')
return header.data |
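# Hedged sketch (not from the source): `_ethernet` and `_loopback` are
# module-level link-layer parsers used by get_ip_packet(). Reusable dpkt
# packet objects, unpacked in place per frame, are a plausible
# definition; dpkt Packet instances expose an unpack(buf) method.
import dpkt
_ethernet = dpkt.ethernet.Ethernet()
_loopback = dpkt.loopback.Loopback() |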
def listening_ports():
""" Reads listening ports from /proc/net/tcp """
ports = []
if not os.path.exists(PROC_TCP):
return ports
with open(PROC_TCP) as fh:
for line in fh:
if '00000000:0000' not in line:
continue
parts = line.lstrip(' ').split(' ')
if parts[2] != '00000000:0000':
continue
local_port = parts[1].split(':')[1]
            local_port = int(local_port, 16)
ports.append(local_port)
return ports |
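# Example /proc/net/tcp line, for reference (not from the source). The
# second column is the local address as HEXIP:HEXPORT and the third the
# remote address; a remote address of 00000000:0000 marks a listening
# socket:
#
#   0: 00000000:1F90 00000000:0000 0A 00000000:00000000 ...
#
# For that line, listening_ports() would yield int('1F90', 16) == 8080. |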
def report(self):
""" get stats & show them """
self._output.write('\r')
sort_by = 'avg'
results = {}
for key, latencies in self._latencies_by_method.items():
result = {}
result['count'] = len(latencies)
result['avg'] = sum(latencies) / len(latencies)
result['min'] = min(latencies)
result['max'] = max(latencies)
latencies = sorted(latencies)
result['p90'] = percentile(latencies, 0.90)
result['p95'] = percentile(latencies, 0.95)
result['p99'] = percentile(latencies, 0.99)
result['p999'] = percentile(latencies, 0.999)
results[key] = result
headers = ['method', 'count', 'avg', 'min', 'max', 'p90', 'p95', 'p99', 'p999']
    results = sorted(results.items(), key=lambda it: it[1][sort_by], reverse=True)
    def row(key, res):
        # one table row: method name followed by its stats, in header order
        return tuple([key] + [res[header] for header in headers[1:]])
    data = [row(key, result) for key, result in results]
self._output.write('%s\n' % tabulate(data, headers=headers))
self._output.flush() |
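# Hedged sketch (not from the source): `percentile` is called on an
# already-sorted list in report() but not defined here. A
# linear-interpolation percentile over ascending data is a plausible
# implementation; the source's exact interpolation rule is an assumption.
def percentile(sorted_values, fraction):
    """Return the `fraction` (0..1) percentile of an ascending list."""
    if not sorted_values:
        return None
    k = (len(sorted_values) - 1) * fraction
    lower = int(k)
    upper = min(lower + 1, len(sorted_values) - 1)
    return (sorted_values[lower]
            + (sorted_values[upper] - sorted_values[lower]) * (k - lower)) |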
def of_structs(cls, a, b):
"""
Diff two thrift structs and return the result as a ThriftDiff instance
"""
t_diff = ThriftDiff(a, b)
t_diff._do_diff()
return t_diff |
def of_messages(cls, msg_a, msg_b):
"""
Diff two thrift messages by comparing their args, raises exceptions if
for some reason the messages can't be diffed. Only args of type 'struct'
are compared.
Returns a list of ThriftDiff results - one for each struct arg
"""
ok_to_diff, reason = cls.can_diff(msg_a, msg_b)
if not ok_to_diff:
raise ValueError(reason)
return [cls.of_structs(x.value, y.value)
for x, y in zip(msg_a.args, msg_b.args)
if x.field_type == 'struct'] |
def can_diff(msg_a, msg_b):
"""
Check if two thrift messages are diff ready.
Returns a tuple of (boolean, reason_string), i.e. (False, reason_string)
if the messages can not be diffed along with the reason and
(True, None) for the opposite case
"""
    if msg_a.method != msg_b.method:
        return False, 'method names of the messages do not match'
    if len(msg_a.args) != len(msg_b.args) \
            or not msg_a.args.is_isomorphic_to(msg_b.args):
        return False, 'argument signatures of the methods do not match'
    return True, None
return True, None |
def is_isomorphic_to(self, other):
"""
Returns true if all fields of other struct are isomorphic to this
struct's fields
"""
    return (isinstance(other, self.__class__) and
            len(self.fields) == len(other.fields) and
            all(a.is_isomorphic_to(b)
                for a, b in zip(self.fields, other.fields)))
def is_isomorphic_to(self, other):
"""
    Returns true if the other field's metadata (everything except its
    value) is the same as this one's
"""
return (isinstance(other, self.__class__)
and self.field_type == other.field_type
and self.field_id == other.field_id) |
def read(cls, data,
protocol=None,
fallback_protocol=TBinaryProtocol,
finagle_thrift=False,
max_fields=MAX_FIELDS,
max_list_size=MAX_LIST_SIZE,
max_map_size=MAX_MAP_SIZE,
max_set_size=MAX_SET_SIZE,
read_values=False):
""" tries to deserialize a message, might fail if data is missing """
# do we have enough data?
if len(data) < cls.MIN_MESSAGE_SIZE:
raise ValueError('not enough data')
if protocol is None:
protocol = cls.detect_protocol(data, fallback_protocol)
trans = TTransport.TMemoryBuffer(data)
proto = protocol(trans)
# finagle-thrift prepends a RequestHeader
#
# See: http://git.io/vsziG
header = None
if finagle_thrift:
try:
header = ThriftStruct.read(
proto,
max_fields,
max_list_size,
max_map_size,
max_set_size,
read_values)
        except Exception:
# reset stream, maybe it's not finagle-thrift
trans = TTransport.TMemoryBuffer(data)
proto = protocol(trans)
# unpack the message
method, mtype, seqid = proto.readMessageBegin()
mtype = cls.message_type_to_str(mtype)
if len(method) == 0 or method.isspace() or method.startswith(' '):
raise ValueError('no method name')
if len(method) > cls.MAX_METHOD_LENGTH:
raise ValueError('method name too long')
# we might have made it until this point by mere chance, so filter out
# suspicious method names
valid = range(33, 127)
if any(ord(char) not in valid for char in method):
        raise ValueError('invalid method name: %s' % method)
args = ThriftStruct.read(
proto,
max_fields,
max_list_size,
max_map_size,
max_set_size,
read_values)
proto.readMessageEnd()
# Note: this is a bit fragile, the right thing would be to count bytes
# as we read them (i.e.: when calling readI32, etc).
msglen = trans._buffer.tell()
return cls(method, mtype, seqid, args, header, msglen), msglen |
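# Hypothetical usage sketch: read() returns `cls(...)` plus the number of
# bytes consumed, so it is presumably a classmethod on a thrift-message
# class. The `ThriftMessage` name is an assumption.
def try_parse(payload):
    try:
        msg, consumed = ThriftMessage.read(payload)
    except ValueError:
        return None  # not enough data, or not a thrift message
    return msg, consumed |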