def validate(self):
"""
Validates the feature configuration and returns a list of errors (an empty list if there are none).
validate should:
* check that required variables are present
* warn on unused variables
errors should either be reported via self._log_error(), or raise an exception
"""
if self.target:
for k in self.target.keys():
if k in self.deprecated_options:
self.logger.warn(
self.deprecated_options[k].format(option=k, feature=self.feature_name))
elif (k not in self.valid_options and k not in self.required_options and
'*' not in self.valid_options):
self.logger.warn("Unused option %s in %s!" % (k, self.feature_name))
for k in self.required_options:
if not self.target.has(k):
self._log_error(
"Required option %s not present in feature %s!" % (k, self.feature_name)) | 0.007828 |
def debug_shell(user_ns, user_global_ns, traceback=None, execWrapper=None):
"""
Spawns some interactive shell. Tries to use IPython if available.
Falls back to :func:`pdb.post_mortem` or :func:`simple_debug_shell`.
:param dict[str] user_ns:
:param dict[str] user_global_ns:
:param traceback:
:param execWrapper:
:return: nothing
"""
ipshell = None
try:
# noinspection PyPackageRequirements
import IPython
have_ipython = True
except ImportError:
have_ipython = False
if not ipshell and traceback and have_ipython:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.core.debugger import Pdb
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.terminal.debugger import TerminalPdb
# noinspection PyPackageRequirements,PyUnresolvedReferences
from IPython.terminal.ipapp import TerminalIPythonApp
ipapp = TerminalIPythonApp.instance()
ipapp.interact = False # Avoid output (banner, prints)
ipapp.initialize(argv=[])
def_colors = ipapp.shell.colors
pdb_obj = TerminalPdb(def_colors)
pdb_obj.botframe = None # not sure. exception otherwise at quit
def ipshell():
"""
Run the IPython shell.
"""
pdb_obj.interaction(None, traceback=traceback)
except Exception:
print("IPython Pdb exception:")
better_exchook(*sys.exc_info(), autodebugshell=False)
if not ipshell and have_ipython:
# noinspection PyBroadException
try:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import IPython
# noinspection PyPackageRequirements,PyUnresolvedReferences
import IPython.terminal.embed
class DummyMod(object):
"""Dummy module"""
module = DummyMod()
module.__dict__ = user_global_ns
module.__name__ = "_DummyMod"
if "__name__" not in user_ns:
user_ns = user_ns.copy()
user_ns["__name__"] = "_DummyUserNsMod"
ipshell = IPython.terminal.embed.InteractiveShellEmbed.instance(
user_ns=user_ns, user_module=module)
except Exception:
print("IPython not available:")
better_exchook(*sys.exc_info(), autodebugshell=False)
else:
if execWrapper:
old = ipshell.run_code
ipshell.run_code = lambda code: execWrapper(lambda: old(code))
if ipshell:
ipshell()
else:
print("Use simple debug shell:")
if traceback:
import pdb
pdb.post_mortem(traceback)
else:
simple_debug_shell(user_global_ns, user_ns)
def get_institute_details(institute):
""" Get details for this user. """
result = []
for datastore in _get_datastores():
value = datastore.get_institute_details(institute)
value['datastore'] = datastore.config['DESCRIPTION']
result.append(value)
return result
def encrypt_put_item(encrypt_method, crypto_config_method, write_method, **kwargs):
# type: (Callable, Callable, Callable, **Any) -> Dict
# TODO: narrow this down
"""Transparently encrypt an item before putting it to the table.
:param callable encrypt_method: Method to use to encrypt items
:param callable crypto_config_method: Method that accepts ``kwargs`` and provides a :class:`CryptoConfig`
:param callable write_method: Method that writes to the table
:param **kwargs: Keyword arguments to pass to ``write_method``
:return: DynamoDB response
:rtype: dict
"""
crypto_config, ddb_kwargs = crypto_config_method(**kwargs)
ddb_kwargs["Item"] = encrypt_method(
item=ddb_kwargs["Item"],
crypto_config=crypto_config.with_item(_item_transformer(encrypt_method)(ddb_kwargs["Item"])),
)
return write_method(**ddb_kwargs)
def find_field_generator_templates(obj):
"""
Return dictionary with the names and instances of
all tohu.BaseGenerator occurring in the given
object's class & instance namespaces.
"""
cls_dict = obj.__class__.__dict__
obj_dict = obj.__dict__
#debug_print_dict(cls_dict, 'cls_dict')
#debug_print_dict(obj_dict, 'obj_dict')
field_gens = {}
update_with_tohu_generators(field_gens, cls_dict)
update_with_tohu_generators(field_gens, obj_dict)
return field_gens
def get(self, **params):
'''
Returns details for a specific offer.
.. code-block:: python
amadeus.shopping.hotel_offer('XXX').get()
:rtype: amadeus.Response
:raises amadeus.ResponseError: if the request could not be completed
'''
return self.client.get('/v2/shopping/hotel-offers/{0}'
.format(self.offer_id), **params)
def generate_tokens(doc, regex=CRE_TOKEN, strip=True, nonwords=False):
r"""Return a sequence of words or tokens, using a re.match iteratively through the str
>>> doc = "John D. Rock\n\nObjective: \n\tSeeking a position as Software --Architect-- / _Project Lead_ that can utilize my expertise and"
>>> doc += " experiences in business application development and proven records in delivering 90's software. \n\nSummary: \n\tSoftware Architect"
>>> doc += " who has gone through several full product-delivery life cycles from requirements gathering to deployment / production, and"
>>> doc += " skilled in all areas of software development from client-side JavaScript to database modeling. With strong experiences in:"
>>> doc += " \n\tRequirements gathering and analysis."
>>> len(list(generate_tokens(doc, strip=False, nonwords=True)))
82
>>> seq = list(generate_tokens(doc, strip=False, nonwords=False))
>>> len(seq)
70
>>> '.' in seq or ':' in seq
False
>>> s = set(generate_tokens(doc, strip=False, nonwords=True))
>>> all(t in s for t in ('D', '.', ':', '_Project', 'Lead_', "90's", "Architect", "product-delivery"))
True
"""
if isinstance(regex, basestring):
regex = re.compile(regex)
for w in regex.finditer(doc):
if w:
w = w.group()
if strip:
w = w.strip(r'-_*`()}{' + r"'")
if w and (nonwords or not re.match(r'^' + RE_NONWORD + '$', w)):
yield w
def get(self, key: Any, default: Any = None) -> Any:
"""
Read a value from the request cookies, Koa-style.
"""
return self._req_cookies.get(key, default)
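# A small usage sketch for a cookie accessor like the one above; RequestStub and
# the cookie values are hypothetical stand-ins for the framework's request object.
class RequestStub:
    def __init__(self, cookies):
        self._req_cookies = dict(cookies)

    def get(self, key, default=None):
        # Read a cookie from the request, Koa-style.
        return self._req_cookies.get(key, default)

req = RequestStub({"session": "abc123"})
print(req.get("session"))           # abc123
print(req.get("missing", "n/a"))    # n/a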
def _extract_level(self, topic_str):
"""Turn 'engine.0.INFO.extra' into (logging.INFO, 'engine.0.extra')"""
topics = topic_str.split('.')
for idx,t in enumerate(topics):
level = getattr(logging, t, None)
if level is not None:
break
if level is None:
level = logging.INFO
else:
topics.pop(idx)
return level, '.'.join(topics)
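# A standalone check of the level-extraction logic above; the topic strings are
# made-up examples, and logging.INFO is 20 in the standard library.
import logging

def extract_level(topic_str):
    topics = topic_str.split('.')
    level = None
    idx = 0
    for idx, t in enumerate(topics):
        level = getattr(logging, t, None)
        if level is not None:
            break
    if level is None:
        level = logging.INFO
    else:
        topics.pop(idx)
    return level, '.'.join(topics)

print(extract_level('engine.0.INFO.extra'))   # (20, 'engine.0.extra')
print(extract_level('engine.0.extra'))        # (20, 'engine.0.extra') -- falls back to INFO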
def sum_queryset(qs: QuerySet, key: str = 'amount', default=Decimal(0)) -> Decimal:
"""
Returns the aggregate sum of a queryset field (by default 'amount').
:param qs: QuerySet
:param key: Field to sum (default: 'amount')
:param default: Default value if the queryset has no results
:return: Sum of the field's values, or ``default`` if the aggregate is None
"""
res = qs.aggregate(b=Sum(key))['b']
return default if res is None else res
def connect(self, index):
"""Connect signals needed for dependency updates."""
# Determine which model is the target model as either side of the relation
# may be passed as `field`.
if index.object_type == self.field.rel.model:
self.model = self.field.rel.related_model
self.accessor = self.field.rel.field.attname
else:
self.model = self.field.rel.model
if self.field.rel.symmetrical:
# Symmetrical m2m relation on self has no reverse accessor.
raise NotImplementedError(
'Dependencies on symmetrical M2M relations are not supported due '
'to strange handling of the m2m_changed signal which only makes '
'half of the relation visible during signal execution. For now you '
'need to use symmetrical=False on the M2M field definition.'
)
else:
self.accessor = self.field.rel.get_accessor_name()
# Connect signals.
signals = super().connect(index)
m2m_signal = ElasticSignal(self, 'process_m2m', pass_kwargs=True)
m2m_signal.connect(m2m_changed, sender=self.field.through)
signals.append(m2m_signal)
# If the relation has a custom through model, we need to subscribe to it.
if not self.field.rel.through._meta.auto_created: # pylint: disable=protected-access
signal = ElasticSignal(self, 'process_m2m_through_save', pass_kwargs=True)
signal.connect(post_save, sender=self.field.rel.through)
signals.append(signal)
signal = ElasticSignal(self, 'process_m2m_through_pre_delete', pass_kwargs=True)
signal.connect(pre_delete, sender=self.field.rel.through)
signals.append(signal)
signal = ElasticSignal(self, 'process_m2m_through_post_delete', pass_kwargs=True)
signal.connect(post_delete, sender=self.field.rel.through)
signals.append(signal)
return signals
def add_values(self, values):
'''add some data to the graph'''
if self.child.is_alive():
self.parent_pipe.send(values)
def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True,
tol=0.1, **kwargs):
"""
Another constructor that creates an instance from a molecule.
Covalent bonds and other bond-based topologies (angles and
dihedrals) can be automatically determined. Cannot be used for
non bond-based topologies, e.g., improper dihedrals.
Args:
molecule (Molecule): Input molecule.
bond (bool): Whether find bonds. If set to False, angle and
dihedral searching will be skipped. Default to True.
angle (bool): Whether find angles. Default to True.
dihedral (bool): Whether find dihedrals. Default to True.
tol (float): Bond distance tolerance. Default to 0.1.
Not recommended to alter.
**kwargs: Other kwargs supported by Topology.
"""
real_bonds = molecule.get_covalent_bonds(tol=tol)
bond_list = [list(map(molecule.index, [b.site1, b.site2]))
for b in real_bonds]
if not all((bond, bond_list)):
# do not search for others if not searching for bonds or no bonds
return cls(sites=molecule, **kwargs)
else:
angle_list, dihedral_list = [], []
dests, freq = np.unique(bond_list, return_counts=True)
hubs = dests[np.where(freq > 1)].tolist()
bond_arr = np.array(bond_list)
if len(hubs) > 0:
hub_spokes = {}
for hub in hubs:
ix = np.any(np.isin(bond_arr, hub), axis=1)
bonds = np.unique(bond_arr[ix]).tolist()
bonds.remove(hub)
hub_spokes[hub] = bonds
# skip angle or dihedral searching if too few bonds or hubs
dihedral = False if len(bond_list) < 3 or len(hubs) < 2 \
else dihedral
angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle
if angle:
for k, v in hub_spokes.items():
angle_list.extend([[i, k, j] for i, j in
itertools.combinations(v, 2)])
if dihedral:
hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)]
for i, j in hub_cons.tolist():
ks = [k for k in hub_spokes[i] if k != j]
ls = [l for l in hub_spokes[j] if l != i]
dihedral_list.extend([[k, i, j, l] for k, l in
itertools.product(ks, ls)
if k != l])
topologies = {k: v for k, v
in zip(SECTION_KEYWORDS["topology"][:3],
[bond_list, angle_list, dihedral_list])
if len(v) > 0}
topologies = None if len(topologies) == 0 else topologies
return cls(sites=molecule, topologies=topologies, **kwargs)
def _sample_coef(self, X, y, weights=None, n_draws=100, n_bootstraps=1,
objective='auto'):
"""Simulate from the posterior of the coefficients.
NOTE: A `gridsearch` is done `n_bootstraps` many times, so keep
`n_bootstraps` small. Make `n_bootstraps < n_draws` to take advantage
of the expensive bootstrap samples of the smoothing parameters.
Parameters
-----------
X : array of shape (n_samples, m_features)
input data
y : array of shape (n_samples,)
response vector
weights : np.array of shape (n_samples,)
sample weights
n_draws : positive int, optional (default=100)
The number of samples to draw from the posterior distribution of
the coefficients and smoothing parameters
n_bootstraps : positive int, optional (default=1)
The number of bootstrap samples to draw from simulations of the
response (from the already fitted model) to estimate the
distribution of the smoothing parameters given the response data.
If `n_bootstraps` is 1, then only the already fitted model's
smoothing parameters is used.
objective : string, optional (default='auto')
metric to optimize in grid search. must be in
['AIC', 'AICc', 'GCV', 'UBRE', 'auto']
if 'auto', then grid search will optimize GCV for models with
unknown scale and UBRE for models with known scale.
Returns
-------
coef_samples : array of shape (n_draws, n_samples)
Approximate simulations of the coefficients drawn from the
posterior distribution of the coefficients and smoothing
parameters given the response data
References
----------
Simon N. Wood, 2006. Generalized Additive Models: an introduction with
R. Section 4.9.3 (pages 198–199) and Section 5.4.2 (page 256–257).
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
if n_bootstraps < 1:
raise ValueError('n_bootstraps must be >= 1;'
' got {}'.format(n_bootstraps))
if n_draws < 1:
raise ValueError('n_draws must be >= 1;'
' got {}'.format(n_draws))
coef_bootstraps, cov_bootstraps = (
self._bootstrap_samples_of_smoothing(X, y, weights=weights,
n_bootstraps=n_bootstraps,
objective=objective))
coef_draws = self._simulate_coef_from_bootstraps(
n_draws, coef_bootstraps, cov_bootstraps)
return coef_draws
def postinit(self, func=None, args=None, keywords=None):
"""Do some setup after initialisation.
:param func: What is being called.
:type func: NodeNG or None
:param args: The positional arguments being given to the call.
:type args: list(NodeNG) or None
:param keywords: The keyword arguments being given to the call.
:type keywords: list(NodeNG) or None
"""
self.func = func
self.args = args
self.keywords = keywords
def draggable(self, value: Union[bool, str]) -> None:
"""Set ``draggable`` property.
``value`` is boolean or string.
"""
if value is False:
self.removeAttribute('draggable')
else:
self.setAttribute('draggable', value)
def tabstr2list(data):
"""tabstr2list"""
alist = data.split(os.linesep)
blist = alist[1].split('\t')
clist = []
for num in range(0, len(alist)):
ilist = alist[num].split('\t')
clist = clist+[ilist]
cclist = clist[:-1]
# the last element is empty because the string ends with os.linesep
return cclist
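# A quick demonstration of tabstr2list above (assuming the function is defined in
# the same module); the table content is made up. The function splits on
# os.linesep, so the sample ends with a trailing separator, which is why the last
# (empty) row is dropped.
import os

sample = "a\tb\tc" + os.linesep + "1\t2\t3" + os.linesep
print(tabstr2list(sample))   # [['a', 'b', 'c'], ['1', '2', '3']]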
def generate(self, **kwargs):
"""
Generate some text from the database. By default only 70 words are
generated, but you can change this using keyword arguments.
Keyword arguments:
- ``wlen``: maximum length (words)
- ``words``: a list of words to use to begin the text with
"""
words = list(map(self._sanitize, kwargs.get('words', [])))
max_wlen = kwargs.get('wlen', 70)
wlen = len(words)
if wlen < 2:
if not self._db:
return ''
if wlen == 0:
words = sample(self._db.keys(), 1)[0].split(self._WSEP)
elif wlen == 1:
spl = [k for k in self._db.keys()
if k.startswith(words[0]+self._WSEP)]
words.append(sample(spl, 1)[0].split(self._WSEP)[1])
wlen = 2
while wlen < max_wlen:
next_word = self._get(words[-2], words[-1])
if next_word is None:
break
words.append(next_word)
wlen += 1
return ' '.join(words)
def assert_succeeds(exception, msg_fmt="{msg}"):
"""Fail if a specific exception is raised within the context.
This assertion should be used for cases, where successfully running a
function signals a successful test, and raising the exception of a
certain type signals a test failure. All other raised exceptions are
passed on and will usually still result in a test error. This can be
used to signal the intent of a block.
>>> l = ["foo", "bar"]
>>> with assert_succeeds(ValueError):
... i = l.index("foo")
...
>>> with assert_succeeds(ValueError):
... raise ValueError()
...
Traceback (most recent call last):
...
AssertionError: ValueError was unexpectedly raised
>>> with assert_succeeds(ValueError):
... raise TypeError("Wrong Error")
...
Traceback (most recent call last):
...
TypeError: Wrong Error
The following msg_fmt arguments are supported:
* msg - the default error message
* exc_type - exception type
* exc_name - exception type name
* exception - exception that was raised
"""
class _AssertSucceeds(object):
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type and issubclass(exc_type, exception):
msg = exception.__name__ + " was unexpectedly raised"
fail(
msg_fmt.format(
msg=msg,
exc_type=exception,
exc_name=exception.__name__,
exception=exc_val,
)
)
return _AssertSucceeds()
def dir(self, filetype, **kwargs):
"""Return the directory containing a file of a given type.
Parameters
----------
filetype : str
File type parameter.
Returns
-------
dir : str
Directory containing the file.
"""
full = kwargs.get('full', None)
if not full:
full = self.full(filetype, **kwargs)
return os.path.dirname(full)
def add_or_update(data, item, value):
"""
Add or update value in configuration file format used by proftpd.
Args:
data (str): Configuration file as string.
item (str): What option will be added/updated.
value (str): Value of option.
Returns:
str: updated configuration
"""
data = data.splitlines()
# convert to a list of bytearrays (this is useful because bytearrays are
# mutable: references passed to other functions can modify the objects in
# the list in place, unlike strings)
data = map(lambda x: bytearray(x), data)
# search for the item in raw (uncommented) values
conf = filter(lambda x: x.strip() and x.strip().split()[0] == item, data)
if conf:
conf[0][:] = conf[0].strip().split()[0] + " " + value
else:
# search for the item in commented values, if found, uncomment it
comments = filter(
lambda x: x.strip().startswith("#")
and len(x.split("#")) >= 2
and x.split("#")[1].split()
and x.split("#")[1].split()[0] == item,
data
)
if comments:
comments[0][:] = comments[0].split("#")[1].split()[0] + " " + value
else:
# add item, if not found in raw/commented values
data.append(item + " " + value + "\n")
return "\n".join(map(lambda x: str(x), data)) | 0.002835 |
def _get_siblings(self, pos):
"""lists the parent directory of pos """
parent = self.parent_position(pos)
siblings = [pos]
if parent is not None:
siblings = self._list_dir(parent)
return siblings
def hydrate_size(data, force=False):
"""Add file and dir sizes.
Add sizes to ``basic:file:``, ``list:basic:file``, ``basic:dir:``
and ``list:basic:dir:`` fields.
``force`` parameter is used to recompute file sizes also on objects
that already have these values, e.g. in migrations.
"""
from .data import Data # prevent circular import
def get_dir_size(path):
"""Get directory size."""
total_size = 0
for dirpath, _, filenames in os.walk(path):
for file_name in filenames:
file_path = os.path.join(dirpath, file_name)
if not os.path.isfile(file_path): # Skip all "not normal" files (links, ...)
continue
total_size += os.path.getsize(file_path)
return total_size
def get_refs_size(obj, obj_path):
"""Calculate size of all references of ``obj``.
:param dict obj: Data object's output field (of type file/dir).
:param str obj_path: Path to ``obj``.
"""
total_size = 0
for ref in obj.get('refs', []):
ref_path = data.location.get_path(filename=ref)
if ref_path in obj_path:
# It is a common case that ``obj['file']`` is also contained in
# one of obj['ref']. In that case, we need to make sure that its
# size is not counted twice:
continue
if os.path.isfile(ref_path):
total_size += os.path.getsize(ref_path)
elif os.path.isdir(ref_path):
total_size += get_dir_size(ref_path)
return total_size
def add_file_size(obj):
"""Add file size to the basic:file field."""
if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force:
return
path = data.location.get_path(filename=obj['file'])
if not os.path.isfile(path):
raise ValidationError("Referenced file does not exist ({})".format(path))
obj['size'] = os.path.getsize(path)
obj['total_size'] = obj['size'] + get_refs_size(obj, path)
def add_dir_size(obj):
"""Add directory size to the basic:dir field."""
if data.status in [Data.STATUS_DONE, Data.STATUS_ERROR] and 'size' in obj and not force:
return
path = data.location.get_path(filename=obj['dir'])
if not os.path.isdir(path):
raise ValidationError("Referenced dir does not exist ({})".format(path))
obj['size'] = get_dir_size(path)
obj['total_size'] = obj['size'] + get_refs_size(obj, path)
data_size = 0
for field_schema, fields in iterate_fields(data.output, data.process.output_schema):
name = field_schema['name']
value = fields[name]
if 'type' in field_schema:
if field_schema['type'].startswith('basic:file:'):
add_file_size(value)
data_size += value.get('total_size', 0)
elif field_schema['type'].startswith('list:basic:file:'):
for obj in value:
add_file_size(obj)
data_size += obj.get('total_size', 0)
elif field_schema['type'].startswith('basic:dir:'):
add_dir_size(value)
data_size += value.get('total_size', 0)
elif field_schema['type'].startswith('list:basic:dir:'):
for obj in value:
add_dir_size(obj)
data_size += obj.get('total_size', 0)
data.size = data_size
def resolve_theme(self, name):
"""
From given theme name, return theme file path from
``settings.CODEMIRROR_THEMES`` map.
Arguments:
name (string): Theme name.
Raises:
KeyError: When given name does not exist in
``settings.CODEMIRROR_THEMES``.
Returns:
string: Theme file path.
"""
if name not in settings.CODEMIRROR_THEMES:
msg = ("Given theme name '{}' does not exists in "
"'settings.CODEMIRROR_THEMES'.")
raise UnknowThemeError(msg.format(name))
return settings.CODEMIRROR_THEMES.get(name)
def firmware_image_create(self, datafile, name, **kwargs): # noqa: E501
"""Create an image # noqa: E501
Create a firmware image. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.firmware_image_create(datafile, name, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param file datafile: The firmware image file to upload (required)
:param str name: The name of the firmware image (required)
:param str description: The description of the firmware image
:return: FirmwareImage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.firmware_image_create_with_http_info(datafile, name, **kwargs) # noqa: E501
else:
(data) = self.firmware_image_create_with_http_info(datafile, name, **kwargs) # noqa: E501
return data
def stream_agent_show(self, metric_key, department_id=None, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/apis#get-single-agent-status"
api_path = "/stream/agents/{metric_key}"
api_path = api_path.format(metric_key=metric_key)
api_query = {}
if "query" in kwargs.keys():
api_query.update(kwargs["query"])
del kwargs["query"]
if department_id:
api_query.update({
"department_id": department_id,
})
return self.call(api_path, query=api_query, **kwargs)
def get_table(self, table_name, primary_id=None, primary_type=None):
"""Load or create a table.
This is now the same as ``create_table``.
::
table = db.get_table('population')
# you can also use the short-hand syntax:
table = db['population']
"""
return self.create_table(table_name, primary_id, primary_type)
def compute_eig(self):
"""
Compute the three eigenvalues of the tensor: eig1, eig2, ei3.
"""
self.eig1 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eig2 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
self.eig3 = _SHGrid.from_array(_np.zeros_like(self.vxx.data),
grid='DH')
for i in range(self.nlat):
for j in range(self.nlon):
a = _np.array([[self.vxx.data[i, j],
self.vxy.data[i, j],
self.vxz.data[i, j]],
[self.vyx.data[i, j],
self.vyy.data[i, j],
self.vyz.data[i, j]],
[self.vzx.data[i, j],
self.vzy.data[i, j],
self.vzz.data[i, j]]])
eigs = _eigvalsh(a)
self.eig1.data[i, j] = eigs[2]
self.eig2.data[i, j] = eigs[1]
self.eig3.data[i, j] = eigs[0]
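# A self-contained numpy sketch of the per-point eigenvalue step above, assuming
# _eigvalsh refers to numpy.linalg.eigvalsh; the 3x3 symmetric tensor is made up.
import numpy as np

a = np.array([[2.0, 0.3, 0.1],
              [0.3, 1.0, 0.0],
              [0.1, 0.0, -1.0]])
eigs = np.linalg.eigvalsh(a)                   # returned in ascending order
eig1, eig2, eig3 = eigs[2], eigs[1], eigs[0]   # largest first, as compute_eig stores them
print(eig1, eig2, eig3)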
def _find_mapreduce_yaml(start, checked):
"""Traverse the directory tree identified by start until a directory already
in checked is encountered or the path of mapreduce.yaml is found.
Checked is present both to make loop termination easy to reason about and so
that the same directories do not get rechecked.
Args:
start: the path to start in and work upward from
checked: the set of already examined directories
Returns:
the path of mapreduce.yaml file or None if not found.
"""
dir = start
while dir not in checked:
checked.add(dir)
for mr_yaml_name in MR_YAML_NAMES:
yaml_path = os.path.join(dir, mr_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
dir = os.path.dirname(dir)
return None
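# A generic, self-contained variant of the upward search above; the file names
# and the starting directory are arbitrary examples.
import os

def find_upwards(start, names):
    checked = set()
    d = start
    while d not in checked:           # terminates at the filesystem root
        checked.add(d)
        for name in names:
            candidate = os.path.join(d, name)
            if os.path.exists(candidate):
                return candidate
        d = os.path.dirname(d)
    return None

print(find_upwards(os.getcwd(), ["setup.py", "pyproject.toml"]))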
def _verify(self, path_prefix=None):
"""Verifies that this schema's doc spec is valid and makes sense."""
for field, spec in self.doc_spec.iteritems():
path = self._append_path(path_prefix, field)
# Standard dict-based spec
if isinstance(spec, dict):
self._verify_field_spec(spec, path)
else:
raise SchemaFormatException("Invalid field definition for {}", path)
def _normalize_operation(operation):
"""Normalizes the given operation string. For now, this simply means
converting the given string to uppercase, looking it up in
:attr:`~.ops_map`, and returning the corresponding class if
present.
Args:
operation (str): The operation string to convert.
Returns:
The class corresponding to the given string,
:class:`~.CreateOperation` or :class:`~TransferOperation`.
.. important:: If the :meth:`str.upper` step, or the
:attr:`~.ops_map` lookup fails, the given ``operation``
argument is returned.
"""
try:
operation = operation.upper()
except AttributeError:
pass
try:
operation = ops_map[operation]()
except KeyError:
pass
return operation
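# A minimal sketch of the normalization above using a hypothetical ops_map;
# CreateOperation and TransferOperation here are stand-in classes, not the
# originals from the source library.
class CreateOperation:
    pass

class TransferOperation:
    pass

ops_map = {'CREATE': CreateOperation, 'TRANSFER': TransferOperation}

def normalize_operation(operation):
    try:
        operation = operation.upper()
    except AttributeError:
        pass
    try:
        operation = ops_map[operation]()
    except KeyError:
        pass
    return operation

print(normalize_operation('create'))    # a CreateOperation instance
print(normalize_operation('unknown'))   # not in ops_map: returns 'UNKNOWN'
print(normalize_operation(42))          # returned unchanged as 42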
def shorter_name(key):
"""Return a shorter name for an id.
Does this by only taking the last part of the URI,
after the last / and the last #. Also replaces - and . with _.
Parameters
----------
key: str
Some URI
Returns
-------
key_short: str
A shortened, but more ambiguous, identifier
"""
key_short = key
for sep in ['#', '/']:
ind = key_short.rfind(sep)
if ind >= 0:  # rfind returns -1 (not None) when the separator is absent
key_short = key_short[ind+1:]
return key_short.replace('-', '_').replace('.', '_')
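# A quick usage check of shorter_name above (assuming the function is defined in
# the same module); the URIs are made-up examples.
print(shorter_name('http://xmlns.com/foaf/0.1/first-name'))   # first_name
print(shorter_name('http://example.org/ns#has.value'))        # has_value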
def _ssh_build_mic(self, session_id, username, service, auth_method):
"""
Create the SSH2 MIC filed for gssapi-with-mic.
:param str session_id: The SSH session ID
:param str username: The name of the user who attempts to login
:param str service: The requested SSH service
:param str auth_method: The requested SSH authentication mechanism
:return: The MIC as defined in RFC 4462. The contents of the
MIC field are:
string session_identifier,
byte SSH_MSG_USERAUTH_REQUEST,
string user-name,
string service (ssh-connection),
string authentication-method
(gssapi-with-mic or gssapi-keyex)
"""
mic = self._make_uint32(len(session_id))
mic += session_id
mic += struct.pack("B", MSG_USERAUTH_REQUEST)
mic += self._make_uint32(len(username))
mic += username.encode()
mic += self._make_uint32(len(service))
mic += service.encode()
mic += self._make_uint32(len(auth_method))
mic += auth_method.encode()
return mic
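# A self-contained sketch of the MIC byte layout above, assuming _make_uint32
# packs a big-endian 32-bit length (struct '>I') and that MSG_USERAUTH_REQUEST
# is 50, per RFC 4252; the session id and the string values are made up.
import struct

MSG_USERAUTH_REQUEST = 50

def build_mic(session_id, username, service, auth_method):
    def u32(n):
        return struct.pack(">I", n)
    mic = u32(len(session_id)) + session_id
    mic += struct.pack("B", MSG_USERAUTH_REQUEST)
    for field in (username, service, auth_method):
        data = field.encode()
        mic += u32(len(data)) + data
    return mic

print(build_mic(b"\x01\x02", "alice", "ssh-connection", "gssapi-with-mic").hex())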
def join(self, table, one=None,
operator=None, two=None, type='inner', where=False):
"""
Add a join clause to the query
:param table: The table to join with, can also be a JoinClause instance
:type table: str or JoinClause
:param one: The first column of the join condition
:type one: str
:param operator: The operator of the join condition
:type operator: str
:param two: The second column of the join condition
:type two: str
:param type: The join type
:type type: str
:param where: Whether to use a "where" rather than a "on"
:type where: bool
:return: The current QueryBuilder instance
:rtype: QueryBuilder
"""
if isinstance(table, JoinClause):
self.joins.append(table)
else:
if one is None:
raise ArgumentError('Missing "one" argument')
join = JoinClause(table, type)
self.joins.append(join.on(
one, operator, two, 'and', where
))
return self
def csoftmax(tensor, inv_cumulative_att):
""" It is a implementation of the constrained softmax (csoftmax).
Based on the paper:
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
Args:
tensor: A TensorFlow tensor of scores, with shape [None, n_tokens]
inv_cumulative_att: An inverse cumulative attention tensor with shape [None, n_tokens]
Returns:
cs: Output tensor with shape [None, n_tokens]
"""
shape_ten = tensor.shape
shape_cum = inv_cumulative_att.shape
merge_tensor = [tensor, inv_cumulative_att]
cs, _ = tf.map_fn(csoftmax_for_slice, merge_tensor, dtype=[tf.float32, tf.float32]) # [bs, L]
return cs
def mainloop(self):
""" The main loop.
"""
if not self.args:
self.parser.error("No metafiles given, nothing to do!")
if 1 < sum(bool(i) for i in (self.options.no_ssl, self.options.reannounce, self.options.reannounce_all)):
self.parser.error("Conflicting options --no-ssl, --reannounce and --reannounce-all!")
# Set filter criteria for metafiles
filter_url_prefix = None
if self.options.reannounce:
# <scheme>://<netloc>/<path>?<query>
filter_url_prefix = urlparse.urlsplit(self.options.reannounce, allow_fragments=False)
filter_url_prefix = urlparse.urlunsplit((
filter_url_prefix.scheme, filter_url_prefix.netloc, '/', '', '' # bogus pylint: disable=E1103
))
self.LOG.info("Filtering for metafiles with announce URL prefix %r..." % filter_url_prefix)
if self.options.reannounce_all:
self.options.reannounce = self.options.reannounce_all
else:
# When changing the announce URL w/o changing the domain, don't change the info hash!
self.options.no_cross_seed = True
# Resolve tracker alias, if URL doesn't look like an URL
if self.options.reannounce and not urlparse.urlparse(self.options.reannounce).scheme:
tracker_alias, idx = self.options.reannounce, "0"
if '.' in tracker_alias:
tracker_alias, idx = tracker_alias.split('.', 1)
try:
idx = int(idx, 10)
_, tracker_url = config.lookup_announce_alias(tracker_alias)
self.options.reannounce = tracker_url[idx]
except (KeyError, IndexError, TypeError, ValueError) as exc:
raise error.UserError("Unknown tracker alias or bogus URL %r (%s)!" % (
self.options.reannounce, exc))
# go through given files
bad = 0
changed = 0
for filename in self.args:
try:
# Read and remember current content
metainfo = bencode.bread(filename)
old_metainfo = bencode.bencode(metainfo)
except (EnvironmentError, KeyError, bencode.BencodeError) as exc:
self.LOG.warning("Skipping bad metafile %r (%s: %s)" % (filename, type(exc).__name__, exc))
bad += 1
else:
# Check metafile integrity
try:
metafile.check_meta(metainfo)
except ValueError as exc:
self.LOG.warn("Metafile %r failed integrity check: %s" % (filename, exc,))
if not self.options.no_skip:
continue
# Skip any metafiles that don't meet the pre-conditions
if filter_url_prefix and not metainfo['announce'].startswith(filter_url_prefix):
self.LOG.warn("Skipping metafile %r no tracked by %r!" % (filename, filter_url_prefix,))
continue
# Keep resume info safe
libtorrent_resume = {}
if "libtorrent_resume" in metainfo:
try:
libtorrent_resume["bitfield"] = metainfo["libtorrent_resume"]["bitfield"]
except KeyError:
pass # nothing to remember
libtorrent_resume["files"] = copy.deepcopy(metainfo["libtorrent_resume"]["files"])
# Change private flag?
if self.options.make_private and not metainfo["info"].get("private", 0):
self.LOG.info("Setting private flag...")
metainfo["info"]["private"] = 1
if self.options.make_public and metainfo["info"].get("private", 0):
self.LOG.info("Clearing private flag...")
del metainfo["info"]["private"]
# Remove non-standard keys?
if self.options.clean or self.options.clean_all or self.options.clean_xseed:
metafile.clean_meta(metainfo, including_info=not self.options.clean, logger=self.LOG.info)
# Restore resume info?
if self.options.clean_xseed:
if libtorrent_resume:
self.LOG.info("Restoring key 'libtorrent_resume'...")
metainfo.setdefault("libtorrent_resume", {})
metainfo["libtorrent_resume"].update(libtorrent_resume)
else:
self.LOG.warn("No resume information found!")
# Clean rTorrent data?
if self.options.clean_rtorrent:
for key in self.RT_RESUMT_KEYS:
if key in metainfo:
self.LOG.info("Removing key %r..." % (key,))
del metainfo[key]
# Change announce URL?
if self.options.reannounce:
metainfo['announce'] = self.options.reannounce
if "announce-list" in metainfo:
del metainfo["announce-list"]
if not self.options.no_cross_seed:
# Enforce unique hash per tracker
metainfo["info"]["x_cross_seed"] = hashlib.md5(self.options.reannounce).hexdigest()
if self.options.no_ssl:
# We're assuming here the same (default) port is used
metainfo['announce'] = (metainfo['announce']
.replace("https://", "http://").replace(":443/", ":80/"))
# Change comment or creation date?
if self.options.comment is not None:
if self.options.comment:
metainfo["comment"] = self.options.comment
elif "comment" in metainfo:
del metainfo["comment"]
if self.options.bump_date:
metainfo["creation date"] = int(time.time())
if self.options.no_date and "creation date" in metainfo:
del metainfo["creation date"]
# Add fast-resume data?
if self.options.hashed:
try:
metafile.add_fast_resume(metainfo, self.options.hashed.replace("{}", metainfo["info"]["name"]))
except EnvironmentError as exc:
self.fatal("Error making fast-resume data (%s)" % (exc,))
raise
# Set specific keys?
metafile.assign_fields(metainfo, self.options.set)
replace_fields(metainfo, self.options.regex)
# Write new metafile, if changed
new_metainfo = bencode.bencode(metainfo)
if new_metainfo != old_metainfo:
if self.options.output_directory:
filename = os.path.join(self.options.output_directory, os.path.basename(filename))
self.LOG.info("Writing %r..." % filename)
if not self.options.dry_run:
bencode.bwrite(filename, metainfo)
if "libtorrent_resume" in metainfo:
# Also write clean version
filename = filename.replace(".torrent", "-no-resume.torrent")
del metainfo["libtorrent_resume"]
self.LOG.info("Writing %r..." % filename)
bencode.bwrite(filename, metainfo)
else:
self.LOG.info("Changing %r..." % filename)
if not self.options.dry_run:
# Write to temporary file
tempname = os.path.join(
os.path.dirname(filename),
'.' + os.path.basename(filename),
)
self.LOG.debug("Writing %r..." % tempname)
bencode.bwrite(tempname, metainfo)
# Replace existing file
if os.name != "posix":
# cannot rename to existing target on WIN32
os.remove(filename)
try:
os.rename(tempname, filename)
except EnvironmentError as exc:
# TODO: Try to write directly, keeping a backup!
raise error.LoggableError("Can't rename tempfile %r to %r (%s)" % (
tempname, filename, exc
))
changed += 1
# Print summary
if changed:
self.LOG.info("%s %d metafile(s)." % (
"Would've changed" if self.options.dry_run else "Changed", changed
))
if bad:
self.LOG.warn("Skipped %d bad metafile(s)!" % (bad)) | 0.003471 |
def build_ml_phyml(alignment, outfile, work_dir=".", **kwargs):
"""
build maximum likelihood tree of DNA seqs with PhyML
"""
phy_file = op.join(work_dir, "work", "aln.phy")
AlignIO.write(alignment, file(phy_file, "w"), "phylip-relaxed")
phyml_cl = PhymlCommandline(cmd=PHYML_BIN("phyml"), input=phy_file, **kwargs)
logging.debug("Building ML tree using PhyML: %s" % phyml_cl)
stdout, stderr = phyml_cl()
tree_file = phy_file + "_phyml_tree.txt"
if not op.exists(tree_file):
print("***PhyML failed.", file=sys.stderr)
return None
sh("cp {0} {1}".format(tree_file, outfile), log=False)
logging.debug("ML tree printed to %s" % outfile)
return outfile, phy_file
def updater_loop(self):
""" Main loop that should run in the background. """
self.updater_running = True
while (self.updater_running):
self.send_updates()
sleep(self.updateinterval)
def create_vcard3_str(name, surname, displayname, email='', org='', title='', url='', note=''):
""" Create a vCard3.0 string with the given parameters.
Reference: http://www.evenx.com/vcard-3-0-format-specification
"""
vcard = []
vcard += ['BEGIN:VCARD']
vcard += ['VERSION:3.0']
if name and surname:
name = name.strip()
vcard += ['N:{};{};;;'.format(name, surname)]
if not displayname:
displayname = '{} {}'.format(name, surname)
vcard += ['FN:{}'.format(displayname)]
if email:
vcard += ['EMAIL:{}'.format(email)]
if org:
vcard += ['ORG:{}'.format(org)]
if title:
vcard += ['TITLE:{}'.format(title)]
if url:
vcard += ['URL:{}'.format(url)]
if note:
vcard += ['NOTE:{}'.format(note)]
vcard += ['END:VCARD']
return '\n'.join([field.strip() for field in vcard])
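# A quick usage example of create_vcard3_str above (assuming the function is
# defined in the same module); the person and organisation are fictional.
print(create_vcard3_str('Ada', 'Lovelace', '', email='ada@example.com',
                        org='Analytical Engines Ltd', title='Engineer'))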
def matrix_to_images(data_matrix, mask):
"""
Unmasks rows of a matrix and writes as images
ANTsR function: `matrixToImages`
Arguments
---------
data_matrix : numpy.ndarray
each row corresponds to an image
array should have number of columns equal to non-zero voxels in the mask
mask : ANTsImage
image containing a binary mask. Rows of the matrix are
unmasked and written as images. The mask defines the output image space
Returns
-------
list of ANTsImage types
"""
if data_matrix.ndim > 2:
data_matrix = data_matrix.reshape(data_matrix.shape[0], -1)
numimages = len(data_matrix)
numVoxelsInMatrix = data_matrix.shape[1]
numVoxelsInMask = (mask >= 0.5).sum()
if numVoxelsInMask != numVoxelsInMatrix:
raise ValueError('Num masked voxels %i must match data matrix %i' % (numVoxelsInMask, numVoxelsInMatrix))
imagelist = []
for i in range(numimages):
img = mask.clone()
img[mask >= 0.5] = data_matrix[i,:]
imagelist.append(img)
return imagelist
def set_palette_name(self, palette_name):
"""If the given palette matches an existing one, shows it in the
combobox
"""
combo = self.get_widget('palette_name')
found = False
log.debug("wanting palette: %r", palette_name)
for i in combo.get_model():
if i[0] == palette_name:
combo.set_active_iter(i.iter)
found = True
break
if not found:
combo.set_active(self.custom_palette_index)
def resolve_context(self, verbosity=0, max_fails=-1, timestamp=None,
callback=None, buf=None, package_load_callback=None):
"""Update the current context by performing a re-resolve.
The newly resolved context is only applied if it is a successful solve.
Returns:
`ResolvedContext` object, which may be a successful or failed solve.
"""
package_filter = PackageFilterList.from_pod(self.package_filter)
context = ResolvedContext(
self.request,
package_paths=self.packages_path,
package_filter=package_filter,
verbosity=verbosity,
max_fails=max_fails,
timestamp=timestamp,
buf=buf,
callback=callback,
package_load_callback=package_load_callback,
caching=self.caching)
if context.success:
if self._context and self._context.load_path:
context.set_load_path(self._context.load_path)
self._set_context(context)
self._modified = True
return context
def _build_query_params(self, headers_only=False, page_size=None):
"""Return key-value pairs for the list_time_series API call.
:type headers_only: bool
:param headers_only:
Whether to omit the point data from the
:class:`~google.cloud.monitoring_v3.types.TimeSeries` objects.
:type page_size: int
:param page_size:
(Optional) The maximum number of points in each page of results
from this request. Non-positive values are ignored. Defaults
to a sensible value set by the API.
"""
params = {"name": self._project_path, "filter_": self.filter}
params["interval"] = types.TimeInterval()
params["interval"].end_time.FromDatetime(self._end_time)
if self._start_time:
params["interval"].start_time.FromDatetime(self._start_time)
if (
self._per_series_aligner
or self._alignment_period_seconds
or self._cross_series_reducer
or self._group_by_fields
):
params["aggregation"] = types.Aggregation(
per_series_aligner=self._per_series_aligner,
cross_series_reducer=self._cross_series_reducer,
group_by_fields=self._group_by_fields,
alignment_period={"seconds": self._alignment_period_seconds},
)
if headers_only:
params["view"] = enums.ListTimeSeriesRequest.TimeSeriesView.HEADERS
else:
params["view"] = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
if page_size is not None:
params["page_size"] = page_size
return params
def _create_cont_table(self,data):
"""
Create tableone for continuous data.
Returns
----------
table : pandas DataFrame
A table summarising the continuous variables.
"""
# remove the t1_summary level
table = self.cont_describe[['t1_summary']].copy()
table.columns = table.columns.droplevel(level=0)
# add a column of null counts as 1-count() from previous function
nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull')
try:
table = table.join(nulltable)
except TypeError: # if columns form a CategoricalIndex, need to convert to string first
table.columns = table.columns.astype(str)
table = table.join(nulltable)
# add an empty level column, for joining with cat table
table['level'] = ''
table.set_index([table.index,'level'],inplace=True)
# add pval column
if self._pval and self._pval_adjust:
table = table.join(self._significance_table[['pval (adjusted)','ptest']])
elif self._pval:
table = table.join(self._significance_table[['pval','ptest']])
return table
def get_module_path(modname):
"""Return module *modname* base path"""
return osp.abspath(osp.dirname(sys.modules[modname].__file__))
def in_resource(cls, session_type):
"""Returns True if the attribute is part of a given session type.
The session_type is a tuple with the interface type and resource_class
:type session_type: (constants.InterfaceType, str)
:rtype: bool
"""
if cls.resources is AllSessionTypes:
return True
return session_type in cls.resources
def check_connections(self):
"""Check connection between OpenStack to Nexus device."""
switch_connections = self._mdriver.get_all_switch_ips()
for switch_ip in switch_connections:
state = self._mdriver.get_switch_ip_and_active_state(switch_ip)
config_failure = self._mdriver.get_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
contact_failure = self._mdriver.get_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
LOG.debug("check_connections() thread %(thid)d, switch "
"%(switch_ip)s state %(state)s "
"contact_failure %(contact_failure)d "
"config_failure %(config_failure)d ",
{'thid': threading.current_thread().ident,
'switch_ip': switch_ip, 'state': state,
'contact_failure': contact_failure,
'config_failure': config_failure})
try:
# Send a simple get nexus type to determine if
# the switch is up
nexus_type = self._driver.get_nexus_type(switch_ip)
except Exception:
if state != const.SWITCH_INACTIVE:
LOG.error("Lost connection to switch ip "
"%(switch_ip)s", {'switch_ip': switch_ip})
self._mdriver.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_INACTIVE)
else:
self._mdriver.incr_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
else:
if state == const.SWITCH_RESTORE_S2:
try:
self._mdriver.configure_next_batch_of_vlans(switch_ip)
except Exception as e:
LOG.error("Unexpected exception while replaying "
"entries for switch %(switch_ip)s, "
"Reason:%(reason)s ",
{'switch_ip': switch_ip, 'reason': e})
self._mdriver.register_switch_as_inactive(
switch_ip, 'replay next_vlan_batch')
continue
if state == const.SWITCH_INACTIVE:
self._configure_nexus_type(switch_ip, nexus_type)
LOG.info("Re-established connection to switch "
"ip %(switch_ip)s",
{'switch_ip': switch_ip})
self._mdriver.set_switch_ip_and_active_state(
switch_ip, const.SWITCH_RESTORE_S1)
self.replay_config(switch_ip)
# If replay failed, it stops trying to configure db entries
# and sets switch state to inactive so this caller knows
# it failed. If it did fail, we increment the
# retry counter else reset it to 0.
if self._mdriver.get_switch_ip_and_active_state(
switch_ip) == const.SWITCH_INACTIVE:
self._mdriver.incr_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
LOG.warning("Replay config failed for "
"ip %(switch_ip)s",
{'switch_ip': switch_ip})
else:
self._mdriver.reset_switch_replay_failure(
const.FAIL_CONFIG, switch_ip)
self._mdriver.reset_switch_replay_failure(
const.FAIL_CONTACT, switch_ip)
LOG.info("Replay config successful for "
"ip %(switch_ip)s",
{'switch_ip': switch_ip})
def make_regression(
n_samples=100,
n_features=100,
n_informative=10,
n_targets=1,
bias=0.0,
effective_rank=None,
tail_strength=0.5,
noise=0.0,
shuffle=True,
coef=False,
random_state=None,
chunks=None,
):
"""
Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See
:func:`sklearn.datasets.make_low_rank_matrix` for more details.
This can be used to generate very large Dask arrays on a cluster of
machines. When using Dask in distributed mode, the client machine
only needs to allocate a single block's worth of data.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset creation. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
chunks : int, tuple
How to chunk the array. Must be one of the following forms:
- A blocksize like 1000.
- A blockshape like (1000, 1000).
- Explicit sizes of all blocks along all dimensions like
((1000, 1000, 500), (400, 400)).
Returns
-------
X : Dask array of shape [n_samples, n_features]
The input samples.
y : Dask array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
chunks = da.core.normalize_chunks(chunks, (n_samples, n_features))
_check_axis_partitioning(chunks, n_features)
rng = sklearn.utils.check_random_state(random_state)
return_coef = coef is True
if chunks[1][0] != n_features:
raise ValueError(
"Can only generate arrays partitioned along the "
"first axis. Specifying a larger chunksize for "
"the second axis."
)
_, _, coef = sklearn.datasets.make_regression(
n_samples=chunks[0][0],
n_features=n_features,
n_informative=n_informative,
n_targets=n_targets,
bias=bias,
effective_rank=effective_rank,
tail_strength=tail_strength,
noise=noise,
shuffle=shuffle,
coef=True, # hardcode here
random_state=rng,
)
seed = da.random.random_state_data(1, random_state=rng)
da_rng = da.random.RandomState(seed[0])
X_big = da_rng.normal(size=(n_samples, n_features), chunks=(chunks[0], n_features))
y_big = da.dot(X_big, coef) + bias
if noise > 0:
y_big = y_big + da_rng.normal(
scale=noise, size=y_big.shape, chunks=y_big.chunks
)
y_big = y_big.squeeze()
if return_coef:
return X_big, y_big, coef
else:
return X_big, y_big
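# A usage sketch for make_regression above (assuming it is importable and that
# dask[array] and scikit-learn are installed); the sizes and chunking below are
# arbitrary examples.
X, y = make_regression(n_samples=1000, n_features=20, n_informative=5,
                       chunks=250, random_state=0)
print(X.chunks)   # ((250, 250, 250, 250), (20,))
print(y.shape)    # (1000,)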
def _HasDuplicateRegistryKeyPaths(
self, filename, artifact_definition, source):
"""Checks if Registry key paths are not already defined by other artifacts.
Note that at the moment this function will only find exact duplicate
Registry key paths.
Args:
filename (str): name of the artifacts definition file.
artifact_definition (ArtifactDefinition): artifact definition.
source (SourceType): source definition.
Returns:
bool: True if the Registry key paths defined by the source type
are used in other artifacts.
"""
result = False
intersection = self._artifact_registry_key_paths.intersection(
set(source.keys))
if intersection:
duplicate_key_paths = '\n'.join(intersection)
logging.warning((
'Artifact definition: {0:s} in file: {1:s} has duplicate '
'Registry key paths:\n{2:s}').format(
artifact_definition.name, filename, duplicate_key_paths))
result = True
self._artifact_registry_key_paths.update(source.keys)
return result
def fix_code(source_code, line_ranges, options=None, verbose=0):
'''Apply autopep8 over the line_ranges, returns the corrected code.
Note: line_ranges should not overlap, though this is not checked.
Example
-------
>>> code = "def f( x ):\\n if True:\\n return 2*x"
>>> print(fix_code(code, [(1, 1), (3, 3)]))
def f(x):
if True:
return 2 * x
'''
if options is None:
from pep8radius.main import parse_args
options = parse_args()
if getattr(options, "yapf", False):
from yapf.yapflib.yapf_api import FormatCode
result = FormatCode(source_code, style_config=options.style, lines=line_ranges)
# yapf<0.3 returns diff as str, >=0.3 returns a tuple of (diff, changed)
return result[0] if isinstance(result, tuple) else result
line_ranges = reversed(line_ranges)
# Apply line fixes "up" the file (i.e. in reverse) so that
# fixes do not affect changes we're yet to make.
partial = source_code
for start, end in line_ranges:
partial = fix_line_range(partial, start, end, options)
_maybe_print('.', end='', max_=1, verbose=verbose)
_maybe_print('', max_=1, verbose=verbose)
fixed = partial
return fixed
def slave_open(tty_name):
"""slave_open(tty_name) -> slave_fd
Open the pty slave and acquire the controlling terminal, returning
opened filedescriptor.
Deprecated, use openpty() instead."""
result = os.open(tty_name, os.O_RDWR)
try:
from fcntl import ioctl, I_PUSH
except ImportError:
return result
try:
ioctl(result, I_PUSH, "ptem")
ioctl(result, I_PUSH, "ldterm")
except OSError:
pass
return result
def p_unit_list(self, p):
""" unit_list : unit_list unit
| unit
"""
if isinstance(p[1], list):
if len(p) >= 3:
if isinstance(p[2], list):
p[1].extend(p[2])
else:
p[1].append(p[2])
else:
p[1] = [p[1]]
p[0] = p[1]
def locate_primers(sequences, forward_primer, reverse_primer,
reverse_complement, max_hamming_distance):
"""
Find forward and reverse primers in a set of sequences, return two tuples:
(forward_start, forward_end), (reverse_start, reverse_end)
"""
forward_loc = None
reverse_loc = None
seq_length = None
# Reverse complement the reverse primer, if appropriate
if reverse_complement:
reverse_primer = reverse_primer.reverse_complement()
forward_aligner = PrimerAligner(forward_primer)
reverse_aligner = PrimerAligner(reverse_primer)
for i, sequence in enumerate(sequences):
if seq_length is None:
seq_length = len(sequence)
elif len(sequence) != seq_length:
raise ValueError(("Sequence Length Heterogeneity: {0} != {1}. "
"Is this an alignment?").format(
len(sequence), seq_length))
index_map = ungap_index_map(sequence.seq)
if forward_loc is None:
ham_dist, start, end = forward_aligner.align(sequence.seq.ungap())
if ham_dist <= max_hamming_distance:
forward_loc = index_map[start], index_map[end]
logging.info("Forward in sequence %d: indexes %d to %d", i + 1,
*forward_loc)
if reverse_loc is None:
ham_dist, start, end = reverse_aligner.align(sequence.seq.ungap())
if ham_dist <= max_hamming_distance:
reverse_loc = index_map[start], index_map[end]
logging.info("Reverse in sequence %d: indexes %d to %d", i + 1,
*reverse_loc)
if forward_loc and reverse_loc:
# Both found
# Check order
if forward_loc[0] > reverse_loc[0]:
raise PrimerOrderError(forward_loc[0], reverse_loc[0])
return forward_loc, reverse_loc
else:
logging.debug(
"Sequence %d: %d/2 primers found", i + 1,
sum(j is not None for j in (forward_loc, reverse_loc)))
# Did not find either the forward or reverse primer:
if not forward_loc:
raise PrimerNotFound(forward_primer)
else:
raise PrimerNotFound(reverse_primer)
def components(models, wrap_script=True, wrap_plot_info=True, theme=FromCurdoc):
''' Return HTML components to embed a Bokeh plot. The data for the plot is
stored directly in the returned HTML.
An example can be found in examples/embed/embed_multiple.py
The returned components assume that BokehJS resources are **already loaded**.
The html template in which they will be embedded needs to include the following
links and scripts tags. The widgets and tables resources are only necessary if
the components make use of widgets and tables.
.. code-block:: html
<link
href="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.css"
rel="stylesheet" type="text/css">
<link
href="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.css"
rel="stylesheet" type="text/css">
<link
href="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.css"
rel="stylesheet" type="text/css">
<script src="http://cdn.pydata.org/bokeh/release/bokeh-x.y.z.min.js"></script>
<script src="http://cdn.pydata.org/bokeh/release/bokeh-widgets-x.y.z.min.js"></script>
<script src="http://cdn.pydata.org/bokeh/release/bokeh-tables-x.y.z.min.js"></script>
Note that in Jupyter Notebooks, it is not possible to use components and show in
the same notebook cell.
Args:
models (Model|list|dict|tuple) :
A single Model, a list/tuple of Models, or a dictionary of keys and Models.
wrap_script (boolean, optional) :
If True, the returned javascript is wrapped in a script tag.
(default: True)
wrap_plot_info (boolean, optional) : If True, returns ``<div>`` strings.
Otherwise, return dicts that can be used to build your own divs.
(default: True)
If False, the returned dictionary contains the following information:
.. code-block:: python
{
'modelid': 'The model ID, used with Document.get_model_by_id',
'elementid': 'The css identifier the BokehJS will look for to target the plot',
'docid': 'Used by Bokeh to find the doc embedded in the returned script',
}
theme (Theme, optional) :
Defaults to the ``Theme`` instance in the current document.
Setting this to ``None`` uses the default theme or the theme
already specified in the document. Any other value must be an
instance of the ``Theme`` class.
Returns:
UTF-8 encoded *(script, div[s])* or *(raw_script, plot_info[s])*
Examples:
With default wrapping parameter values:
.. code-block:: python
components(plot)
# => (script, plot_div)
components((plot1, plot2))
# => (script, (plot1_div, plot2_div))
components({"Plot 1": plot1, "Plot 2": plot2})
# => (script, {"Plot 1": plot1_div, "Plot 2": plot2_div})
Examples:
With wrapping parameters set to ``False``:
.. code-block:: python
components(plot, wrap_script=False, wrap_plot_info=False)
# => (javascript, plot_dict)
components((plot1, plot2), wrap_script=False, wrap_plot_info=False)
# => (javascript, (plot1_dict, plot2_dict))
components({"Plot 1": plot1, "Plot 2": plot2}, wrap_script=False, wrap_plot_info=False)
# => (javascript, {"Plot 1": plot1_dict, "Plot 2": plot2_dict})
'''
# 1) Convert single items and dicts into list
was_single_object = isinstance(models, Model) or isinstance(models, Document)
models = _check_models_or_docs(models)
# now convert dict to list, saving keys in the same order
model_keys = None
dict_type = None
if isinstance(models, dict):
model_keys = models.keys()
dict_type = models.__class__
values = []
# don't just use .values() to ensure we are in the same order as key list
for k in model_keys:
values.append(models[k])
models = values
# 2) Append models to one document. Either pre-existing or new and render
with OutputDocumentFor(models, apply_theme=theme):
(docs_json, [render_item]) = standalone_docs_json_and_render_items(models)
script = bundle_all_models() or ""
script += script_for_render_items(docs_json, [render_item])
if wrap_script:
script = wrap_in_script_tag(script)
script = encode_utf8(script)
def div_for_root(root):
return ROOT_DIV.render(root=root, macros=MACROS)
if wrap_plot_info:
results = list(div_for_root(root) for root in render_item.roots)
else:
results = render_item.roots
# 3) convert back to the input shape
if was_single_object:
result = results[0]
elif model_keys is not None:
result = dict_type(zip(model_keys, results))
else:
result = tuple(results)
return script, result
def parse_sparse(filename, vocab_filename):
"""
Parse a file that's in libSVM format. In libSVM format each line of the text
file represents a document in bag of words format:
num_unique_words_in_doc word_id:count another_id:count
The word_ids have 0-based indexing, i.e. 0 corresponds to the first
word in the vocab filename.
Parameters
----------
filename : str
The name of the file to parse.
vocab_filename : str
A list of words that are used for this data set.
Returns
-------
out : SArray
Each element represents a document in bag-of-words format.
Examples
--------
If we have two documents:
1. "It was the best of times, it was the worst of times"
2. "It was the age of wisdom, it was the age of foolishness"
Then the vocabulary file might contain the unique words, with a word
on each line, in the following order:
it, was, the, best, of, times, worst, age, wisdom, foolishness
In this case, the file in libSVM format would have two lines:
7 0:2 1:2 2:2 3:1 4:2 5:1 6:1
7 0:2 1:2 2:2 7:2 8:1 9:1 10:1
The following command will parse the above two files into an SArray
of type dict.
>>> file = 'https://static.turi.com/datasets/text/ap.dat'
>>> vocab = 'https://static.turi.com/datasets/text/ap.vocab.txt'
>>> docs = turicreate.text_analytics.parse_sparse(file, vocab)
"""
vocab = _turicreate.SFrame.read_csv(vocab_filename, header=None)['X1']
vocab = list(vocab)
docs = _turicreate.SFrame.read_csv(filename, header=None)
# Remove first word
docs = docs['X1'].apply(lambda x: x.split(' ')[1:])
# Helper function that checks whether we get too large a word id
def get_word(word_id):
assert int(word_id) < len(vocab), \
"Text data contains integers that are larger than the \
size of the provided vocabulary."
return vocab[word_id]
def make_dict(pairs):
pairs = [z.split(':') for z in pairs]
ret = {}
for k, v in pairs:
ret[get_word(int(k))] = int(v)
return ret
# Split word_id and count and make into a dictionary
docs = docs.apply(lambda x: make_dict(x))
return docs | 0.000881 |
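For reference, the per-line transformation performed by make_dict can be sketched on its own (the vocabulary and input line below are made up):
vocab = ["it", "was", "the", "best", "of", "times", "worst"]
line = "7 0:2 1:2 2:2 3:1 4:2 5:1 6:1"

pairs = line.split(" ")[1:]  # drop the leading unique-word count
bag = {vocab[int(k)]: int(v) for k, v in (p.split(":") for p in pairs)}
print(bag)  # {'it': 2, 'was': 2, 'the': 2, 'best': 1, 'of': 2, 'times': 1, 'worst': 1}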
def equal(mol, query, largest_only=True, ignore_hydrogen=True):
""" if mol is exactly same structure as the query, return True
Args:
mol: Compound
query: Compound
"""
m = molutil.clone(mol)
q = molutil.clone(query)
if largest_only:
m = molutil.largest_graph(m)
q = molutil.largest_graph(q)
if ignore_hydrogen:
m = molutil.make_Hs_implicit(m)
q = molutil.make_Hs_implicit(q)
if molutil.mw(m) == molutil.mw(q):
gm = GraphMatcher(q.graph, m.graph, node_match=atom_match)
return gm.is_isomorphic()
return False | 0.001661 |
def add():
"""
Add a blurb (a Misc/NEWS entry) to the current CPython repo.
"""
editor = find_editor()
handle, tmp_path = tempfile.mkstemp(".rst")
os.close(handle)
atexit.register(lambda : os.unlink(tmp_path))
def init_tmp_with_template():
with open(tmp_path, "wt", encoding="utf-8") as file:
# hack:
# my editor likes to strip trailing whitespace from lines.
# normally this is a good idea. but in the case of the template
# it's unhelpful.
# so, manually ensure there's a space at the end of the bpo line.
text = template
bpo_line = ".. bpo:"
without_space = "\n" + bpo_line + "\n"
with_space = "\n" + bpo_line + " \n"
if without_space not in text:
sys.exit("Can't find BPO line to ensure there's a space on the end!")
text = text.replace(without_space, with_space)
file.write(text)
init_tmp_with_template()
# We need to be clever about EDITOR.
# On the one hand, it might be a legitimate path to an
# executable containing spaces.
# On the other hand, it might be a partial command-line
# with options.
if shutil.which(editor):
args = [editor]
else:
args = list(shlex.split(editor))
if not shutil.which(args[0]):
sys.exit(f("Invalid GIT_EDITOR / EDITOR value: {editor}"))
args.append(tmp_path)
while True:
subprocess.run(args)
failure = None
blurb = Blurbs()
try:
blurb.load(tmp_path)
except BlurbError as e:
failure = str(e)
if not failure:
assert len(blurb) # if parse_blurb succeeds, we should always have a body
if len(blurb) > 1:
failure = "Too many entries! Don't specify '..' on a line by itself."
if failure:
print()
print(f("Error: {failure}"))
print()
try:
prompt("Hit return to retry (or Ctrl-C to abort)")
except KeyboardInterrupt:
print()
return
print()
continue
break
path = blurb.save_next()
git_add_files.append(path)
flush_git_add_files()
print("Ready for commit.") | 0.002553 |
def to_tuples(self, data):
'''
path_data : string, from an svg path tag's 'd' attribute, eg:
'M 46,74 L 35,12 l 53,-13 z'
returns the same data collected in a list of tuples, eg:
[ ('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z') ],
The input data may have floats instead of ints, this will be reflected
in the output. The input may have its whitespace stripped out, or its
commas replaced by whitespace.
'''
self.data = data
self.pos = 0
parsed = []
command = []
while self.pos < len(self.data):
indicator = self.data[self.pos]
if indicator == ' ':
self.pos += 1
elif indicator == ',':
if len(command) >= 2:
self.pos += 1
else:
msg = 'unexpected comma at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
elif indicator in '0123456789.-':
if command:
command.append(self.get_number())
else:
msg = 'missing command at %d in %r' % (self.pos, self.data)
raise ParseError(msg)
else:
if command:
parsed.append(tuple(command))
command = [indicator]
self.pos += 1
if command:
parsed.append(tuple(command))
if parsed[0][0] == 'M' and parsed[-1][0] == 'L'\
and parsed[0][1:] == parsed[-1][1:]:
parsed[-1] = ('z',)
return parsed | 0.001826 |
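The same tokenization can be reproduced with a small standalone regex sketch (independent of the parser class above), which is handy for checking expected output:
import re

def path_to_tuples(d):
    # Letters are commands; signed ints/floats are their arguments.
    tokens = re.findall(r"[A-Za-z]|-?\d+(?:\.\d+)?", d)
    out, cmd = [], []
    for t in tokens:
        if t.isalpha():
            if cmd:
                out.append(tuple(cmd))
            cmd = [t]
        else:
            cmd.append(float(t) if "." in t else int(t))
    if cmd:
        out.append(tuple(cmd))
    return out

print(path_to_tuples("M 46,74 L 35,12 l 53,-13 z"))
# [('M', 46, 74), ('L', 35, 12), ('l', 53, -13), ('z',)]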
def cmd_init_push_to_cloud(args):
"""Initiate the local catalog and push it the cloud"""
(lcat, ccat) = (args.local_catalog, args.cloud_catalog)
logging.info("[init-push-to-cloud]: %s => %s"%(lcat, ccat))
if not isfile(lcat):
args.error("[init-push-to-cloud] The local catalog does not exist: %s"%lcat)
if isfile(ccat):
args.error("[init-push-to-cloud] The cloud catalog already exist: %s"%ccat)
(lmeta, cmeta) = ("%s.lrcloud"%lcat, "%s.lrcloud"%ccat)
if isfile(lmeta):
args.error("[init-push-to-cloud] The local meta-data already exist: %s"%lmeta)
if isfile(cmeta):
args.error("[init-push-to-cloud] The cloud meta-data already exist: %s"%cmeta)
#Let's "lock" the local catalog
logging.info("Locking local catalog: %s"%(lcat))
if not lock_file(lcat):
raise RuntimeError("The catalog %s is locked!"%lcat)
#Copy catalog from local to cloud, which becomes the new "base" changeset
util.copy(lcat, ccat)
# Write meta-data both to local and cloud
mfile = MetaFile(lmeta)
utcnow = datetime.utcnow().strftime(DATETIME_FORMAT)[:-4]
mfile['catalog']['hash'] = hashsum(lcat)
mfile['catalog']['modification_utc'] = utcnow
mfile['catalog']['filename'] = lcat
mfile['last_push']['filename'] = ccat
mfile['last_push']['hash'] = hashsum(lcat)
mfile['last_push']['modification_utc'] = utcnow
mfile.flush()
mfile = MetaFile(cmeta)
mfile['changeset']['is_base'] = True
mfile['changeset']['hash'] = hashsum(lcat)
mfile['changeset']['modification_utc'] = utcnow
mfile['changeset']['filename'] = basename(ccat)
mfile.flush()
#Let's copy Smart Previews
if not args.no_smart_previews:
copy_smart_previews(lcat, ccat, local2cloud=True)
    #Finally, let's unlock the catalog files
logging.info("Unlocking local catalog: %s"%(lcat))
unlock_file(lcat)
logging.info("[init-push-to-cloud]: Success!") | 0.009674 |
def dfs(node, expand=expansion_all, callback=None, silent=True):
""" Perform a depth-first search on the node graph
:param node: GraphNode
:param expand: Returns the list of Nodes to explore from a Node
:param callback: Callback to run in each node
:param silent: Don't throw exception on circular dependency
:return:
"""
nodes = deque()
for n in expand(node):
nodes.append(n)
while nodes:
n = nodes.pop()
n.visits += 1
if callback:
callback(n)
for k in expand(n):
if k.visits < 1:
nodes.append(k)
else:
if not silent:
raise CircularDependency('Circular Dependency') | 0.001361 |
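A small usage sketch, assuming dfs above is in scope; the Node class and its children attribute are made up for illustration (dfs itself only relies on the visits counter):
class Node:
    def __init__(self, name):
        self.name = name
        self.visits = 0
        self.children = []

a, b, c = Node("a"), Node("b"), Node("c")
a.children = [b, c]
b.children = [c]

order = []
dfs(a, expand=lambda n: n.children, callback=lambda n: order.append(n.name))
print(order)  # ['c', 'b'] -- the start node itself is never passed to the callback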
def output(self, _filename):
"""
_filename is not used
Args:
_filename(string)
"""
txt = ''
for contract in self.slither.contracts_derived:
txt += '\n{}:\n'.format(contract.name)
table = PrettyTable(['Name', 'ID'])
for function in contract.functions:
if function.visibility in ['public', 'external']:
table.add_row([function.full_name, hex(get_function_id(function.full_name))])
for variable in contract.state_variables:
if variable.visibility in ['public']:
variable_getter_args = ""
if type(variable.type) is ArrayType:
length = 0
v = variable
while type(v.type) is ArrayType:
length += 1
v = v.type
variable_getter_args = ','.join(["uint256"]*length)
elif type(variable.type) is MappingType:
variable_getter_args = variable.type.type_from
table.add_row([f"{variable.name}({variable_getter_args})", hex(get_function_id(f"{variable.name}({variable_getter_args})"))])
txt += str(table) + '\n'
self.info(txt) | 0.002946 |
def Crop(px=None, percent=None, keep_size=True, sample_independently=True,
name=None, deterministic=False, random_state=None):
"""
Augmenter that crops/cuts away pixels at the sides of the image.
That allows to cut out subimages from given (full) input images.
The number of pixels to cut off may be defined in absolute values or
percent of the image sizes.
dtype support::
See ``imgaug.augmenters.size.CropAndPad``.
Parameters
----------
px : None or int or imgaug.parameters.StochasticParameter or tuple, optional
The number of pixels to crop away (cut off) on each side of the image.
Either this or the parameter `percent` may be set, not both at the same
time.
* If None, then pixel-based cropping will not be used.
* If int, then that exact number of pixels will always be cropped.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two ints with values ``a`` and ``b``, then each
side will be cropped by a random amount in the range
``a <= x <= b``. ``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single integer (always crop by
exactly that value), a tuple of two ints ``a`` and ``b`` (crop by
an amount ``a <= x <= b``), a list of ints (crop by a random
value that is contained in the list) or a StochasticParameter
(sample the amount to crop from that parameter).
percent : None or int or float or imgaug.parameters.StochasticParameter \
or tuple, optional
The number of pixels to crop away (cut off) on each side of the image
given *in percent* of the image height/width.
E.g. if this is set to 0.1, the augmenter will always crop away
10 percent of the image's height at the top, 10 percent of the width
on the right, 10 percent of the height at the bottom and 10 percent
of the width on the left.
Either this or the parameter `px` may be set, not both at the same time.
* If None, then percent-based cropping will not be used.
* If int, then expected to be 0 (no cropping).
* If float, then that percentage will always be cropped away.
* If StochasticParameter, then that parameter will be used for each
image. Four samples will be drawn per image (top, right, bottom,
left).
* If a tuple of two floats with values ``a`` and ``b``, then each
side will be cropped by a random percentage in the range
``a <= x <= b``. ``x`` is sampled per image side.
* If a tuple of four entries, then the entries represent top, right,
bottom, left. Each entry may be a single float (always crop by
exactly that percent value), a tuple of two floats a and ``b``
(crop by a percentage ``a <= x <= b``), a list of floats (crop by
a random value that is contained in the list) or a
StochasticParameter (sample the percentage to crop from that
parameter).
keep_size : bool, optional
After cropping, the result image has a different height/width than
the input image. If this parameter is set to True, then the cropped
image will be resized to the input image's size, i.e. the image size
is then not changed by the augmenter.
sample_independently : bool, optional
If False AND the values for `px`/`percent` result in exactly one
probability distribution for the amount to crop, only one
single value will be sampled from that probability distribution
and used for all sides. I.e. the crop amount then is the same
for all sides.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Crop(px=(0, 10))
crops each side by a random value from the range 0px to 10px (the value
is sampled per side).
>>> aug = iaa.Crop(px=(0, 10), sample_independently=False)
samples one value ``v`` from the discrete range ``[0..10]`` and crops all
sides by ``v`` pixels.
>>> aug = iaa.Crop(px=(0, 10), keep_size=False)
crops each side by a random value from the range 0px to 10px (the value
is sampled per side). After cropping, the images are NOT resized to their
original size (i.e. the images may end up having different heights/widths).
>>> aug = iaa.Crop(px=((0, 10), (0, 5), (0, 10), (0, 5)))
crops the top and bottom by a random value from the range 0px to 10px
and the left and right by a random value in the range 0px to 5px.
>>> aug = iaa.Crop(percent=(0, 0.1))
crops each side by a random value from the range 0 percent to
10 percent. (Percent with respect to the side's size, e.g. for the
top side it uses the image's height.)
>>> aug = iaa.Crop(percent=([0.05, 0.1], [0.05, 0.1], [0.05, 0.1], [0.05, 0.1]))
crops each side by either 5 percent or 10 percent.
"""
def recursive_negate(v):
if v is None:
return v
elif ia.is_single_number(v):
ia.do_assert(v >= 0)
return -v
elif isinstance(v, iap.StochasticParameter):
return iap.Multiply(v, -1)
elif isinstance(v, tuple):
return tuple([recursive_negate(v_) for v_ in v])
elif isinstance(v, list):
return [recursive_negate(v_) for v_ in v]
else:
raise Exception("Expected None or int or float or StochasticParameter or list or tuple, got %s." % (
type(v),))
px = recursive_negate(px)
percent = recursive_negate(percent)
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
aug = CropAndPad(
px=px, percent=percent,
keep_size=keep_size, sample_independently=sample_independently,
name=name, deterministic=deterministic, random_state=random_state
)
return aug | 0.001071 |
def get_period_strs(self, now=None):
"""
        meterer.get_period_strs()
Return the period strings for the specified time (defaulting to the
current time if not specified).
"""
if now is None:
now = self.dt.utcnow()
year_str = "%04d" % now.year
month_str = "%04d-%02d" % (now.year, now.month)
day_str = "%04d-%02d-%02d" % (now.year, now.month, now.day)
hour_str = "%04d-%02d-%02dT%02d" % (
now.year, now.month, now.day, now.hour)
        # Note: The ISO week-numbering year may differ from the calendar year
        # near the year boundary (e.g. the last days of December can fall in
        # week 1 of the following year).
isocal = now.isocalendar()
week_str = "%04d-W%02d" % (isocal[0], isocal[1])
return {
"year": year_str,
"month": month_str,
"day": day_str,
"hour": hour_str,
"week": week_str,
} | 0.002205 |
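The week caveat noted in the comment can be checked standalone with the same format strings (no class instance needed):
from datetime import datetime

now = datetime(2018, 12, 31, 15, 0)  # a Monday; ISO week 1 of 2019
isocal = now.isocalendar()
print("%04d" % now.year)                      # 2018
print("%04d-W%02d" % (isocal[0], isocal[1]))  # 2019-W01 -- week-year differs from calendar year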
def fetch_github_token(self):
"""
Fetch GitHub token. First try to use variable provided
by --token option, otherwise try to fetch it from git config
and last CHANGELOG_GITHUB_TOKEN env variable.
:returns: Nothing
"""
if not self.options.token:
try:
for v in GH_CFG_VARS:
cmd = ['git', 'config', '--get', '{0}'.format(v)]
self.options.token = subprocess.Popen(
cmd, stdout=subprocess.PIPE).communicate()[0].strip()
if self.options.token:
break
except (subprocess.CalledProcessError, WindowsError):
pass
if not self.options.token:
self.options.token = os.environ.get(CHANGELOG_GITHUB_TOKEN)
if not self.options.token:
print(NO_TOKEN_PROVIDED) | 0.002215 |
def _remove_empty_lines(self, lines):
"""
Iterate through the lines and remove any that are
either empty or contain only one whitespace value
Parameters
----------
lines : array-like
The array of lines that we are to filter.
Returns
-------
filtered_lines : array-like
The same array of lines with the "empty" ones removed.
"""
ret = []
for l in lines:
# Remove empty lines and lines with only one whitespace value
if (len(l) > 1 or len(l) == 1 and
(not isinstance(l[0], str) or l[0].strip())):
ret.append(l)
return ret | 0.004225 |
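A quick illustration of the filter condition on made-up parsed rows:
rows = [["a", "b"], [" "], [""], [42], ["c"]]
kept = [r for r in rows
        if len(r) > 1 or (len(r) == 1 and (not isinstance(r[0], str) or r[0].strip()))]
print(kept)  # [['a', 'b'], [42], ['c']]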
def certain_timezone_at(self, *, lng, lat):
"""
        This function looks up in which polygon the point is certainly included.
        This is much slower than 'timezone_at'!
:param lng: longitude of the point in degree
:param lat: latitude in degree
:return: the timezone name of the polygon the point is included in or None
"""
lng, lat = rectify_coordinates(lng, lat)
shortcut_id_x, shortcut_id_y = coord2shortcut(lng, lat)
possible_polygons = self.polygon_ids_of_shortcut(shortcut_id_x, shortcut_id_y)
# x = longitude y = latitude both converted to 8byte int
x = coord2int(lng)
y = coord2int(lat)
# check if the point is actually included in one of the polygons
for polygon_nr in possible_polygons:
# get boundaries
self.poly_max_values.seek(4 * NR_BYTES_I * polygon_nr)
boundaries = self.fromfile(self.poly_max_values, dtype=DTYPE_FORMAT_SIGNED_I_NUMPY, count=4)
if not (x > boundaries[0] or x < boundaries[1] or y > boundaries[2] or y < boundaries[3]):
outside_all_holes = True
# when the point is within a hole of the polygon this timezone doesn't need to be checked
for hole_coordinates in self._holes_of_line(polygon_nr):
if inside_polygon(x, y, hole_coordinates):
outside_all_holes = False
break
if outside_all_holes:
if inside_polygon(x, y, self.coords_of(line=polygon_nr)):
return timezone_names[self.id_of(polygon_nr)]
# no polygon has been matched
return None | 0.004053 |
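A hedged usage sketch, assuming this method is exposed by the timezonefinder package as in its documented API:
from timezonefinder import TimezoneFinder

tf = TimezoneFinder()
print(tf.certain_timezone_at(lng=13.41, lat=52.52))  # expected: 'Europe/Berlin'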
def train(self, corpus=None, method='auto', clear_buffer=True, params=None):
"""
Create an indexing model. Will overwrite the model if it already exists.
All indexes become invalid, because documents in them use a now-obsolete
representation.
The model is trained on documents previously entered via `buffer`,
or directly on `corpus`, if specified.
"""
if corpus is not None:
# use the supplied corpus only (erase existing buffer, if any)
self.flush(clear_buffer=True)
self.buffer(corpus)
if not self.fresh_docs:
msg = "train called but no training corpus specified for %s" % self
logger.error(msg)
raise ValueError(msg)
if method == 'auto':
numdocs = len(self.fresh_docs)
if numdocs < 1000:
logging.warning("too few training documents; using simple log-entropy model instead of latent semantic indexing")
method = 'logentropy'
else:
method = 'lsi'
if params is None:
params = {}
self.model = SimModel(self.fresh_docs, method=method, params=params)
self.flush(save_model=True, clear_buffer=clear_buffer) | 0.003918 |
def metadata(self):
"""
Write metadata sheet.
"""
sheet = self.result.add_sheet("metadata")
self.header(sheet, "metadata")
n_row = 1 # row number
for k in self.po.metadata:
row = sheet.row(n_row)
row.write(0, k)
row.write(1, self.po.metadata[k])
n_row += 1
sheet.flush_row_data() | 0.005013 |
def parse_selinux(parts):
"""
Parse part of an ls output line that is selinux.
Args:
parts (list): A four element list of strings representing the initial
            parts of an ls line after the permission bits. The parts are owner,
            group, selinux info, and the path.
Returns:
A dict containing owner, group, se_user, se_role, se_type, se_mls, and
name. If the raw name was a symbolic link, link is always included.
"""
owner, group = parts[:2]
selinux = parts[2].split(":")
lsel = len(selinux)
path, link = parse_path(parts[-1])
result = {
"owner": owner,
"group": group,
"se_user": selinux[0],
"se_role": selinux[1] if lsel > 1 else None,
"se_type": selinux[2] if lsel > 2 else None,
"se_mls": selinux[3] if lsel > 3 else None,
"name": path
}
if link:
result["link"] = link
return result | 0.001057 |
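An illustrative call, assuming parse_selinux and its parse_path helper are in scope and that parse_path returns a plain (non-symlink) path unchanged; the ls fields below are made up:
parts = ["root", "root", "system_u:object_r:etc_t:s0", "/etc/hosts"]
print(parse_selinux(parts))
# {'owner': 'root', 'group': 'root', 'se_user': 'system_u', 'se_role': 'object_r',
#  'se_type': 'etc_t', 'se_mls': 's0', 'name': '/etc/hosts'}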
def parse_napoleon_doc(doc, style):
""" Extract the text from the various sections of a numpy-formatted docstring.
Parameters
----------
doc: Union[str, None]
The docstring to parse.
style: str
'google' or 'numpy'
Returns
-------
OrderedDict[str, Union[None,str]]
The extracted numpy-styled docstring sections."""
napoleon_sections = ["Short Summary", "Attributes", "Methods", "Warning", "Note", "Parameters", "Other Parameters",
"Keyword Arguments", "Returns", "Yields", "Raises", "Warns", "See Also", "References", "Todo",
"Example", "Examples"]
aliases = {"Args": "Parameters", "Arguments": "Parameters", "Keyword Args": "Keyword Arguments",
"Return": "Returns", "Warnings": "Warning", "Yield": "Yields"}
doc_sections = OrderedDict([(key, None) for key in napoleon_sections])
if not doc:
return doc_sections
assert style in ("google", "numpy")
doc = cleandoc(doc)
lines = iter(doc.splitlines())
key = "Short Summary"
body = []
while True:
try:
line = next(lines).rstrip()
header = line if style == "numpy" else (line[:-1] if line.endswith(":") else line)
if header and (header in doc_sections or header in aliases):
doc_sections[aliases.get(key, key)] = "\n".join(body).rstrip() if body else None
body = []
key = header
if style == "numpy":
next(lines) # skip section delimiter
else:
body.append(line)
except StopIteration:
doc_sections[aliases.get(key, key)] = "\n".join(body)
break
return doc_sections | 0.00385 |
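A usage sketch, assuming parse_napoleon_doc above is in scope:
doc = """Add two numbers.

Parameters
----------
a : int
b : int

Returns
-------
int
    The sum."""

sections = parse_napoleon_doc(doc, style="numpy")
print(sections["Short Summary"])   # Add two numbers.
print(repr(sections["Returns"]))   # 'int\n    The sum.'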
def _smixins(self, name):
"""Inner wrapper to search for mixins by name.
"""
return (self._mixins[name] if name in self._mixins else False) | 0.01227 |
def _return_wrapper(fits, return_all, start, trace):
"""If the user wants to get all of the models back, this will
return a list of the ARIMA models, otherwise it will just return
the model. If this is called from the end of the function, ``fits``
will already be a list.
We *know* that if a function call makes it here, ``fits`` is NOT None
or it would have thrown an exception in :func:`_post_ppc_arima`.
Parameters
----------
fits : iterable or ARIMA
The ARIMA(s)
return_all : bool
        Whether to return all fitted models or only the first one (the list
        is assumed to be sorted best-first).
    start : float
        The start time of the search, used to report the total fit time.
    trace : bool
        Whether to print the total fit time.
    """
# make sure it's an iterable
if not is_iterable(fits):
fits = [fits]
# whether to print the final runtime
if trace:
print('Total fit time: %.3f seconds' % (time.time() - start))
# which to return? if not all, then first index (assume sorted)
if not return_all:
return fits[0]
return fits | 0.001092 |
def has_text_frame(self):
"""
Return |True| if this data label has a text frame (implying it has
custom data label text), and |False| otherwise. Assigning |True|
causes a text frame to be added if not already present. Assigning
|False| causes any existing text frame to be removed along with any
text contained in the text frame.
"""
dLbl = self._dLbl
if dLbl is None:
return False
if dLbl.xpath('c:tx/c:rich'):
return True
return False | 0.00365 |
def neighbors(self, distance=2.0):
"""Get all neighbors for all neurons."""
dgrid = self.distance_grid.reshape(self.num_neurons, self.num_neurons)
for x, y in zip(*np.nonzero(dgrid <= distance)):
if x != y:
yield x, y | 0.007435 |
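The same pair-generation idea, shown standalone on a tiny made-up distance grid:
import numpy as np

dgrid = np.array([[0.0, 1.0, 3.0],
                  [1.0, 0.0, 2.0],
                  [3.0, 2.0, 0.0]])
pairs = [(int(x), int(y)) for x, y in zip(*np.nonzero(dgrid <= 2.0)) if x != y]
print(pairs)  # [(0, 1), (1, 0), (1, 2), (2, 1)]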
def readObject(self, innode):
'''reads in a node and returns as a tuple: (type, name, points[])'''
#get name
names=innode.getElementsByTagName('name')[0].childNodes[0].data.strip()
#get type
pointType = 'Unknown'
if len(innode.getElementsByTagName('LineString')) == 0 and len(innode.getElementsByTagName('Point')) == 0:
pointType = 'Polygon'
elif len(innode.getElementsByTagName('Polygon')) == 0 and len(innode.getElementsByTagName('Point')) == 0:
            pointType = 'LineString'
elif len(innode.getElementsByTagName('LineString')) == 0 and len(innode.getElementsByTagName('Polygon')) == 0:
pointType = 'Point'
#get coords
coords = innode.getElementsByTagName('coordinates')[0].childNodes[0].data.strip()
coordsSplit = coords.split()
ret_s = []
for j in coordsSplit:
jcoord = j.split(',')
if len(jcoord) == 3 and jcoord[0] != '' and jcoord[1] != '':
#print("Got lon " + jcoord[0] + " and lat " + jcoord[1])
ret_s.append((float(jcoord[1]), float(jcoord[0])))
#return tuple
return (str(pointType), str(names), ret_s) | 0.01199 |
def create_remote_copy(self, target, effects=None, make_public=None,
pattern=None):
"""Creates file copy in remote storage.
Args:
- target:
Name of a custom storage connected to the project.
- effects:
Adds CDN image effects to ``self.default_effects`` if any.
- make_public:
To forbid public from accessing your files on the storage set
``make_public`` option to be False.
Default value is None. Files have public access by default.
- pattern:
Specify ``pattern`` option to set S3 object key name.
Takes precedence over pattern set in project settings.
If neither is specified defaults to
`${uuid}/${filename}${effects}${ext}`.
For more information on each of the options above please refer to
REST API docs https://uploadcare.com/docs/api_reference/rest/accessing_files/.
Following example copies a file to custom storage named ``samplefs``:
>>> file = File('e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a')
>>> file.create_remote_copy(target='samplefs',
>>> make_public=True,
>>> pattern='${uuid}/${filename}${ext}')
        Now custom storage ``samplefs`` contains a publicly available file
        with the original filename billmurray.jpg in the directory named
        ``e8ebfe20-8c11-4a94-9b40-52ecad7d8d1a``.
"""
effects = self._build_effects(effects)
data = {
'source': self.cdn_path(effects),
'target': target
}
if make_public is not None:
data['make_public'] = make_public
if pattern is not None:
data['pattern'] = pattern
return rest_request('POST', 'files/', data=data) | 0.002062 |
def hmget(self, hashkey, keys, *args):
"""Emulate hmget."""
redis_hash = self._get_hash(hashkey, 'HMGET')
attributes = self._list_or_args(keys, args)
return [redis_hash.get(self._encode(attribute)) for attribute in attributes] | 0.011583 |
def package(self):
"""Copy Flatbuffers' artifacts to package folder
"""
cmake = self.configure_cmake()
cmake.install()
self.copy(pattern="LICENSE.txt", dst="licenses")
self.copy(pattern="FindFlatBuffers.cmake", dst=os.path.join("lib", "cmake", "flatbuffers"), src="CMake")
self.copy(pattern="flathash*", dst="bin", src="bin")
self.copy(pattern="flatc*", dst="bin", src="bin")
if self.settings.os == "Windows" and self.options.shared:
if self.settings.compiler == "Visual Studio":
shutil.move(os.path.join(self.package_folder, "lib", "%s.dll" % self.name),
os.path.join(self.package_folder, "bin", "%s.dll" % self.name))
elif self.settings.compiler == "gcc":
shutil.move(os.path.join(self.package_folder, "lib", "lib%s.dll" % self.name),
os.path.join(self.package_folder, "bin", "lib%s.dll" % self.name)) | 0.007092 |
def _add_ssh_key(ret):
'''
    Sets up the salt-ssh minion to be accessed with the salt-ssh default key
'''
priv = None
if __opts__.get('ssh_use_home_key') and os.path.isfile(os.path.expanduser('~/.ssh/id_rsa')):
priv = os.path.expanduser('~/.ssh/id_rsa')
else:
priv = __opts__.get(
'ssh_priv',
os.path.abspath(os.path.join(
__opts__['pki_dir'],
'ssh',
'salt-ssh.rsa'
))
)
if priv and os.path.isfile(priv):
ret['priv'] = priv | 0.003571 |
def parse_cookies(self, cookies, **kwargs):
"""Parses a semi-colon delimited list of cookies.
Example: foo=bar;baz=qux
"""
for name, value in _parse_keyvalue_list(cookies):
self.cookies.set(name, value, **kwargs) | 0.007782 |
def div_safe( numerator, denominator ):
"""
Ufunc-extension that returns 0 instead of nan when dividing numpy arrays
Parameters
----------
numerator: array-like
denominator: scalar or array-like that can be validly divided by the numerator
    Returns
    -------
    numpy array
        example: div_safe( [-1, 0, 1], 0 ) == [0, 0, 0]
"""
#First handle scalars
if np.isscalar(numerator):
raise ValueError("div_safe should only be used with an array-like numerator")
#Then numpy arrays
try:
with np.errstate(divide='ignore', invalid='ignore'):
result = np.true_divide( numerator, denominator )
result[ ~ np.isfinite( result )] = 0 # -inf inf NaN
return result
except ValueError as e:
raise e | 0.015326 |
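A quick behavioral check (assumes div_safe above is in scope):
import numpy as np

print(div_safe(np.array([-1.0, 0.0, 1.0]), 0))               # [0. 0. 0.]
print(div_safe(np.array([4.0, 9.0]), np.array([2.0, 3.0])))  # [2. 3.]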
def move(self, pose_tree, x=None, y=None, z=None, home_flagged_axes=True):
"""
Dispatch move command to the driver changing base of
x, y and z from source coordinate system to destination.
Value must be set for each axis that is mapped.
home_flagged_axes: (default=True)
This kwarg is passed to the driver. This ensures that any axes
within this Mover's axis_mapping is homed before moving, if it has
not yet done so. See driver docstring for details
"""
def defaults(_x, _y, _z):
_x = _x if x is not None else 0
_y = _y if y is not None else 0
_z = _z if z is not None else 0
return _x, _y, _z
dst_x, dst_y, dst_z = change_base(
pose_tree,
src=self._src,
dst=self._dst,
point=Point(*defaults(x, y, z)))
driver_target = {}
if 'x' in self._axis_mapping:
assert x is not None, "Value must be set for each axis mapped"
driver_target[self._axis_mapping['x']] = dst_x
if 'y' in self._axis_mapping:
assert y is not None, "Value must be set for each axis mapped"
driver_target[self._axis_mapping['y']] = dst_y
if 'z' in self._axis_mapping:
assert z is not None, "Value must be set for each axis mapped"
driver_target[self._axis_mapping['z']] = dst_z
self._driver.move(driver_target, home_flagged_axes=home_flagged_axes)
# Update pose with the new value. Since stepper motors are open loop
# there is no need to to query diver for position
return update(pose_tree, self, Point(*defaults(dst_x, dst_y, dst_z))) | 0.001149 |
def network_interface_create_or_update(name, ip_configurations, subnet, virtual_network,
resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
Create or update a network interface within a specified resource group.
:param name: The name of the network interface to create.
:param ip_configurations: A list of dictionaries representing valid
NetworkInterfaceIPConfiguration objects. The 'name' key is required at
minimum. At least one IP Configuration must be present.
:param subnet: The name of the subnet assigned to the network interface.
:param virtual_network: The name of the virtual network assigned to the subnet.
:param resource_group: The resource group name assigned to the
virtual network.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_interface_create_or_update test-iface0 [{'name': 'testipconfig1'}] \
testsubnet testnet testgroup
'''
if 'location' not in kwargs:
rg_props = __salt__['azurearm_resource.resource_group_get'](
resource_group, **kwargs
)
if 'error' in rg_props:
log.error(
'Unable to determine location from resource group specified.'
)
return False
kwargs['location'] = rg_props['location']
netconn = __utils__['azurearm.get_client']('network', **kwargs)
# Use NSG name to link to the ID of an existing NSG.
if kwargs.get('network_security_group'):
nsg = network_security_group_get(
name=kwargs['network_security_group'],
resource_group=resource_group,
**kwargs
)
if 'error' not in nsg:
kwargs['network_security_group'] = {'id': str(nsg['id'])}
# Use VM name to link to the ID of an existing VM.
if kwargs.get('virtual_machine'):
vm_instance = __salt__['azurearm_compute.virtual_machine_get'](
name=kwargs['virtual_machine'],
resource_group=resource_group,
**kwargs
)
if 'error' not in vm_instance:
kwargs['virtual_machine'] = {'id': str(vm_instance['id'])}
# Loop through IP Configurations and build each dictionary to pass to model creation.
if isinstance(ip_configurations, list):
subnet = subnet_get(
name=subnet,
virtual_network=virtual_network,
resource_group=resource_group,
**kwargs
)
if 'error' not in subnet:
subnet = {'id': str(subnet['id'])}
for ipconfig in ip_configurations:
if 'name' in ipconfig:
ipconfig['subnet'] = subnet
if isinstance(ipconfig.get('application_gateway_backend_address_pools'), list):
# TODO: Add ID lookup for referenced object names
pass
if isinstance(ipconfig.get('load_balancer_backend_address_pools'), list):
# TODO: Add ID lookup for referenced object names
pass
if isinstance(ipconfig.get('load_balancer_inbound_nat_rules'), list):
# TODO: Add ID lookup for referenced object names
pass
if ipconfig.get('public_ip_address'):
pub_ip = public_ip_address_get(
name=ipconfig['public_ip_address'],
resource_group=resource_group,
**kwargs
)
if 'error' not in pub_ip:
ipconfig['public_ip_address'] = {'id': str(pub_ip['id'])}
try:
nicmodel = __utils__['azurearm.create_object_model'](
'network',
'NetworkInterface',
ip_configurations=ip_configurations,
**kwargs
)
except TypeError as exc:
result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
return result
try:
interface = netconn.network_interfaces.create_or_update(
resource_group_name=resource_group,
network_interface_name=name,
parameters=nicmodel
)
interface.wait()
nic_result = interface.result()
result = nic_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
except SerializationError as exc:
result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
return result | 0.002328 |
def get_conn(self, urlparsed=None):
"""Returns an HTTPConnection based on the urlparse result given or the
default Swift cluster (internal url) urlparse result.
:param urlparsed: The result from urlparse.urlparse or None to use the
default Swift cluster's value
"""
if not urlparsed:
urlparsed = self.dsc_parsed2
if urlparsed.scheme == 'http':
return HTTPConnection(urlparsed.netloc)
else:
return HTTPSConnection(urlparsed.netloc) | 0.003643 |
def get_sigma(x, min_limit=-np.inf, max_limit=np.inf):
"""Compute the standard deviations around the points for a 1D GMM.
We take the distance from the nearest left and right neighbors
for each point, then use the max as the estimate of standard
deviation for the gaussian mixture around that point.
Arguments
---------
x : 1D array
Set of points to create the GMM
min_limit : Optional[float], default : -inf
Minimum limit for the distribution
max_limit : Optional[float], default : inf
maximum limit for the distribution
Returns
-------
1D array
Array of standard deviations
"""
z = np.append(x, [min_limit, max_limit])
sigma = np.ones(x.shape)
for i in range(x.size):
# Calculate the nearest left neighbor of x[i]
# Find the minimum of (x[i] - k) for k < x[i]
xleft = z[np.argmin([(x[i] - k) if k < x[i] else np.inf for k in z])]
# Calculate the nearest right neighbor of x[i]
# Find the minimum of (k - x[i]) for k > x[i]
xright = z[np.argmin([(k - x[i]) if k > x[i] else np.inf for k in z])]
sigma[i] = max(x[i] - xleft, xright - x[i])
if sigma[i] == np.inf:
sigma[i] = min(x[i] - xleft, xright - x[i])
if (sigma[i] == -np.inf): # should never happen
sigma[i] = 1.0
return sigma | 0.000721 |
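A worked example (assumes get_sigma above is in scope): with no limits, interior points get the larger gap to their neighbors, while end points fall back to their only finite gap:
import numpy as np

x = np.array([0.0, 1.0, 4.0])
print(get_sigma(x))  # [1. 3. 3.]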
def setup_completer(cls):
"Get the dictionary of valid completions"
try:
for element in Store.options().keys():
options = Store.options()['.'.join(element)]
plotkws = options['plot'].allowed_keywords
stylekws = options['style'].allowed_keywords
dotted = '.'.join(element)
cls._completions[dotted] = (plotkws, stylekws if stylekws else [])
except KeyError:
pass
return cls._completions | 0.005769 |
def run(self):
"""Reads data from disk and generates CSV files."""
# Try to create the directory
if not os.path.exists(self.output):
try:
os.mkdir(self.output)
except:
print 'failed to create output directory %s' % self.output
# Be sure it is a directory
if not os.path.isdir(self.output):
print 'invalid output directory %s' % self.output
sys.exit(1)
# Create the CSV handlers
visitors = [
_CompaniesCSV(self.output),
_ActivitiesCSV(self.output),
_ActivitiesSeenCSV(self.output),
_QSACSV(self.output),
]
# Run by each company populating the CSV files
for path in glob.glob(os.path.join(self.input, '*.json')):
with open(path, 'r') as f:
try:
data = json.load(f, encoding='utf-8')
except ValueError:
continue
for visitor in visitors:
visitor.visit(data) | 0.002765 |
def copy_from_model(cls, model_name, reference, **kwargs):
"""
        Set up a user-defined grid using the specifications of a reference
        grid model.
Parameters
----------
model_name : string
name of the user-defined grid model.
reference : string or :class:`CTMGrid` instance
Name of the reference model (see :func:`get_supported_models`),
or a :class:`CTMGrid` object from which grid set-up is copied.
**kwargs
Any set-up parameter which will override the settings of the
reference model (see :class:`CTMGrid` parameters).
Returns
-------
A :class:`CTMGrid` object.
"""
if isinstance(reference, cls):
settings = reference.__dict__.copy()
settings.pop('model')
else:
settings = _get_model_info(reference)
settings.pop('model_name')
settings.update(kwargs)
settings['reference'] = reference
return cls(model_name, **settings) | 0.001883 |
def calls(self):
"""
Provides access to call overview for the given webhook.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/webhook-calls
:return: :class:`WebhookWebhooksCallProxy <contentful_management.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy>` object.
:rtype: contentful.webhook_webhooks_call_proxy.WebhookWebhooksCallProxy
Usage:
>>> webhook_webhooks_call_proxy = webhook.calls()
<WebhookWebhooksCallProxy space_id="cfexampleapi" webhook_id="my_webhook">
"""
return WebhookWebhooksCallProxy(self._client, self.sys['space'].id, self.sys['id']) | 0.008499 |
def pythonize(line, fn='', subdir='gen'):
"""Convert a line of BMPM code from PHP to Python.
Parameters
----------
line : str
A line of code
fn : str
A filename
subdir : str
The file's subdirectory
Returns
-------
The code in Python
"""
global array_seen, nl, sd
if '$all' in line:
return ''
if 'make the sum of all languages be visible in the function' in line:
return ''
line = line.strip()
if 'array' in line and not line.startswith('//'):
array_seen = True
line = re.sub('//+', '#', line)
# line = re.sub('"\.\((\$.+?)\)\."', r'\1', line)
if line and re.search(r'array\("[^"]+?"\)', line):
# print("### " + line)
line = ''
line = line.replace('array', '')
line = re.sub(r'^\s*', '', line)
line = re.sub(';$', '', line)
line = re.sub('^include_.+', '', line)
line = re.sub(
r'\$(approx|rules|exact)\[LanguageIndex\("([^"]+)", '
+ r'\$languages\)\] = \$([a-zA-Z]+)',
lambda m: (
"BMDATA['"
+ subdir
+ "']['"
+ m.group(1)
+ "'][L_"
+ m.group(2).upper()
+ '] = _'
+ subdir.upper()
+ '_'
+ c2u(m.group(3)).upper()
),
line,
)
line = re.sub(
r'\$(approx|rules|exact|hebrew)([A-Za-z]+) = _merge'
+ r'\(\$([a-zA-Z]+), \$([a-zA-Z]+)\)',
lambda m: (
"BMDATA['"
+ subdir
+ "']['"
+ m.group(1)
+ "'][L_"
+ c2u(m.group(2)).upper()
+ '] = _'
+ subdir.upper()
+ '_'
+ c2u(m.group(3)).upper()
+ ' + _'
+ subdir.upper()
+ '_'
+ c2u(m.group(4)).upper()
),
line,
)
line = re.sub(
r'\$(approx|rules|exact)\[LanguageIndex\("([^"]+)", '
+ r'\$languages\)\] = _merge\(\$([a-zA-Z]+), \$([a-zA-Z]+)\)',
lambda m: (
"BMDATA['"
+ subdir
+ "']['"
+ m.group(1)
+ "'][L_"
+ c2u(m.group(2)).upper()
+ '] = _'
+ subdir.upper()
+ '_'
+ c2u(m.group(3)).upper()
+ ' + _'
+ subdir.upper()
+ '_'
+ c2u(m.group(4)).upper()
),
line,
)
line = re.sub(
r'^\$([a-zA-Z]+)',
lambda m: '_' + sd.upper() + '_' + c2u(m.group(1)).upper(),
line,
)
for _ in range(len(lang_tuple)):
line = re.sub(r'($[a-zA-Z]+) *\+ *($[a-zA-Z]+)', r'\1\+\2', line)
line = re.sub(
r'\$([a-zA-Z]+)',
lambda m: (
'L_' + m.group(1).upper()
if m.group(1) in lang_dict
else '$' + m.group(1)
),
line,
)
line = re.sub(r'\[\"\.\((L_[A-Z_+]+)\)\.\"\]', r'[\1]', line)
line = re.sub(
'L_([A-Z]+)', lambda m: str(lang_dict[m.group(1).lower()]), line
)
for _ in range(4):
line = re.sub(
r'([0-9]+) *\+ *([0-9]+)',
lambda m: str(int(m.group(1)) + int(m.group(2))),
line,
)
if fn == 'lang':
if len(line.split(',')) >= 3:
parts = line.split(',')
parts[0] = re.sub('/(.+?)/', r'\1', parts[0])
# parts[1] = re.sub('\$', 'L_', parts[1])
# parts[1] = re.sub(' *\+ *', '|', parts[1])
parts[2] = parts[2].title()
line = ','.join(parts)
if 'languagenames' in fn:
line = line.replace('"', "'")
line = line.replace("','", "', '")
if line and line[0] == "'":
line = ' ' * 14 + line
# fix upstream
# line = line.replace('ë', 'ü')
comment = ''
if '#' in line:
hashsign = line.find('#')
comment = line[hashsign:]
code = line[:hashsign]
else:
code = line
code = code.rstrip()
comment = comment.strip()
if not re.match(r'^\s*$', code):
comment = ' ' + comment
if '(' in code and ')' in code:
prefix = code[: code.find('(') + 1]
suffix = code[code.rfind(')') :]
tuplecontent = code[len(prefix) : len(code) - len(suffix)]
elts = tuplecontent.split(',')
for i in range(len(elts)):
elts[i] = elts[i].strip()
if elts[i][0] == '"' and elts[i][-1] == '"':
elts[i] = "'" + elts[i][1:-1].replace("'", "\\'") + "'"
tuplecontent = ', '.join(elts)
code = prefix + tuplecontent + suffix
line = code + comment
line = re.sub('# *', '# ', line)
if line:
nl = False
if array_seen and not (line[0] == '_' or line.startswith('BMDATA')):
line = ' ' * 4 + line
return line + '\n'
elif not nl:
nl = True
return '\n'
else:
return '' | 0.000604 |
def fn_min(self, a, axis=None):
"""
Return the minimum of an array, ignoring any NaNs.
:param a: The array.
:return: The minimum value of the array.
"""
return numpy.nanmin(self._to_ndarray(a), axis=axis) | 0.007874 |
def pretty_exe_doc(program, parser, stack=1, under='-'):
"""
Takes the name of a script and a parser that will give the help message for it.
The module that called this function will then add a header to the docstring
of the script, followed immediately by the help message generated
by the OptionParser
:param str program: Name of the program that we want to make the header
    :param optparse.OptionParser parser: Either a parser or a callable with no arguments
that will give the desired parser
:param int stack: How far up the stack to get the docstring to change
:param str under: The character you want for the program underline
"""
if os.path.basename(sys.argv[0]) == 'sphinx-build':
# Get the calling module
mod = inspect.getmodule(inspect.stack()[stack][0])
# Get parser
_parser = parser() if '__call__' in dir(parser) else parser
# Make the parser use the correct program
_parser.set_usage(mod.__usage__.replace('%prog', program))
        # Modify docs by adding a header and usage
mod.__doc__ = '\n'.join(['', program, under * len(program), '::', ''] +
[' %s' % l for l in _parser.format_help().split('\n')]) + \
mod.__doc__ | 0.005243 |
def _find(string, sub_string, start_index):
"""Return index of sub_string in string.
Raise TokenError if sub_string is not found.
"""
result = string.find(sub_string, start_index)
if result == -1:
raise TokenError("expected '{0}'".format(sub_string))
return result | 0.003367 |
def lfc(pressure, temperature, dewpt, parcel_temperature_profile=None, dewpt_start=None):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure
temperature : `pint.Quantity`
The temperature at the levels given by `pressure`
dewpt : `pint.Quantity`
The dew point at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpt_start: `pint.Quantity`, optional
The dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
Returns
-------
`pint.Quantity`
The LFC pressure and temperature
See Also
--------
parcel_profile
"""
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpt)
pressure, temperature, _, parcel_temperature_profile = new_stuff
temperature = temperature.to('degC')
parcel_temperature_profile = parcel_temperature_profile.to('degC')
if dewpt_start is None:
dewpt_start = dewpt[0]
# The parcel profile and data may have the same first data point.
# If that is the case, ignore that point to get the real first
# intersection for the LFC calculation.
if np.isclose(parcel_temperature_profile[0].m, temperature[0].m):
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='increasing')
else:
x, y = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing')
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpt_start)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
return np.nan * pressure.units, np.nan * temperature.units
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
x, y = this_lcl
return x, y
        # Otherwise, select the first candidate LFC above the LCL
else:
x = x[idx]
y = y[idx]
return x[0], y[0] | 0.00196 |
def _get_app_path(url):
''' Extract the app path from a Bokeh server URL
Args:
url (str) :
Returns:
str
'''
app_path = urlparse(url).path.rstrip("/")
if not app_path.startswith("/"):
app_path = "/" + app_path
return app_path | 0.003584 |
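Two quick checks of the normalization (assumes _get_app_path above is in scope):
print(_get_app_path("http://localhost:5006/myapp/"))  # /myapp
print(_get_app_path("http://localhost:5006"))         # /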
def to_unicode(value):
"""
Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value))
return value.decode("utf-8") | 0.002137 |
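A behavior sketch (assumes to_unicode above is in scope and that _TO_UNICODE_TYPES covers str and NoneType, as the docstring implies):
print(to_unicode(b"caf\xc3\xa9"))   # café
print(to_unicode("already text"))   # already text
print(to_unicode(None))             # None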
def mixed_list_file(cls, filename, values, bits):
"""
Write a list of mixed values to a file.
        If a file of the same name exists, its contents are replaced.
See L{HexInput.mixed_list_file} for a description of the file format.
@type filename: str
@param filename: Name of the file to write.
@type values: list( int )
@param values: List of mixed values to write to the file.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexOutput.integer_size}
"""
fd = open(filename, 'w')
for original in values:
try:
parsed = cls.integer(original, bits)
except TypeError:
parsed = repr(original)
print >> fd, parsed
fd.close() | 0.002217 |
def _preprocess(self, data):
"""
Internal function to perform fit_transform() on all but last step.
"""
transformed_data = _copy(data)
for name, step in self._transformers[:-1]:
transformed_data = step.fit_transform(transformed_data)
if type(transformed_data) != _tc.SFrame:
raise RuntimeError("The transform function in step '%s' did not"
" return an SFrame (got %s instead)." % (name,
type(transformed_data).__name__))
return transformed_data | 0.008389 |
def entity_copy(args):
""" Copy entities from one workspace to another. """
if not args.to_workspace:
args.to_workspace = args.workspace
if not args.to_project:
args.to_project = args.project
if (args.project == args.to_project
and args.workspace == args.to_workspace):
eprint("destination project and namespace must differ from"
" source workspace")
return 1
if not args.entities:
# get a list of entities from source workspace matching entity_type
ents = _entity_paginator(args.project, args.workspace, args.entity_type,
page_size=500, filter_terms=None,
sort_direction='asc')
args.entities = [e['name'] for e in ents]
prompt = "Copy {0} {1}(s) from {2}/{3} to {4}/{5}?\n[Y\\n]: "
prompt = prompt.format(len(args.entities), args.entity_type, args.project,
args.workspace, args.to_project, args.to_workspace)
if not args.yes and not _confirm_prompt("", prompt):
return
r = fapi.copy_entities(args.project, args.workspace, args.to_project,
args.to_workspace, args.entity_type, args.entities,
link_existing_entities=args.link)
fapi._check_response_code(r, 201)
return 0 | 0.002222 |
def _mod_info(modname, toskip=[], onlylocals=True):
"""
Determines if a module is a module or a package and whether or not
it has classes or functions.
"""
hascls = hasfunc = False
for localnm, fqnm, obj in zip(*find_mod_objs(modname, onlylocals=onlylocals)):
if localnm not in toskip:
hascls = hascls or inspect.isclass(obj)
hasfunc = hasfunc or inspect.isroutine(obj)
if hascls and hasfunc:
break
# find_mod_objs has already imported modname
# TODO: There is probably a cleaner way to do this, though this is pretty
# reliable for all Python versions for most cases that we care about.
pkg = sys.modules[modname]
ispkg = (hasattr(pkg, '__file__') and isinstance(pkg.__file__, str) and
os.path.split(pkg.__file__)[1].startswith('__init__.py'))
return ispkg, hascls, hasfunc | 0.002225 |