text (string, lengths 78–104k) | score (float64, 0–0.18) |
---|---|
def stop(self) -> None:
"""
Stops the running simulation once the current event is done executing.
"""
if self.is_running:
if _logger is not None:
self._log(INFO, "stop", __now=self.now())
self._is_running = False | 0.007018 |
def namingConventionDecorator(self, namingConvention):
"""
:type namingConvention: INamingConvention
"""
def decoratorFunction(cls):
SyntheticClassController(cls).setNamingConvention(namingConvention)
return cls
return decoratorFunction | 0.01 |
def _probe_positions(probe, group):
"""Return the positions of a probe channel group."""
positions = probe['channel_groups'][group]['geometry']
channels = _probe_channels(probe, group)
return np.array([positions[channel] for channel in channels]) | 0.003817 |
def process_action(self, request, queryset):
"""
Publishes the selected objects by passing the value of \
'when' to the object's publish method. The object's \
`purge_archives` method is also called to limit the number \
of old items that we keep around. The action is logged as \
either 'published' or 'scheduled' depending on the value of \
'when', and the user is notified with a message.
Returns a 'render redirect' to the result of the \
`get_done_url` method.
"""
form = self.form(request.POST)
if form.is_valid():
when = form.cleaned_data.get('when')
count = 0
for obj in queryset:
count += 1
obj.publish(user=request.user, when=when)
obj.purge_archives()
object_url = self.get_object_url(obj)
if obj.state == obj.PUBLISHED:
self.log_action(
obj, CMSLog.PUBLISH, url=object_url)
else:
self.log_action(
obj, CMSLog.SCHEDULE, url=object_url)
message = "%s objects published." % count
self.write_message(message=message)
return self.render(request, redirect_url=self.get_done_url(),
message=message,
collect_render_data=False)
return self.render(request, queryset=queryset, publish_form=form, action='Publish') | 0.003896 |
def get(self):
"""Render the List-of-Analyses overview page."""
return self.render(
'index.html',
databench_version=DATABENCH_VERSION,
meta_infos=self.meta_infos(),
**self.info
) | 0.008 |
def _get_whitelist_page_generator(self, start_page=0, page_size=None):
"""
Creates a generator from the |get_whitelist_page| method that returns each successive page.
:param int start_page: The page to start on.
:param int page_size: The size of each page.
:return: The generator.
"""
return Page.get_page_generator(self.get_whitelist_page, start_page, page_size) | 0.009501 |
def vtt_formatter(subtitles, padding_before=0, padding_after=0):
"""
Serialize a list of subtitles according to the VTT format, with optional time padding.
"""
text = srt_formatter(subtitles, padding_before, padding_after)
text = 'WEBVTT\n\n' + text.replace(',', '.')
return text | 0.006601 |
def task_id_arg(*args, **kwargs):
"""
This is the `TASK_ID` argument consumed by many Transfer Task operations.
It accepts a toggle for whether or not it is required.
Usage:
>>> @task_id_option
>>> def command_func(task_id):
>>> ...
or
>>> @task_id_option(required=False)
>>> def command_func(task_id):
>>> ...
By default, the task ID is made required; pass `required=False` to the
decorator arguments to make it optional.
"""
def inner_decorator(f, required=True):
f = click.argument("TASK_ID", required=required)(f)
return f
return detect_and_decorate(inner_decorator, args, kwargs) | 0.001479 |
def make_id(self):
"""Create a new URL id that is unique to the parent container"""
if self.url_id is None: # Set id only if empty
self.url_id = select([func.coalesce(func.max(self.__class__.url_id + 1), 1)],
self.__class__.parent == self.parent) | 0.013746 |
def customize_lexer_priority(file_name, accuracy, lexer):
"""Customize lexer priority"""
priority = lexer.priority
lexer_name = lexer.name.lower().replace('sharp', '#')
if lexer_name in LANGUAGES:
priority = LANGUAGES[lexer_name]
elif lexer_name == 'matlab':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy += 0.01
if '.h' not in available_extensions:
accuracy += 0.01
elif lexer_name == 'objective-c':
available_extensions = extensions_in_same_folder(file_name)
if '.mat' in available_extensions:
accuracy -= 0.01
else:
accuracy += 0.01
if '.h' in available_extensions:
accuracy += 0.01
return (accuracy, priority, lexer) | 0.0012 |
def run_vasp(self, run_dir: PathLike = ".",
vasp_cmd: list = None,
output_file: PathLike = "vasp.out",
err_file: PathLike = "vasp.err"):
"""
Write input files and run VASP.
:param run_dir: Where to write input files and do the run.
:param vasp_cmd: Args to be supplied to run VASP. Otherwise, the
PMG_VASP_EXE in .pmgrc.yaml is used.
:param output_file: File to write output.
:param err_file: File to write err.
"""
self.write_input(output_dir=run_dir)
vasp_cmd = vasp_cmd or SETTINGS.get("PMG_VASP_EXE")
if not vasp_cmd:
raise RuntimeError("You need to supply vasp_cmd or set the PMG_VASP_EXE in .pmgrc.yaml to run VASP.")
vasp_cmd = [os.path.expanduser(os.path.expandvars(t)) for t in vasp_cmd]
with cd(run_dir):
with open(output_file, 'w') as f_std, \
open(err_file, "w", buffering=1) as f_err:
subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err) | 0.0065 |
async def popHiveKey(self, path):
''' Remove and return the value of a key in the cell default hive '''
perm = ('hive:pop',) + path
self.user.allowed(perm)
return await self.cell.hive.pop(path) | 0.008889 |
def _sanitize_data(self, data):
"""
Some CIF files do not conform to spec. This function corrects
known issues, particularly in regard to Springer Materials/
Pauling File CIFs.
This function is here so that CifParser can assume its
input conforms to spec, simplifying its implementation.
:param data: CifBlock
:return: data CifBlock
"""
"""
This part of the code deals with handling formats of data as found in
CIF files extracted from the Springer Materials/Pauling File
databases, and that are different from standard ICSD formats.
"""
# check for implicit hydrogens, warn if any present
if "_atom_site_attached_hydrogens" in data.data.keys():
attached_hydrogens = [str2float(x) for x in data.data['_atom_site_attached_hydrogens']
if str2float(x) != 0]
if len(attached_hydrogens) > 0:
self.errors.append("Structure has implicit hydrogens defined, "
"parsed structure unlikely to be suitable for use "
"in calculations unless hydrogens added.")
# Check to see if "_atom_site_type_symbol" exists, as some test CIFs do
# not contain this key.
if "_atom_site_type_symbol" in data.data.keys():
# Keep a track of which data row needs to be removed.
# Example of a row: Nb,Zr '0.8Nb + 0.2Zr' .2a .m-3m 0 0 0 1 14
# 'rhombic dodecahedron, Nb<sub>14</sub>'
# Without this code, the above row in a structure would be parsed
# as an ordered site with only Nb (since
# CifParser would try to parse the first two characters of the
# label "Nb,Zr") and occupancy=1.
# However, this site is meant to be a disordered site with 0.8 of
# Nb and 0.2 of Zr.
idxs_to_remove = []
new_atom_site_label = []
new_atom_site_type_symbol = []
new_atom_site_occupancy = []
new_fract_x = []
new_fract_y = []
new_fract_z = []
for idx, el_row in enumerate(data["_atom_site_label"]):
# CIF files from the Springer Materials/Pauling File have
# switched the label and symbol. Thus, in the
# above shown example row, '0.8Nb + 0.2Zr' is the symbol.
# Below, we split the strings on ' + ' to
# check if the length (or number of elements) in the label and
# symbol are equal.
if len(data["_atom_site_type_symbol"][idx].split(' + ')) > \
len(data["_atom_site_label"][idx].split(' + ')):
# Dictionary to hold extracted elements and occupancies
els_occu = {}
# parse symbol to get element names and occupancy and store
# in "els_occu"
symbol_str = data["_atom_site_type_symbol"][idx]
symbol_str_lst = symbol_str.split(' + ')
for elocc_idx in range(len(symbol_str_lst)):
# Remove any bracketed items in the string
symbol_str_lst[elocc_idx] = re.sub(
r'\([0-9]*\)', '',
symbol_str_lst[elocc_idx].strip())
# Extract element name and its occupancy from the
# string, and store it as a
# key-value pair in "els_occ".
els_occu[str(re.findall(r'\D+', symbol_str_lst[
elocc_idx].strip())[1]).replace('<sup>', '')] = \
float('0' + re.findall(r'\.?\d+', symbol_str_lst[
elocc_idx].strip())[1])
x = str2float(data["_atom_site_fract_x"][idx])
y = str2float(data["_atom_site_fract_y"][idx])
z = str2float(data["_atom_site_fract_z"][idx])
for et, occu in els_occu.items():
# new atom site labels have 'fix' appended
new_atom_site_label.append(
et + '_fix' + str(len(new_atom_site_label)))
new_atom_site_type_symbol.append(et)
new_atom_site_occupancy.append(str(occu))
new_fract_x.append(str(x))
new_fract_y.append(str(y))
new_fract_z.append(str(z))
idxs_to_remove.append(idx)
# Remove the original row by iterating over all keys in the CIF
# data looking for lists, which indicates
# multiple data items, one for each row, and remove items from the
# list that corresponds to the removed row,
# so that it's not processed by the rest of this function (which
# would result in an error).
for original_key in data.data:
if isinstance(data.data[original_key], list):
for id in sorted(idxs_to_remove, reverse=True):
del data.data[original_key][id]
if len(idxs_to_remove) > 0:
self.errors.append("Pauling file corrections applied.")
data.data["_atom_site_label"] += new_atom_site_label
data.data["_atom_site_type_symbol"] += new_atom_site_type_symbol
data.data["_atom_site_occupancy"] += new_atom_site_occupancy
data.data["_atom_site_fract_x"] += new_fract_x
data.data["_atom_site_fract_y"] += new_fract_y
data.data["_atom_site_fract_z"] += new_fract_z
"""
This fixes inconsistencies in naming of several magCIF tags
as a result of magCIF being in widespread use prior to
specification being finalized (on advice of Branton Campbell).
"""
if self.feature_flags["magcif"]:
# CIF-1 style has all underscores, interim standard
# had period before magn instead of before the final
# component (e.g. xyz)
# we want to standardize on a specific key, to simplify
# parsing code
correct_keys = ["_space_group_symop_magn_operation.xyz",
"_space_group_symop_magn_centering.xyz",
"_space_group_magn.name_BNS",
"_space_group_magn.number_BNS",
"_atom_site_moment_crystalaxis_x",
"_atom_site_moment_crystalaxis_y",
"_atom_site_moment_crystalaxis_z",
"_atom_site_moment_label"]
# cannot mutate OrderedDict during enumeration,
# so store changes we want to make
changes_to_make = {}
for original_key in data.data:
for correct_key in correct_keys:
# convert to all underscore
trial_key = "_".join(correct_key.split("."))
test_key = "_".join(original_key.split("."))
if trial_key == test_key:
changes_to_make[correct_key] = original_key
# make changes
for correct_key, original_key in changes_to_make.items():
data.data[correct_key] = data.data[original_key]
# renamed_keys maps interim_keys to final_keys
renamed_keys = {
"_magnetic_space_group.transform_to_standard_Pp_abc":
"_space_group_magn.transform_BNS_Pp_abc"}
changes_to_make = {}
for interim_key, final_key in renamed_keys.items():
if data.data.get(interim_key):
changes_to_make[final_key] = interim_key
if len(changes_to_make) > 0:
self.errors.append("Keys changed to match new magCIF specification.")
for final_key, interim_key in changes_to_make.items():
data.data[final_key] = data.data[interim_key]
# check for finite precision frac co-ordinates (e.g. 0.6667 instead of 0.6666666...7)
# this can sometimes cause serious issues when applying symmetry operations
important_fracs = (1/3., 2/3.)
fracs_to_change = {}
for label in ('_atom_site_fract_x', '_atom_site_fract_y', '_atom_site_fract_z'):
if label in data.data.keys():
for idx, frac in enumerate(data.data[label]):
try:
frac = str2float(frac)
except:
# co-ordinate might not be defined e.g. '?'
continue
for comparison_frac in important_fracs:
if abs(1 - frac/comparison_frac) < 1e-4:
fracs_to_change[(label, idx)] = str(comparison_frac)
if fracs_to_change:
self.errors.append("Some fractional co-ordinates rounded to ideal values to "
"avoid finite precision errors.")
for (label, idx), val in fracs_to_change.items():
data.data[label][idx] = val
return data | 0.001277 |
def sample(self, mu):
"""
Return random samples from this Binomial distribution.
Parameters
----------
mu : array-like of shape n_samples or shape (n_simulations, n_samples)
expected values
Returns
-------
random_samples : np.array of same shape as mu
"""
number_of_trials = self.levels
success_probability = mu / number_of_trials
return np.random.binomial(n=number_of_trials, p=success_probability,
size=None) | 0.003623 |
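A minimal standalone sketch of the same draw, with a hypothetical `levels` value standing in for `self.levels`, just to show the shapes involved:
import numpy as np

levels = 10                         # hypothetical number of trials (self.levels)
mu = np.array([2.0, 5.0, 8.0])      # expected values, one per sample
samples = np.random.binomial(n=levels, p=mu / levels, size=None)
print(samples.shape)                # (3,) -- same shape as mu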
def import_submodules(package_name, *submodules):
"""Import all submodules by package name."""
package = sys.modules[package_name]
return {
name: importlib.import_module(package_name + '.' + name)
for _, name, _ in pkgutil.walk_packages(package.__path__)
if not submodules or name in submodules
} | 0.002976 |
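A quick usage sketch against a standard-library package (the target package must already be imported, since the helper looks it up in sys.modules):
import json                          # package whose submodules we want

mods = import_submodules('json', 'decoder', 'encoder')
print(sorted(mods))                  # ['decoder', 'encoder']
print(mods['decoder'].JSONDecoder)   # the returned values are the imported submodule objects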
def _steps_to_slices():
"""parse timesteps and snapshots arguments and return slices"""
if not (conf.core.timesteps or conf.core.snapshots):
# default to the last snap
conf.core.timesteps = None
conf.core.snapshots = slice(-1, None, None)
return
elif conf.core.snapshots:
# snapshots take precedence over timesteps
# if both are defined
conf.core.timesteps = None
steps = conf.core.snapshots
else:
conf.core.snapshots = None
steps = conf.core.timesteps
steps = steps.split(':')
steps[0] = int(steps[0]) if steps[0] else None
if len(steps) == 1:
steps.append(steps[0] + 1)
steps[1] = int(steps[1]) if steps[1] else None
if len(steps) != 3:
steps = steps[0:2] + [1]
steps[2] = int(steps[2]) if steps[2] else None
steps = slice(*steps)
if conf.core.snapshots is not None:
conf.core.snapshots = steps
else:
conf.core.timesteps = steps | 0.001006 |
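The parsing rule above turns a 'begin:end:step' string into a slice; a standalone sketch of the same steps on one sample value (without the conf object) shows the result:
steps = '100:200'.split(':')                     # ['100', '200']
steps[0] = int(steps[0]) if steps[0] else None
if len(steps) == 1:
    steps.append(steps[0] + 1)                   # a single value N means N:N+1
steps[1] = int(steps[1]) if steps[1] else None
if len(steps) != 3:
    steps = steps[0:2] + [1]                     # default step of 1
steps[2] = int(steps[2]) if steps[2] else None
print(slice(*steps))                             # slice(100, 200, 1)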
def lesspager(lines):
"""
Use for streaming writes to a less process
Taken from pydoc.pipepager:
/usr/lib/python2.7/pydoc.py
and
/usr/lib/python3.5/pydoc.py
"""
cmd = "less -S"
if sys.version_info[0] >= 3:
"""Page through text by feeding it to another program."""
import subprocess
proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
try:
with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
try:
for l in lines:
pipe.write(l)
except KeyboardInterrupt:
# We've hereby abandoned whatever text hasn't been written,
# but the pager is still in control of the terminal.
pass
except OSError:
pass # Ignore broken pipes caused by quitting the pager program.
while True:
try:
proc.wait()
break
except KeyboardInterrupt:
# Ignore ctl-c like the pager itself does. Otherwise the pager is
# left running and the terminal is in raw mode and unusable.
pass
else:
proc = os.popen(cmd, 'w')
try:
for l in lines:
proc.write(l)
except IOError:
proc.close()
sys.exit() | 0.004255 |
def search_cloud_integration_deleted_for_facets(self, **kwargs): # noqa: E501
"""Lists the values of one or more facets over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param FacetsSearchRequestContainer body:
:return: ResponseContainerFacetsResponseContainer
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_deleted_for_facets_with_http_info(**kwargs) # noqa: E501
return data | 0.001885 |
def _simulate_client(plaintext_password, init_pbkdf2_salt, cnonce, server_challenge):
"""
A implementation of the JavaScript client part.
Needful for finding bugs.
"""
# log.debug("_simulate_client(plaintext_password='%s', init_pbkdf2_salt='%s', cnonce='%s', server_challenge='%s')",
# plaintext_password, init_pbkdf2_salt, cnonce, server_challenge
# )
pbkdf2_temp_hash = hexlify_pbkdf2(
plaintext_password,
salt=init_pbkdf2_salt,
iterations=app_settings.ITERATIONS1,
length=app_settings.PBKDF2_BYTE_LENGTH
)
first_pbkdf2_part = pbkdf2_temp_hash[:PBKDF2_HALF_HEX_LENGTH]
second_pbkdf2_part = pbkdf2_temp_hash[PBKDF2_HALF_HEX_LENGTH:]
second_pbkdf2_salt = cnonce + server_challenge
pbkdf2_hash = hexlify_pbkdf2(
first_pbkdf2_part,
salt=second_pbkdf2_salt,
iterations=app_settings.ITERATIONS2,
length=app_settings.PBKDF2_BYTE_LENGTH
)
# log.debug("_simulate_client() locals():\n%s", pprint.pformat(locals()))
return pbkdf2_hash, second_pbkdf2_part | 0.002783 |
def docs(output=DOC_OUTPUT, proj_settings=PROJ_SETTINGS, github=False):
"""Generate API documentation (using Sphinx).
:param output: Output directory.
:param proj_settings: Django project settings to use.
:param github: Convert to GitHub-friendly format?
"""
local("export PYTHONPATH='' && "
"export DJANGO_SETTINGS_MODULE=%s && "
"sphinx-build -b html %s %s" % (proj_settings, DOC_INPUT, output),
capture=False)
if _parse_bool(github):
local("touch %s/.nojekyll" % output, capture=False) | 0.001795 |
def drop_db(url):
"""Drop specified database
"""
parsed_url = urlparse(url)
db_name = parsed_url.path
db_name = db_name.strip("/")
db = connect("postgresql://" + parsed_url.netloc)
# check that db exists
q = """SELECT 1 as exists
FROM pg_database
WHERE datname = '{db_name}'""".format(
db_name=db_name
)
if db.query(q).fetchone():
# DROP DATABASE must be run outside of a transaction
conn = db.engine.connect()
conn.execute("commit")
conn.execute("DROP DATABASE " + db_name)
conn.close() | 0.001675 |
def _get_new_csv_writers(trans_title, meta_title,
trans_csv_path, meta_csv_path):
"""
Prepare new csv writers, write title rows and return them.
"""
trans_writer = UnicodeWriter(trans_csv_path)
trans_writer.writerow(trans_title)
meta_writer = UnicodeWriter(meta_csv_path)
meta_writer.writerow(meta_title)
return trans_writer, meta_writer | 0.002525 |
def read(self):
"""
If there is data available to be read from the transport, reads the data and tries to parse it as a protobuf message. If the parsing succeeds, returns a protobuf object.
Otherwise, returns None.
"""
if not self.ready_to_read():
return None
data = self._read()
if data is None:
return None
return self._parse_message(data) | 0.006961 |
def _X509__asn1date_to_datetime(asn1date):
"""
Converts openssl ASN1_TIME object to python datetime.datetime
"""
bio = Membio()
libcrypto.ASN1_TIME_print(bio.bio, asn1date)
pydate = datetime.strptime(str(bio), "%b %d %H:%M:%S %Y %Z")
return pydate.replace(tzinfo=utc) | 0.006757 |
def add_text_memo(self, memo_text):
"""Set the memo for the transaction to a new :class:`TextMemo
<stellar_base.memo.TextMemo>`.
:param memo_text: The text for the memo to add.
:type memo_text: str, bytes
:return: This builder instance.
"""
memo_text = memo.TextMemo(memo_text)
return self.add_memo(memo_text) | 0.005333 |
def free(self):
"""Free the map"""
if self._ptr is None:
return
Gauged.map_free(self.ptr)
SparseMap.ALLOCATIONS -= 1
self._ptr = None | 0.010811 |
def get_allow_repeat_items_metadata(self):
"""get the metadata for allow repeat items"""
metadata = dict(self._allow_repeat_items_metadata)
metadata.update({'existing_id_values': self.my_osid_object_form._my_map['allowRepeatItems']})
return Metadata(**metadata) | 0.010239 |
def main():
""" command line script """
# boilerplate
print("Ontospy " + ontospy.VERSION)
ontospy.get_or_create_home_repo()
ONTOSPY_LOCAL_MODELS = ontospy.get_home_location()
opts, args = parse_options()
sTime = time.time()
# switch dir and start server
startServer(port=DEFAULT_PORT, location=ONTOSPY_LOCAL_MODELS)
# finally:
# print some stats....
eTime = time.time()
tTime = eTime - sTime
printDebug("-" * 10)
printDebug("Time: %0.2fs" % tTime) | 0.036247 |
def get_context_data(self, **kwargs):
""" Returns the context data to provide to the template. """
context = super().get_context_data(**kwargs)
# Insert the considered topic and the associated forum into the context
topic = self.get_topic()
context['topic'] = topic
context['forum'] = topic.forum
# Handles the case when a poll is associated to the topic
try:
if hasattr(topic, 'poll') and topic.poll.options.exists():
context['poll'] = topic.poll
context['poll_form'] = self.poll_form_class(poll=topic.poll)
context['view_results_action'] = self.request.GET.get('view_results', None)
context['change_vote_action'] = self.request.GET.get('change_vote', None)
except ObjectDoesNotExist: # pragma: no cover
pass
return context | 0.004464 |
def activate(self, engine):
"""
Activates the Component.
:param engine: Engine to attach the Component to.
:type engine: QObject
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Activating '{0}' Component.".format(self.__class__.__name__))
self.__engine = engine
self.__settings = self.__engine.settings
self.__settings_section = self.name
self.activated = True
return True | 0.006122 |
def predictor(self, (i, j, A, alpha, Bb)):
"Add to chart any rules for B that could help extend this edge."
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs]) | 0.007353 |
def init(args=None):
"""Initialize the rabit library with arguments"""
if args is None:
args = []
arr = (ctypes.c_char_p * len(args))()
arr[:] = args
_LIB.RabitInit(len(arr), arr) | 0.004831 |
def verify(self, verifier, consumer_key=None, consumer_secret=None,
access_token=None, access_token_secret=None):
"""
After converting the token into verifier, call this to finalize the
authorization.
"""
# Stored values can be supplied to verify
self.consumer_key = consumer_key or self.consumer_key
self.consumer_secret = consumer_secret or self.consumer_secret
self.access_token = access_token or self.access_token
self.access_token_secret = access_token_secret or self.access_token_secret
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
resource_owner_key=self.access_token,
resource_owner_secret=self.access_token_secret,
verifier=verifier)
r = requests.post(self.url('ACCESS_TOKEN'), auth=oauth)
if r.status_code == 200:
creds = parse_qs(r.content)
else:
return (False, r.content)
self.finalize_oauth(creds.get('oauth_token')[0],
creds.get('oauth_token_secret')[0])
return (True, None) | 0.004299 |
def start_completion(self, buffer_name=None, select_first=False,
select_last=False, insert_common_part=False,
complete_event=None):
"""
Start asynchronous autocompletion of this buffer.
(This will do nothing if a previous completion was still in progress.)
"""
buffer_name = buffer_name or self.current_buffer_name
completer = self._async_completers.get(buffer_name)
if completer:
completer(select_first=select_first,
select_last=select_last,
insert_common_part=insert_common_part,
complete_event=CompleteEvent(completion_requested=True)) | 0.005533 |
def replace_nones(list_, repl=-1):
r"""
Recursively removes Nones in all lists and sublists and replaces them with
the repl variable
Args:
list_ (list):
repl (obj): replacement value
Returns:
list
CommandLine:
python -m utool.util_list --test-replace_nones
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_list import * # NOQA
>>> # build test data
>>> list_ = [None, 0, 1, 2]
>>> repl = -1
>>> # execute function
>>> repl_list = replace_nones(list_, repl)
>>> # verify results
>>> result = str(repl_list)
>>> print(result)
[-1, 0, 1, 2]
"""
repl_list = [
repl if item is None else (
replace_nones(item, repl) if isinstance(item, list) else item
)
for item in list_
]
return repl_list | 0.001122 |
def do_levmarq(s, param_names, damping=0.1, decrease_damp_factor=10.,
run_length=6, eig_update=True, collect_stats=False, rz_order=0,
run_type=2, **kwargs):
"""
Runs Levenberg-Marquardt optimization on a state.
Convenience wrapper for LMGlobals. Same keyword args, but the defaults
have been set to useful values for optimizing globals.
See LMGlobals and LMEngine for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
LMGlobals : Optimizer object; the workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
if rz_order > 0:
aug = AugmentedState(s, param_names, rz_order=rz_order)
lm = LMAugmentedState(aug, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, eig_update=
eig_update, **kwargs)
else:
lm = LMGlobals(s, param_names, damping=damping, run_length=run_length,
decrease_damp_factor=decrease_damp_factor, eig_update=
eig_update, **kwargs)
if run_type == 2:
lm.do_run_2()
elif run_type == 1:
lm.do_run_1()
else:
raise ValueError('run_type=1,2 only')
if collect_stats:
return lm.get_termination_stats() | 0.006032 |
def upload_tree(self, src, dst, ignore=None):
"""Recursively upload a directory tree.
Although similar to shutil.copytree we don't follow symlinks.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
try:
self.conn.mkd(dst)
except error_perm:
pass
errors = []
for name in names:
if name in ignored_names:
continue
src_name = os.path.join(src, name)
dst_name = os.path.join(dst, name)
try:
if os.path.islink(src_name):
pass
elif os.path.isdir(src_name):
self.upload_tree(src_name, dst_name, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
self.put(src_name, dst_name)
except Exception as why:
errors.append((src_name, dst_name, str(why)))
return dst | 0.001826 |
def AnalizarRemito(self, ret, archivo=None):
"Extrae el resultado del remito, si existen en la respuesta XML"
if ret:
self.CodRemito = ret.get("codRemito")
self.TipoComprobante = ret.get("tipoComprobante")
self.PuntoEmision = ret.get("puntoEmision")
datos_aut = ret.get('datosAutorizacion')
if datos_aut:
self.NroRemito = datos_aut.get('nroRemito')
self.CodAutorizacion = datos_aut.get('codAutorizacion')
self.FechaEmision = datos_aut.get('fechaEmision')
self.FechaVencimiento = datos_aut.get('fechaVencimiento')
self.Estado = ret.get('estado')
self.Resultado = ret.get('resultado')
self.QR = ret.get('qr') or ""
if archivo:
qr = base64.b64decode(self.QR)
f = open(archivo, "wb")
f.write(qr)
f.close() | 0.002099 |
def to_source(node, indentation=' ' * 4):
"""Return source code of a given AST."""
if isinstance(node, gast.AST):
node = gast.gast_to_ast(node)
generator = SourceWithCommentGenerator(indentation, False,
astor.string_repr.pretty_string)
generator.visit(node)
generator.result.append('\n')
return astor.source_repr.pretty_source(generator.result).lstrip() | 0.017073 |
def crop(self, doy, depth, lat, lon, var):
""" Crop a subset of the dataset for each var
Given doy, depth, lat and lon, it returns the smallest subset
that still contains the requested coordinates inside it.
It handles special cases like a region around Greenwich and
the international date line.
Accepts 0 to 360 and -180 to 180 longitude references.
It extends the time and longitude coordinates to simplify the use
of series. For example, a ship track can be requested with
a longitude sequence like [352, 358, 364, 369, 380], and
the equivalent for day of year above 365.
"""
dims, idx = cropIndices(self.dims, lat, lon, depth, doy)
subset = {}
for v in var:
subset[v] = ma.asanyarray([
self.ncs[tnn][v][0, idx['zn'], idx['yn'], idx['xn']] \
for tnn in idx['tn']])
return subset, dims | 0.003972 |
def extract_variants(pattern):
"""Extract the pattern variants (ie. {foo,bar}baz = foobaz or barbaz)."""
v1, v2 = pattern.find('{'), pattern.find('}')
if v1 > -1 and v2 > v1:
variations = pattern[v1+1:v2].split(',')
variants = [pattern[:v1] + v + pattern[v2+1:] for v in variations]
else:
variants = [pattern]
return list(_deduplicate(variants)) | 0.002571 |
async def update_websub(self, config, hub):
""" Update WebSub hub to know about this feed """
try:
LOGGER.debug("WebSub: Notifying %s of %s", hub, self.url)
request = await utils.retry_post(
config,
hub,
data={
'hub.mode': 'publish',
'hub.url': self.url
})
if request.success:
LOGGER.info("%s: WebSub notification sent to %s",
self.url, hub)
else:
LOGGER.warning("%s: Hub %s returned status code %s: %s", self.url, hub,
request.status, request.text)
except Exception as err: # pylint:disable=broad-except
LOGGER.warning("WebSub %s: got %s: %s",
hub, err.__class__.__name__, err) | 0.003386 |
def check_mod_enabled(mod):
'''
Checks to see if the specific mod symlink is in /etc/apache2/mods-enabled.
This will only be functional on Debian-based operating systems (Ubuntu,
Mint, etc).
CLI Examples:
.. code-block:: bash
salt '*' apache.check_mod_enabled status
salt '*' apache.check_mod_enabled status.load
salt '*' apache.check_mod_enabled status.conf
'''
if mod.endswith('.load') or mod.endswith('.conf'):
mod_file = mod
else:
mod_file = '{0}.load'.format(mod)
return os.path.islink('/etc/apache2/mods-enabled/{0}'.format(mod_file)) | 0.001603 |
def clear_numeric_score_increment(self):
"""Clears the numeric score increment.
raise: NoAccess - ``Metadata.isRequired()`` or
``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
if (self.get_numeric_score_increment_metadata().is_read_only() or
self.get_numeric_score_increment_metadata().is_required()):
raise errors.NoAccess()
self._my_map['numericScoreIncrement'] = self._numeric_score_increment_default | 0.006182 |
def QA_util_getBetweenQuarter(begin_date, end_date):
"""
# Add the start and end dates of each quarter
"""
quarter_list = {}
month_list = QA_util_getBetweenMonth(begin_date, end_date)
for value in month_list:
tempvalue = value.split("-")
year = tempvalue[0]
if tempvalue[1] in ['01', '02', '03']:
quarter_list[year + "Q1"] = ['%s-01-01' % year, '%s-03-31' % year]
elif tempvalue[1] in ['04', '05', '06']:
quarter_list[year + "Q2"] = ['%s-04-01' % year, '%s-06-30' % year]
elif tempvalue[1] in ['07', '08', '09']:
quarter_list[year + "Q3"] = ['%s-07-31' % year, '%s-09-30' % year]
elif tempvalue[1] in ['10', '11', '12']:
quarter_list[year + "Q4"] = ['%s-10-01' % year, '%s-12-31' % year]
return(quarter_list) | 0.001245 |
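A standalone sketch of the bucketing rule, with a hand-built month list standing in for the output of QA_util_getBetweenMonth (not shown here):
months = ['2019-01', '2019-02', '2019-03', '2019-04']   # stand-in month list
quarter_list = {}
for value in months:
    year, month = value.split("-")
    if month in ['01', '02', '03']:
        quarter_list[year + "Q1"] = ['%s-01-01' % year, '%s-03-31' % year]
    elif month in ['04', '05', '06']:
        quarter_list[year + "Q2"] = ['%s-04-01' % year, '%s-06-30' % year]
print(quarter_list)
# {'2019Q1': ['2019-01-01', '2019-03-31'], '2019Q2': ['2019-04-01', '2019-06-30']}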
def ServiceWorker_skipWaiting(self, scopeURL):
"""
Function path: ServiceWorker.skipWaiting
Domain: ServiceWorker
Method name: skipWaiting
Parameters:
Required arguments:
'scopeURL' (type: string) -> No description
No return value.
"""
assert isinstance(scopeURL, (str,)
), "Argument 'scopeURL' must be of type '['str']'. Received type: '%s'" % type(
scopeURL)
subdom_funcs = self.synchronous_command('ServiceWorker.skipWaiting',
scopeURL=scopeURL)
return subdom_funcs | 0.051429 |
def _to_dict(objects):
'''
Potentially interprets a string as JSON for usage with mongo
'''
try:
if isinstance(objects, six.string_types):
objects = salt.utils.json.loads(objects)
except ValueError as err:
log.error("Could not parse objects: %s", err)
raise err
return objects | 0.002967 |
def start_charge(self):
"""Start charging the Tesla Vehicle."""
if not self.__charger_state:
data = self._controller.command(self._id, 'charge_start',
wake_if_asleep=True)
if data and data['response']['result']:
self.__charger_state = True
self.__manual_update_time = time.time() | 0.005115 |
def _write_textfile(filename, text):
"""
Write `text` into file `filename`. If the target directory does
not exist, create it.
"""
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
outf = open(filename, 'w')
outf.write(text)
outf.close() | 0.003125 |
def _start_drag_row(self, event):
"""Start dragging a row"""
self._dragged_row = self.identify_row(event.y) # identify dragged row
bbox = self.bbox(self._dragged_row)
self._dy = bbox[1] - event.y # distance between cursor and row upper border
self._dragged_row_y = bbox[1] # y coordinate of dragged row upper border
self._dragged_row_height = bbox[3]
# configure dragged row preview
self._visual_drag.configure(displaycolumns=self['displaycolumns'],
height=1)
for col in self['columns']:
self._visual_drag.column(col, width=self.column(col, 'width'))
if 'tree' in tuple(str(p) for p in self['show']):
self._visual_drag.configure(show='tree')
else:
self._visual_drag.configure(show='')
self._visual_drag.place(in_=self, x=0, y=bbox[1],
height=self._visual_drag.winfo_reqheight() + 2,
anchor='nw', relwidth=1)
self._visual_drag.selection_add(self._dragged_row)
self.selection_remove(self._dragged_row)
self._visual_drag.update_idletasks()
self._visual_drag.see(self._dragged_row)
self._visual_drag.update_idletasks()
self._visual_drag.xview_moveto(self.xview()[0]) | 0.002974 |
def clear_scroll(self, scroll_id, params=None):
"""
Clear the scroll request created by specifying the scroll parameter to
search.
`<http://www.elasticsearch.org/guide/reference/api/search/scroll/>`_
:arg scroll_id: The scroll ID or a list of scroll IDs
"""
if not isinstance(scroll_id, list):
scroll_id = [scroll_id]
body = {
"scroll_id": scroll_id
}
if params and "scroll_id" in params.keys():
params.pop("scroll_id")
_, data = yield self.transport.perform_request('DELETE',
_make_path('_search',
'scroll'),
body=body,
params=params)
raise gen.Return(data) | 0.002162 |
def _set_client_pw(self, v, load=False):
"""
Setter method for client_pw, mapped from YANG variable /cluster/client_pw (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_client_pw is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_client_pw() directly.
YANG Description: Client Pseudo Wire
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=client_pw.client_pw, is_container='container', presence=True, yang_name="client-pw", rest_name="client-pw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Pseudo Wire', u'cli-add-mode': None, u'sort-priority': u'RUNNCFG_MCT_PW_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """client_pw must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=client_pw.client_pw, is_container='container', presence=True, yang_name="client-pw", rest_name="client-pw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Client Pseudo Wire', u'cli-add-mode': None, u'sort-priority': u'RUNNCFG_MCT_PW_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-mct', defining_module='brocade-mct', yang_type='container', is_config=True)""",
})
self.__client_pw = t
if hasattr(self, '_set'):
self._set() | 0.005727 |
def write(self, buffer_size, window_size, x, y, p, address, data):
"""Write a bytestring to an address in memory.
.. note::
This method is included here to maintain API compatibility with an
`alternative implementation of SCP
<https://github.com/project-rig/rig-scp>`_.
Parameters
----------
buffer_size : int
Number of bytes held in an SCP buffer by SARK, determines how many
bytes will be expected in a socket and how many bytes will be
written in each packet.
window_size : int
x : int
y : int
p : int
address : int
The address at which to start writing the data. Addresses are given
within the address space of a SpiNNaker core. See the SpiNNaker
datasheet for more information.
data : :py:class:`bytes`
Data to write into memory. Writes are automatically broken into a
sequence of SCP write commands.
"""
# While there is still data perform a write: get the block to write
# this time around, determine the data type, perform the write and
# increment the address
def packets(address, data):
end = len(data)
pos = 0
while pos < end:
block = data[pos:pos + buffer_size]
block_size = len(block)
dtype = consts.address_length_dtype[(address % 4,
block_size % 4)]
yield scpcall(x, y, p, consts.SCPCommands.write, address,
block_size, dtype, block)
address += block_size
pos += block_size
# Run the event loop and then return the retrieved data
self.send_scp_burst(buffer_size, window_size,
list(packets(address, data))) | 0.001029 |
def wpad_search_urls(subdomain_or_host, fld):
"""
Generate URLs from which to look for a PAC file, based on the subdomain and TLD parts of
a fully-qualified host name.
:param str subdomain_or_host: Subdomain portion of the fully-qualified host name.
For foo.bar.example.com, this is foo.bar.
:param str fld: FLD portion of the fully-qualified host name.
For foo.bar.example.com, this is example.com.
:return: PAC URLs to try in order, according to the WPAD protocol.
:rtype: list[str]
"""
parts = subdomain_or_host.split('.')
search_urls = []
for i in range(1, len(parts)+1):
# Chop off host and move up the subdomain hierarchy.
url = 'http://wpad.{}/wpad.dat'.format('.'.join(parts[i:] + [fld]))
search_urls.append(url)
return search_urls | 0.004711 |
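For example, assuming the function as defined above, the host foo.bar.example.com (split into 'foo.bar' and 'example.com') produces:
urls = wpad_search_urls('foo.bar', 'example.com')
print(urls)
# ['http://wpad.bar.example.com/wpad.dat',
#  'http://wpad.example.com/wpad.dat']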
def norm_join(prefix, suffix):
"""
Join ``prefix`` and ``suffix`` paths
and return the resulting path, normalized.
:param string prefix: the prefix path
:param string suffix: the suffix path
:rtype: string
"""
if (prefix is None) and (suffix is None):
return "."
if prefix is None:
return os.path.normpath(suffix)
if suffix is None:
return os.path.normpath(prefix)
return os.path.normpath(os.path.join(prefix, suffix)) | 0.002053 |
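A few illustrative calls, assuming norm_join as defined above (results shown are POSIX-style, since os.path.normpath is platform dependent):
print(norm_join("/tmp/foo", "../bar"))   # /tmp/bar
print(norm_join("a//b/", None))          # a/b
print(norm_join(None, None))             # .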
def signable(self, request, authheaders):
"""Creates the signable string for a request and returns it.
Keyword arguments:
request -- A request object which can be consumed by this API.
authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
"""
method = request.method.upper()
md5 = hashlib.md5()
if request.body is not None:
md5.update(request.body)
bodyhash = md5.hexdigest()
ctype = request.get_header("content-type")
date = request.get_header("date")
cheaders = []
cheader_first = True
cheaders_sign = ''
if "headers" in authheaders:
cheaders = authheaders["headers"].split(";")
cheaders.sort()
for cheader in cheaders:
if cheader_first:
cheader_first = False
else:
cheaders_sign += '\n'
cheaders_sign += '{0}: {1}'.format(cheader.lower(), request.get_header(cheader))
requri = request.url.request_uri()
return '{0}\n{1}\n{2}\n{3}\n{4}\n{5}'.format(method, bodyhash, ctype, date, cheaders_sign, requri) | 0.004153 |
def _ReadAppJsonFile(self, relative_path):
"""Reads JSON file from an application directory.
Args:
relative_path: file name relative to application root directory.
Returns:
Parsed JSON data or None if the file does not exist, can't be read or
not a valid JSON file.
"""
try:
with open(os.path.join(sys.path[0], relative_path), 'r') as f:
return json.load(f)
except (IOError, ValueError):
return None | 0.006479 |
def mesh_axis_to_cumprod(self, tensor_shape):
"""For each mesh axis, give the product of previous tensor axes.
Args:
tensor_shape: Shape.
Returns:
list with length self.ndims where each element is an integer or None.
"""
tensor_layout = self.tensor_layout(tensor_shape)
ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)
ta2cumprod = tensor_shape.cumprod
return [None if ta is None else ta2cumprod[ta] for ta in ma2ta] | 0.002119 |
def blend_line(messages, blend_combos=None):
"""
Given a list of messages on the same line, blend them together so that we
end up with one message per actual problem. Note that we can still return
more than one message here if there are two or more different errors for
the line.
"""
blend_combos = blend_combos or BLEND_COMBOS
blend_lists = [[] for _ in range(len(blend_combos))]
blended = []
# first we split messages into each of the possible blendable categories
# so that we have a list of lists of messages which can be blended together
for message in messages:
key = (message.source, message.code)
found = False
for blend_combo_idx, blend_combo in enumerate(blend_combos):
if key in blend_combo:
found = True
blend_lists[blend_combo_idx].append(message)
# note: we use 'found=True' here rather than a simple break/for-else
# because this allows the same message to be put into more than one
# 'bucket'. This means that the same message from pep8 can 'subsume'
# two from pylint, for example.
if not found:
# if we get here, then this is not a message which can be blended,
# so by definition is already blended
blended.append(message)
# we should now have a list of messages which all represent the same
# problem on the same line, so we will sort them according to the priority
# in BLEND and pick the first one
for blend_combo_idx, blend_list in enumerate(blend_lists):
if len(blend_list) == 0:
continue
blend_list.sort(
key=lambda msg: blend_combos[blend_combo_idx].index(
(msg.source, msg.code),
),
)
if blend_list[0] not in blended:
# We may have already added this message if it represents
# several messages in other tools which are not being run -
# for example, pylint missing-docstring is blended with pep257 D100, D101
# and D102, but should not appear 3 times!
blended.append(blend_list[0])
# Some messages from a tool point out an error that in another tool is handled by two
# different errors or more. For example, pylint emits the same warning (multiple-statements)
# for "two statements on a line" separated by a colon and a semi-colon, while pep8 has E701
# and E702 for those cases respectively. In this case, the pylint error will not be 'blended' as
# it will appear in two blend_lists. Therefore we mark anything not taken from the blend list
# as "consumed" and then filter later, to avoid such cases.
for now_used in blend_list[1:]:
now_used.used = True
return [m for m in blended if not getattr(m, 'used', False)] | 0.002435 |
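A minimal, hedged sketch of the blending behaviour with stand-in message objects and a single hypothetical blend combo (the real Message class and BLEND_COMBOS live in the surrounding project and may differ):
class Msg(object):
    """Stand-in for a tool message with the attributes blend_line reads."""
    def __init__(self, source, code):
        self.source, self.code = source, code
    def __repr__(self):
        return '%s/%s' % (self.source, self.code)

combos = [(('pylint', 'line-too-long'), ('pep8', 'E501'))]   # hypothetical pairing
msgs = [Msg('pylint', 'line-too-long'), Msg('pep8', 'E501'), Msg('pyflakes', 'F401')]
print(blend_line(msgs, blend_combos=combos))
# [pyflakes/F401, pylint/line-too-long]  -- one survivor per combo, unrelated messages untouched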
def find_slave_widgets(self,tab):
"""return all the frontends that do not own the kernel attached to the given widget/tab.
Only find frontends owned by the current application. Selection
based on connection file of the kernel.
This function does the conversion tabNumber/widget if needed.
"""
#convert from/to int/richIpythonWidget if needed
if isinstance(tab, int):
tab = self.tab_widget.widget(tab)
km=tab.kernel_manager
#build list of all widgets
widget_list = [self.tab_widget.widget(i) for i in range(self.tab_widget.count())]
# Widgets that are candidates not to be the owner of the kernel have the same connection file as the current widget
filtered_widget_list = ( widget for widget in widget_list if
widget.kernel_manager.connection_file == km.connection_file)
# Get a list of all widgets owning the same kernel and remove them from
# the previous candidates. (better to use sets?)
master_widget_list = self.find_master_tab(tab, as_list=True)
slave_list = [widget for widget in filtered_widget_list if widget not in master_widget_list]
return slave_list | 0.0104 |
def fail(self, message, status=500, **kw):
"""Set a JSON error object and a status to the response
"""
self.request.response.setStatus(status)
result = {"success": False, "errors": message, "status": status}
result.update(kw)
return result | 0.006969 |
def get_cloud_service(self, cloud_service_id):
'''
The Get Cloud Service operation gets all the resources (job collections)
in the cloud service.
cloud_service_id:
The cloud service id
'''
_validate_not_none('cloud_service_id', cloud_service_id)
path = self._get_cloud_services_path(cloud_service_id)
return self._perform_get(path, CloudService) | 0.007109 |
def printHistogram(data, bins=10, height=10, logscale=False, minbin=0,
horizontal=False, char=u"\U00002589",
c=None, bold=True, title='Histogram'):
"""
Ascii histogram printing.
:param int bins: number of histogram bins
:param int height: height of the histogram in character units
:param bool logscale: use logscale for frequencies
:param int minbin: ignore bins before minbin
:param bool horizontal: show histogram horizontally
:param str char: character to be used
:param str,int c: ascii color
:param bool bold: use boldface
:param str title: histogram title
:Example:
.. code-block:: python
from vtkplotter import printHistogram
import numpy as np
d = np.random.normal(size=1000)
printHistogram(d, c='blue', logscale=True, title='my scalars')
printHistogram(d, c=1, horizontal=1)
|printhisto|
"""
# Adapted from http://pyinsci.blogspot.com/2009/10/ascii-histograms.html
if not horizontal: # better aspect ratio
bins *= 2
isimg = isinstance(data, vtk.vtkImageData)
isvol = isinstance(data, vtk.vtkVolume)
if isimg or isvol:
if isvol:
img = data.image
else:
img = data
dims = img.GetDimensions()
nvx = min(100000, dims[0]*dims[1]*dims[2])
idxs = np.random.randint(0, min(dims), size=(nvx, 3))
data = []
for ix, iy, iz in idxs:
d = img.GetScalarComponentAsFloat(ix, iy, iz, 0)
data.append(d)
elif isinstance(data, vtk.vtkActor):
arr = data.polydata().GetPointData().GetScalars()
if not arr:
arr = data.polydata().GetCellData().GetScalars()
if not arr:
return
from vtk.util.numpy_support import vtk_to_numpy
data = vtk_to_numpy(arr)
h = np.histogram(data, bins=bins)
if minbin:
hi = h[0][minbin:-1]
else:
hi = h[0]
if sys.version_info[0] < 3 and char == u"\U00002589":
char = "*" # python2 hack
if char == u"\U00002589" and horizontal:
char = u"\U00002586"
entrs = "\t(entries=" + str(len(data)) + ")"
if logscale:
h0 = np.log10(hi+1)
maxh0 = int(max(h0)*100)/100
title = '(logscale) ' + title + entrs
else:
h0 = hi
maxh0 = max(h0)
title = title + entrs
def _v():
his = ""
if title:
his += title +"\n"
bars = h0 / maxh0 * height
for l in reversed(range(1, height + 1)):
line = ""
if l == height:
line = "%s " % maxh0
else:
line = " |" + " " * (len(str(maxh0))-3)
for c in bars:
if c >= np.ceil(l):
line += char
else:
line += " "
line += "\n"
his += line
his += "%.2f" % h[1][0] + "." * (bins) + "%.2f" % h[1][-1] + "\n"
return his
def _h():
his = ""
if title:
his += title +"\n"
xl = ["%.2f" % n for n in h[1]]
lxl = [len(l) for l in xl]
bars = h0 / maxh0 * height
his += " " * int(max(bars) + 2 + max(lxl)) + "%s\n" % maxh0
for i, c in enumerate(bars):
line = (xl[i] + " " * int(max(lxl) - lxl[i]) + "| " + char * int(c) + "\n")
his += line
return his
if horizontal:
height *= 2
printc(_h(), c=c, bold=bold)
else:
printc(_v(), c=c, bold=bold) | 0.004101 |
def _ReadFileEntry(self, file_object, file_offset):
"""Reads a file entry.
Args:
file_object (FileIO): file-like object.
file_offset (int): offset of the data relative from the start of
the file-like object.
Returns:
CPIOArchiveFileEntry: a file entry.
Raises:
FileFormatError: if the file entry cannot be read.
"""
if self.file_format == 'bin-big-endian':
data_type_map = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY
file_entry_data_size = self._CPIO_BINARY_BIG_ENDIAN_FILE_ENTRY_SIZE
elif self.file_format == 'bin-little-endian':
data_type_map = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY
file_entry_data_size = self._CPIO_BINARY_LITTLE_ENDIAN_FILE_ENTRY_SIZE
elif self.file_format == 'odc':
data_type_map = self._CPIO_PORTABLE_ASCII_FILE_ENTRY
file_entry_data_size = self._CPIO_PORTABLE_ASCII_FILE_ENTRY_SIZE
elif self.file_format in ('crc', 'newc'):
data_type_map = self._CPIO_NEW_ASCII_FILE_ENTRY
file_entry_data_size = self._CPIO_NEW_ASCII_FILE_ENTRY_SIZE
file_entry = self._ReadStructure(
file_object, file_offset, file_entry_data_size, data_type_map,
'file entry')
file_offset += file_entry_data_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
file_entry.modification_time = (
(file_entry.modification_time.upper << 16) |
file_entry.modification_time.lower)
file_entry.file_size = (
(file_entry.file_size.upper << 16) | file_entry.file_size.lower)
if self.file_format == 'odc':
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_ODC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 8)
except ValueError:
raise errors.FileFormatError(
'Unable to convert attribute: {0:s} into an integer'.format(
attribute_name))
value = setattr(file_entry, attribute_name, value)
elif self.file_format in ('crc', 'newc'):
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_CRC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 16)
except ValueError:
raise errors.FileFormatError(
'Unable to convert attribute: {0:s} into an integer'.format(
attribute_name))
value = setattr(file_entry, attribute_name, value)
path_data = file_object.read(file_entry.path_size)
file_offset += file_entry.path_size
# TODO: should this be ASCII?
path = path_data.decode('ascii')
path, _, _ = path.partition('\x00')
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size
elif self.file_format == 'odc':
padding_size = 0
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size
file_offset += padding_size
archive_file_entry = CPIOArchiveFileEntry()
archive_file_entry.data_offset = file_offset
archive_file_entry.data_size = file_entry.file_size
archive_file_entry.group_identifier = file_entry.group_identifier
archive_file_entry.inode_number = file_entry.inode_number
archive_file_entry.modification_time = file_entry.modification_time
archive_file_entry.path = path
archive_file_entry.mode = file_entry.mode
archive_file_entry.size = (
file_entry_data_size + file_entry.path_size + padding_size +
file_entry.file_size)
archive_file_entry.user_identifier = file_entry.user_identifier
file_offset += file_entry.file_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size
elif self.file_format == 'odc':
padding_size = 0
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size
if padding_size > 0:
archive_file_entry.size += padding_size
return archive_file_entry | 0.006601 |
def to_etree(self):
"""
creates an etree element of a ``SaltLabel`` that mimics a SaltXMI
<labels> element
"""
attribs = {
'{{{pre}}}type'.format(pre=NAMESPACES['xsi']): self.xsi_type,
'namespace': self.namespace, 'name': self.name,
'value': self.hexvalue, 'valueString': self.value}
non_empty_attribs = {key: val for (key, val) in attribs.items()
if val is not None}
E = ElementMaker()
return E('labels', non_empty_attribs) | 0.00361 |
def trace_function(module, function, tracer=tracer):
"""
Traces given module function using given tracer.
:param module: Module of the function.
:type module: object
:param function: Function to trace.
:type function: object
:param tracer: Tracer.
:type tracer: object
:return: Definition success.
:rtype: bool
"""
if is_traced(function):
return False
name = get_object_name(function)
if is_untracable(function) or name in UNTRACABLE_NAMES:
return False
setattr(module, name, tracer(function))
return True | 0.001698 |
def predict_size_distribution_component_models(self, model_names, input_columns, output_columns, metadata_cols,
data_mode="forecast", location=6):
"""
Make predictions using fitted size distribution models.
Args:
model_names: Name of the models for predictions
input_columns: Data columns used for input into ML models
output_columns: Names of output columns
metadata_cols: Columns from input data that should be included in the data frame with the predictions.
data_mode: Set of data used as input for prediction models
location: Value of fixed location parameter
Returns:
Predictions in dictionary of data frames grouped by group type
"""
groups = self.size_distribution_models.keys()
predictions = pd.DataFrame(self.data[data_mode]["combo"][metadata_cols])
for group in groups:
group_idxs = self.data[data_mode]["combo"][self.group_col] == group
group_count = np.count_nonzero(group_idxs)
print(self.size_distribution_models[group])
if group_count > 0:
log_mean = self.size_distribution_models[group]["lognorm"]["mean"]
log_sd = self.size_distribution_models[group]["lognorm"]["sd"]
for m, model_name in enumerate(model_names):
raw_preds = np.zeros((group_count, len(output_columns)))
for c in range(len(output_columns)):
raw_preds[:, c] = self.size_distribution_models[group][
"pc_{0:d}".format(c)][model_name].predict(self.data[data_mode]["combo"].loc[group_idxs,
input_columns])
log_norm_preds = self.size_distribution_models[group]["lognorm"]["pca"].inverse_transform(raw_preds)
log_norm_preds[:, 0] *= -1
multi_predictions = np.exp(log_norm_preds * log_sd + log_mean)
if multi_predictions.shape[1] == 2:
multi_predictions_temp = np.zeros((multi_predictions.shape[0], 3))
multi_predictions_temp[:, 0] = multi_predictions[:, 0]
multi_predictions_temp[:, 1] = location
multi_predictions_temp[:, 2] = multi_predictions[:, 1]
multi_predictions = multi_predictions_temp
for p, pred_col in enumerate(["shape", "location", "scale"]):
predictions.loc[group_idxs, model_name.replace(" ", "-") + "_" + pred_col] = \
multi_predictions[:, p]
return predictions | 0.005308 |
def get_raw_tag_data(filename):
"Return the ID3 tag in FILENAME as a raw byte string."
with open(filename, "rb") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
return bytes()
file.seek(offset)
return file.read(length) | 0.00304 |
def extract_key_values(array_value, separators=(';', ',', ':'), **kwargs):
"""Serialize array of objects with simple key-values
"""
items_sep, fields_sep, keys_sep = separators
return items_sep.join(fields_sep.join(keys_sep.join(x) for x in sorted(it.items()))
for it in array_value) | 0.006154 |
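For instance, with all values supplied as strings (str.join requires strings), the serialization looks like this, assuming extract_key_values as defined above:
rows = [{'name': 'alice', 'role': 'admin'}, {'name': 'bob', 'role': 'user'}]
print(extract_key_values(rows))
# name:alice,role:admin;name:bob,role:user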
def setReflexAnalysisOf(self, analysis):
"""Sets the analysis that has been reflexed in order to create this
one, but if the analysis is the same as self, do nothing.
:param analysis: an analysis object or UID
"""
if analysis and analysis.UID() != self.UID():
self.getField('ReflexAnalysisOf').set(self, analysis) | 0.005013 |
def build_lattice(self, x):
""" Construct the list of nodes and edges for input features. """
I, J, _ = x.shape
lattice = self._subset_independent_lattice((I, J))
return lattice | 0.009569 |
def get_contradictory_pairs(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity]]:
"""Iterates over contradictory node pairs in the graph based on their causal relationships
:return: An iterator over (source, target) node pairs that have contradictory causal edges
"""
for u, v in graph.edges():
if pair_has_contradiction(graph, u, v):
yield u, v | 0.012723 |
def set_sqlite_pragma(dbapi_connection, connection_record):
"""Allows foreign keys to work in sqlite."""
import sqlite3
if dbapi_connection.__class__ is sqlite3.Connection:
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close() | 0.003344 |
def side_effect(self, func, *args, **kwargs):
'''
Wrap side effects for spies.
'''
self._spy_side_effect = func
self._spy_side_effect_args = args
self._spy_side_effect_kwargs = kwargs
return self | 0.007968 |
def plot(self, modelx, dlines=False, xmin=None, xmax=None,
ymin=None, ymax=None, **kwargs):
"""Plot the data and model (requires `omega`).
This assumes that `data` is 1D and that `mfunc` takes one argument
that should be treated as the X variable.
"""
import omega as om
modelx = np.asarray(modelx)
if modelx.shape != self.data.shape:
raise ValueError('modelx and data arrays must have same shape')
modely = self.mfunc(modelx)
sigmas = self.invsigma**-1 # TODO: handle invsigma = 0
vb = om.layout.VBox(2)
vb.pData = om.quickXYErr(modelx, self.data, sigmas,
'Data', lines=dlines, **kwargs)
vb[0] = vb.pData
vb[0].addXY(modelx, modely, 'Model')
vb[0].setYLabel('Y')
vb[0].rebound(False, True)
vb[0].setBounds(xmin, xmax, ymin, ymax)
vb[1] = vb.pResid = om.RectPlot()
vb[1].defaultField.xaxis = vb[1].defaultField.xaxis
vb[1].addXYErr(modelx, self.resids, sigmas, None, lines=False)
vb[1].setLabels('X', 'Residuals')
vb[1].rebound(False, True)
# ignore Y values since residuals are on different scale:
vb[1].setBounds(xmin, xmax)
vb.setWeight(0, 3)
return vb | 0.003028 |
def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None):
"""Learning rate warmup multiplier."""
if not warmup_steps:
return tf.constant(1.)
tf.logging.info("Applying %s learning rate warmup for %d steps",
warmup_schedule, warmup_steps)
warmup_steps = tf.to_float(warmup_steps)
global_step = _global_step(hparams)
if warmup_schedule == "exp":
return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step)
else:
assert warmup_schedule == "linear"
start = tf.constant(0.35)
return ((tf.constant(1.) - start) / warmup_steps) * global_step + start | 0.012719 |
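The exponential schedule starts the multiplier at 0.01 and reaches 1.0 exactly at warmup_steps; a plain-Python sketch of the same formula (without TensorFlow) illustrates the shape:
import math

warmup_steps = 1000.0
for step in (0.0, 500.0, 1000.0):
    mult = math.exp(math.log(0.01) / warmup_steps) ** (warmup_steps - step)
    print(step, round(mult, 4))
# 0.0 0.01
# 500.0 0.1
# 1000.0 1.0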
def abbreviations(text):
"""Remove periods after an abbreviation from a list of known
abbrevations that can be spoken the same without that period. This
prevents having to handle tokenization of that period.
Note:
Could potentially remove the ending period of a sentence.
Note:
Abbreviations that Google Translate can't pronounce without
(or even with) a period should be added as a word substitution with a
:class:`PreProcessorSub` pre-processor. Ex.: 'Esq.', 'Esquire'.
"""
return PreProcessorRegex(
search_args=symbols.ABBREVIATIONS,
search_func=lambda x: r"(?<={})(?=\.).".format(x),
repl='', flags=re.IGNORECASE).run(text) | 0.001401 |
def tag(self):
""" Return a (deferred) cached Koji tag name for this change. """
name_or_id = self.task.tag
if name_or_id is None:
return defer.succeed(None)
if isinstance(name_or_id, StringType):
return defer.succeed(name_or_id)
if isinstance(name_or_id, int):
return self.task.connection.cache.tag_name(name_or_id)
return defer.fail() | 0.004762 |
def sorted_proposals(proposals, scopepref=None, typepref=None):
"""Sort a list of proposals
Return a sorted list of the given `CodeAssistProposal`\s.
`scopepref` can be a list of proposal scopes. Defaults to
``['parameter_keyword', 'local', 'global', 'imported',
'attribute', 'builtin', 'keyword']``.
`typepref` can be a list of proposal types. Defaults to
``['class', 'function', 'instance', 'module', None]``.
(`None` stands for completions with no type like keywords.)
"""
sorter = _ProposalSorter(proposals, scopepref, typepref)
return sorter.get_sorted_proposal_list() | 0.003215 |
def get_instance(self, payload):
"""
Build an instance of ValidationRequestInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.validation_request.ValidationRequestInstance
:rtype: twilio.rest.api.v2010.account.validation_request.ValidationRequestInstance
"""
return ValidationRequestInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | 0.01073 |
def set_display_name(self, display_name):
"""Sets a display name.
arg: display_name (string): the new display name
raise: InvalidArgument - ``display_name`` is invalid
raise: NoAccess - ``display_name`` cannot be modified
raise: NullArgument - ``display_name`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_display_name_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_string(display_name,
self.get_display_name_metadata()):
raise errors.InvalidArgument()
self._my_map['displayName']['text'] = display_name | 0.002774 |
def build_image_here(source, image,
parent_registry=None, target_registries=None, parent_registry_insecure=False,
target_registries_insecure=False, dont_pull_base_image=False, **kwargs):
"""
build image from provided dockerfile (specified by `source`) in current environment
:param source: dict, where/how to get source code to put in image
:param image: str, tag for built image ([registry/]image_name[:tag])
:param parent_registry: str, registry to pull base image from
:param target_registries: list of str, list of registries to push image to (might change in
future)
:param parent_registry_insecure: bool, allow connecting to parent registry over plain http
:param target_registries_insecure: bool, allow connecting to target registries over plain http
:param dont_pull_base_image: bool, don't pull or update base image specified in dockerfile
:return: BuildResults
"""
build_json = _prepare_build_json(image, source, parent_registry, target_registries,
parent_registry_insecure, target_registries_insecure,
dont_pull_base_image, **kwargs)
m = DockerBuildWorkflow(**build_json)
return m.build_docker_image() | 0.007599 |
def parse_field(cls: Type[DocumentType], field_name: str, line: str) -> Any:
"""
    Parse a document field with a regular expression and return its value
    :param field_name: Name of the field
    :param line: Line string to parse
    :return: The value captured by the field's regular expression
"""
try:
match = cls.fields_parsers[field_name].match(line)
if match is None:
raise AttributeError
value = match.group(1)
except AttributeError:
raise MalformedDocumentError(field_name)
return value | 0.003546 |
def get_config_value(config_file, section, variable):
""" extracts a config file value """
try:
parser = ConfigParser.SafeConfigParser()
parser.read(config_file)
return parser.get(section, variable)
    except Exception:
return None | 0.007634 |
def new_stats_exporter(option):
""" new_stats_exporter returns an exporter
that exports stats to Prometheus.
"""
if option.namespace == "":
raise ValueError("Namespace can not be empty string.")
collector = new_collector(option)
exporter = PrometheusStatsExporter(options=option,
gatherer=option.registry,
collector=collector)
return exporter | 0.002183 |
def register(self, name):
"""
Register a new hook. Not required (see :py:func:`.connect` method)
:param str name: The hook name
:return: Django signal
:rtype: :py:class:`django.dispatch.Signal`
"""
signal = Signal(providing_args=['args', 'kwargs'])
self._registry[name] = signal
return signal | 0.005479 |
def remove(self, auto_confirm=False, verbose=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self.paths:
logger.info(
"Can't uninstall '%s'. No files were found to uninstall.",
self.dist.project_name,
)
return
dist_name_version = (
self.dist.project_name + "-" + self.dist.version
)
logger.info('Uninstalling %s:', dist_name_version)
with indent_log():
if auto_confirm or self._allowed_to_proceed(verbose):
moved = self._moved_paths
for_rename = compress_for_rename(self.paths)
for path in sorted(compact(for_rename)):
moved.stash(path)
logger.debug('Removing file or directory %s', path)
for pth in self.pth.values():
pth.remove()
logger.info('Successfully uninstalled %s', dist_name_version) | 0.001914 |
def connect(self):
"""Connect to the Redis server if necessary.
:rtype: :class:`~tornado.concurrent.Future`
:raises: :class:`~tredis.exceptions.ConnectError`
                 :class:`~tredis.exceptions.RedisError`
"""
future = concurrent.Future()
if self.connected:
raise exceptions.ConnectError('already connected')
LOGGER.debug('%s connecting', self.name)
self.io_loop.add_future(
self._client.connect(self.host, self.port),
lambda f: self._on_connected(f, future))
return future | 0.003373 |
def trsm(self,B,trans='N'):
r"""
        Solves a triangular system of equations with multiple right-hand
        sides. Computes
.. math::
B &:= L^{-1} B \text{ if trans is 'N'}
B &:= L^{-T} B \text{ if trans is 'T'}
"""
if trans=='N':
cp.trsm(self._L0,B)
pftrsm(self._V,self._L,self._B,B,trans='N')
elif trans=='T':
pftrsm(self._V,self._L,self._B,B,trans='T')
cp.trsm(self._L0,B,trans='T')
elif type(trans) is str:
raise ValueError("trans must be 'N' or 'T'")
else:
raise TypeError("trans must be 'N' or 'T'")
return | 0.027496 |
def safe_sum(x, alt_value=-np.inf, name=None):
"""Elementwise adds list members, replacing non-finite results with alt_value.
Typically the `alt_value` is chosen so the `MetropolisHastings`
`TransitionKernel` always rejects the proposal.
Args:
x: Python `list` of `Tensors` to elementwise add.
alt_value: Python scalar used to replace any elementwise sums which would
otherwise be non-finite.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "safe_sum").
Returns:
safe_sum: `Tensor` representing the elementwise sum of list of `Tensor`s
`x` or `alt_value` where sums are non-finite.
Raises:
TypeError: if `x` is not list-like.
ValueError: if `x` is empty.
"""
with tf.compat.v1.name_scope(name, 'safe_sum', [x, alt_value]):
if not is_list_like(x):
raise TypeError('Expected list input.')
if not x:
raise ValueError('Input should not be empty.')
in_shape = x[0].shape
x = tf.stack(x, axis=-1)
x = tf.reduce_sum(input_tensor=x, axis=-1)
alt_value = np.array(alt_value, x.dtype.as_numpy_dtype)
alt_fill = tf.fill(tf.shape(input=x), value=alt_value)
x = tf.where(tf.math.is_finite(x), x, alt_fill)
x.set_shape(x.shape.merge_with(in_shape))
return x | 0.004601 |
def set_json(self, reason='', new_page=False):
"""Send the JSON from the cache to the usernotes wiki page.
Arguments:
            reason: the change reason that will be posted to the wiki changelog
                (str)
            new_page: if True, create the usernotes wiki page (hidden, moderators
                only) instead of editing the existing one (bool)
Raises:
OverflowError if the new JSON data is greater than max_page_size
"""
compressed_json = json.dumps(self._compress_json(self.cached_json))
if len(compressed_json) > self.max_page_size:
raise OverflowError(
'Usernotes page is too large (>{0} characters)'.
format(self.max_page_size)
)
if new_page:
self.subreddit.wiki.create(
self.page_name,
compressed_json,
reason
)
# Set the page as hidden and available to moderators only
self.subreddit.wiki[self.page_name].mod.update(False, permlevel=2)
else:
self.subreddit.wiki[self.page_name].edit(
compressed_json,
reason
) | 0.001855 |
def getLogicalLines(fp, allowQP=True, findBegin=False):
"""
Iterate through a stream, yielding one logical line at a time.
Because many applications still use vCard 2.1, we have to deal with the
quoted-printable encoding for long lines, as well as the vCard 3.0 and
vCalendar line folding technique, a whitespace character at the start
of the line.
Quoted-printable data will be decoded in the Behavior decoding phase.
# We're leaving this test in for awhile, because the unittest was ugly and dumb.
>>> from six import StringIO
>>> f=StringIO(testLines)
>>> for n, l in enumerate(getLogicalLines(f)):
... print("Line %s: %s" % (n, l[0]))
...
Line 0: Line 0 text, Line 0 continued.
Line 1: Line 1;encoding=quoted-printable:this is an evil=
evil=
format.
Line 2: Line 2 is a new line, it does not start with whitespace.
"""
if not allowQP:
val = fp.read(-1)
#Shouldn't need this anymore...
"""
if len(val) > 0:
if not findBegin:
val = val.decode('utf-8')
else:
for encoding in 'utf-8', 'utf-16-LE', 'utf-16-BE', 'iso-8859-1':
try:
val = val.decode(encoding)
if begin_re.search(val) is not None:
break
except UnicodeDecodeError:
pass
else:
raise ParseError('Could not find BEGIN when trying to determine encoding')
"""
# strip off any UTF8 BOMs which Python's UTF8 decoder leaves
#val = val.lstrip( unicode( codecs.BOM_UTF8, "utf8" ) )
lineNumber = 1
for match in logical_lines_re.finditer(val):
line, n = wrap_re.subn('', match.group())
if line != '':
yield line, lineNumber
lineNumber += n
else:
quotedPrintable = False
newbuffer = six.StringIO
logicalLine = newbuffer()
lineNumber = 0
lineStartNumber = 0
while True:
line = fp.readline()
if line == '':
break
else:
line = line.rstrip(CRLF)
lineNumber += 1
if line.rstrip() == '':
if logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber
lineStartNumber = lineNumber
logicalLine = newbuffer()
quotedPrintable = False
continue
if quotedPrintable and allowQP:
logicalLine.write('\n')
logicalLine.write(line)
quotedPrintable = False
elif line[0] in SPACEORTAB:
logicalLine.write(line[1:])
elif logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber
lineStartNumber = lineNumber
logicalLine = newbuffer()
logicalLine.write(line)
else:
logicalLine = newbuffer()
logicalLine.write(line)
# vCard 2.1 allows parameters to be encoded without a parameter name.
# False positives are unlikely, but possible.
val = logicalLine.getvalue()
if val[-1]=='=' and val.lower().find('quoted-printable') >= 0:
quotedPrintable=True
if logicalLine.tell() > 0:
yield logicalLine.getvalue(), lineStartNumber | 0.002532 |
def parse_block_scalar_indent(token_class):
"""Process indentation spaces in a block scalar."""
def callback(lexer, match, context):
text = match.group()
if context.block_scalar_indent is None:
if len(text) <= max(context.indent, 0):
context.stack.pop()
context.stack.pop()
return
context.block_scalar_indent = len(text)
else:
if len(text) < context.block_scalar_indent:
context.stack.pop()
context.stack.pop()
return
if text:
yield match.start(), token_class, text
context.pos = match.end()
return callback | 0.002567 |
def get_params_type(descriptor):
"""
Return the parameters type of a descriptor (e.g (IC)V)
"""
params = descriptor.split(')')[0][1:].split()
if params:
return [param for param in params]
return [] | 0.004367 |
def enrich_pull_requests(self, ocean_backend, enrich_backend, raw_issues_index="github_issues_raw"):
"""
The purpose of this Study is to add additional fields to the pull_requests only index.
Basically to calculate some of the metrics from Code Development under GMD metrics:
https://github.com/chaoss/wg-gmd/blob/master/2_Growth-Maturity-Decline.md#code-development
When data from the pull requests category is fetched using perceval,
some additional fields such as "number_of_comments" that are made on the PR
cannot be calculated as the data related to comments is not fetched.
When data from the issues category is fetched, then every item is considered as an issue
and PR specific data such as "review_comments" are not fetched.
Items (pull requests) from the raw issues index are queried and data from those items
are used to add fields in the corresponding pull request in the pull requests only index.
The ids are matched in both the indices.
:param ocean_backend: backend from which to read the raw items
:param enrich_backend: backend from which to read the enriched items
:param raw_issues_index: the raw issues index from which the data for PRs is to be extracted
:return: None
"""
HEADER_JSON = {"Content-Type": "application/json"}
# issues raw index from which the data will be extracted
github_issues_raw_index = ocean_backend.elastic_url + "/" + raw_issues_index
issues_index_search_url = github_issues_raw_index + "/_search"
# pull_requests index search url in which the data is to be updated
enrich_index_search_url = self.elastic.index_url + "/_search"
logger.info("Doing enrich_pull_request study for index {}"
.format(self.elastic.anonymize_url(self.elastic.index_url)))
time.sleep(1) # HACK: Wait until git enrich index has been written
def make_request(url, error_msg, data=None, req_type="GET"):
"""
Make a request to the given url. The request can be of type GET or a POST.
If the request raises an error, display that error using the custom error msg.
:param url: URL to make the GET request to
:param error_msg: custom error message for logging purposes
:param data: data to be sent with the POST request
optional if type="GET" else compulsory
:param req_type: the type of request to be made: GET or POST
default: GET
:return r: requests object
"""
r = None
if req_type == "GET":
r = self.requests.get(url, headers=HEADER_JSON,
verify=False)
elif req_type == "POST" and data is not None:
r = self.requests.post(url, data=data, headers=HEADER_JSON,
verify=False)
try:
r.raise_for_status()
except requests.exceptions.HTTPError as ex:
logger.error(error_msg)
logger.error(ex)
return
return r
# Check if the github issues raw index exists, if not raise an error and abort
error_msg = "Invalid index provided for enrich_pull_requests study. Aborting."
make_request(issues_index_search_url, error_msg)
# get the number of pull requests in the pull_requests index
# https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-count.html
# Example:
# epoch timestamp count
# 1533454641 13:07:21 276
count_url = enrich_backend.elastic_url + "/_cat/count/" + enrich_backend.elastic.index + "?v"
error_msg = "Cannot fetch number of items in {} Aborting.".format(enrich_backend.elastic.index)
r = make_request(count_url, error_msg)
num_pull_requests = int(r.text.split()[-1])
# get all the ids that are in the enriched pull requests index which will be used later
# to pull requests data from the issue having the same id in the raw_issues_index
pull_requests_ids = []
size = 10000 # Default number of items that can be queried from elasticsearch at a time
i = 0 # counter
while num_pull_requests > 0:
fetch_id_in_repo_query = """
{
"_source": ["id_in_repo"],
"from": %s,
"size": %s
}
""" % (i, size)
error_msg = "Error extracting id_in_repo from {}. Aborting.".format(self.elastic.index_url)
r = make_request(enrich_index_search_url, error_msg, fetch_id_in_repo_query, "POST")
id_in_repo_json = r.json()["hits"]["hits"]
pull_requests_ids.extend([item["_source"]["id_in_repo"] for item in id_in_repo_json])
i += size
num_pull_requests -= size
# get pull requests data from the github_issues_raw and pull_requests only
# index using specific id for each of the item
query = """
{
"query": {
"bool": {
"must": [{
"match": {
%s: %s
}
}]
}
}
}
"""
num_enriched = 0 # counter to count the number of PRs enriched
pull_requests = []
for pr_id in pull_requests_ids:
# retrieve the data from the issues index
issue_query = query % ('"data.number"', pr_id)
error_msg = "Id {} doesnot exists in {}. Aborting.".format(pr_id, github_issues_raw_index)
r = make_request(issues_index_search_url, error_msg, issue_query, "POST")
issue = r.json()["hits"]["hits"][0]["_source"]["data"]
# retrieve the data from the pull_requests index
pr_query = query % ('"id_in_repo"', pr_id)
error_msg = "Id {} doesnot exists in {}. Aborting.".format(pr_id, self.elastic.index_url)
r = make_request(enrich_index_search_url, error_msg, pr_query, "POST")
pull_request_data = r.json()["hits"]["hits"][0]
pull_request = pull_request_data['_source']
pull_request["_item_id"] = pull_request_data['_id']
# Add the necessary fields
reaction_time = get_time_diff_days(str_to_datetime(issue['created_at']),
self.get_time_to_first_attention(issue))
if not reaction_time:
reaction_time = 0
if pull_request["time_to_merge_request_response"]:
reaction_time = min(pull_request["time_to_merge_request_response"], reaction_time)
pull_request["time_to_merge_request_response"] = reaction_time
pull_request['num_comments'] = issue['comments']
# should latest reviews be considered as well?
pull_request['pr_comment_duration'] = get_time_diff_days(str_to_datetime(issue['created_at']),
self.get_latest_comment_date(issue))
pull_request['pr_comment_diversity'] = self.get_num_commenters(issue)
pull_requests.append(pull_request)
if len(pull_requests) >= self.elastic.max_items_bulk:
self.elastic.bulk_upload(pull_requests, "_item_id")
pull_requests = []
num_enriched += 1
logger.info("pull_requests processed %i/%i", num_enriched, len(pull_requests_ids))
self.elastic.bulk_upload(pull_requests, "_item_id") | 0.004594 |
def prj_add_user(self, *args, **kwargs):
"""Add more users to the project.
:returns: None
:rtype: None
:raises: None
"""
if not self.cur_prj:
return
dialog = UserAdderDialog(project=self.cur_prj)
dialog.exec_()
users = dialog.users
for user in users:
userdata = djitemdata.UserItemData(user)
treemodel.TreeItem(userdata, self.prj_user_model.root)
self.cur_prj.save() | 0.004073 |
def convert_old_command_expansions(command):
"""Convert expansions from !OLD! style to {new}."""
command = command.replace("!VERSION!", "{version}")
command = command.replace("!MAJOR_VERSION!", "{version.major}")
command = command.replace("!MINOR_VERSION!", "{version.minor}")
command = command.replace("!BASE!", "{base}")
command = command.replace("!ROOT!", "{root}")
command = command.replace("!USER!", "{system.user}")
return command | 0.001996 |
def _SetPath(self, path):
"""Sets the current path to watch for new events.
This also records the size of the old path, if any. If the size can't be
found, an error is logged.
Args:
path: The full path of the file to watch.
"""
old_path = self._path
if old_path and not io_wrapper.IsCloudPath(old_path):
try:
# We're done with the path, so store its size.
size = tf.io.gfile.stat(old_path).length
logger.debug('Setting latest size of %s to %d', old_path, size)
self._finalized_sizes[old_path] = size
except tf.errors.OpError as e:
logger.error('Unable to get size of %s: %s', old_path, e)
self._path = path
self._loader = self._loader_factory(path) | 0.004027 |
def _update_careful(self, dict):
"""
Update the option values from an arbitrary dictionary, but only
use keys from dict that already have a corresponding attribute
in self. Any keys in dict without a corresponding attribute
are silently ignored.
"""
for attr in dir(self):
if attr in dict:
dval = dict[attr]
if dval is not None:
setattr(self, attr, dval) | 0.004211 |
def _append_element(self, render_func, pe):
'''
Append a render function and the parameters to pass
        an equivalent PathElement, or the PathElement itself.
'''
self._render_funcs.append(render_func)
self._elements.append(pe) | 0.007407 |