text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def path_only_contains_dirs(self, path):
"""Return boolean on whether a path only contains directories."""
pathlistdir = os.listdir(path)
if pathlistdir == []:
return True
if any(os.path.isfile(os.path.join(path, i)) for i in pathlistdir):
return False
return all(self.path_only_contains_dirs(os.path.join(path, i)) for i in pathlistdir) | 0.007481 |
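Since the snippet above is a method lifted out of its class, the following is a minimal, self-contained sketch of the same recursive directories-only check; the temporary directory layout is made up purely for illustration.

import os
import tempfile

def path_only_contains_dirs(path):
    """Return True if `path` contains only directories, recursively."""
    entries = os.listdir(path)
    if not entries:
        return True
    if any(os.path.isfile(os.path.join(path, e)) for e in entries):
        return False
    return all(path_only_contains_dirs(os.path.join(path, e)) for e in entries)

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'a', 'b'))
print(path_only_contains_dirs(root))   # True: only nested directories so far
open(os.path.join(root, 'a', 'file.txt'), 'w').close()
print(path_only_contains_dirs(root))   # False: a file was added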
def get_file_meta(filepath):
"""
Get meta-information about a file.
Parameters
----------
filepath : str
Returns
-------
meta : dict
"""
meta = {}
meta['filepath'] = os.path.abspath(filepath)
meta['creation_datetime'] = get_creation_datetime(filepath)
meta['last_access_datetime'] = get_access_datetime(filepath)
meta['modification_datetime'] = get_modification_datetime(filepath)
try:
import magic
f_mime = magic.Magic(mime=True, uncompress=True)
f_other = magic.Magic(mime=False, uncompress=True)
meta['mime'] = f_mime.from_file(meta['filepath'])
meta['magic-type'] = f_other.from_file(meta['filepath'])
except ImportError:
pass
return meta | 0.001316 |
def json_changebase(obj, changer):
"""
Given a JSON-like Python object (a dict, list, string, int, or
None) and a changer function that takes a string as an argument,
recursively apply the changer function to every string in the
object; ints and None are passed through unchanged.
Return the newly re-encoded object.
"""
if isinstance(obj, (str, unicode)):
return changer(obj)
elif isinstance(obj, (int, long)) or obj is None:
return obj
elif isinstance(obj, list):
return [json_changebase(x, changer) for x in obj]
elif isinstance(obj, dict):
return dict((x, json_changebase(obj[x], changer)) for x in obj)
else:
raise ValueError("Invalid object") | 0.001395 |
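As a hedged usage sketch, here is a Python 3 variant of the same traversal (dropping the Python 2 `unicode`/`long` names) applied to a nested object with an upper-casing changer:

def json_changebase3(obj, changer):
    # Apply `changer` to every string; leave ints and None alone.
    if isinstance(obj, str):
        return changer(obj)
    elif isinstance(obj, int) or obj is None:
        return obj
    elif isinstance(obj, list):
        return [json_changebase3(x, changer) for x in obj]
    elif isinstance(obj, dict):
        return {k: json_changebase3(v, changer) for k, v in obj.items()}
    raise ValueError("Invalid object")

print(json_changebase3({'tx': ['ab', {'sig': 'cd'}], 'n': 2}, str.upper))
# {'tx': ['AB', {'sig': 'CD'}], 'n': 2}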
def reconnect(self):
"""Reconnect to rabbitmq server"""
import pika
import pika.exceptions
self.connection = pika.BlockingConnection(pika.URLParameters(self.amqp_url))
self.channel = self.connection.channel()
try:
self.channel.queue_declare(self.name)
except pika.exceptions.ChannelClosed:
self.connection = pika.BlockingConnection(pika.URLParameters(self.amqp_url))
self.channel = self.connection.channel() | 0.008 |
def get_task(self, id):
"""
Returns the task with the given id, or None if no such task exists.
:type id: integer
:param id: The id of a task.
:rtype: Task
:returns: The task with the given id, or None if not found.
"""
tasks = [task for task in self.get_tasks() if task.id == id]
return tasks[0] if len(tasks) == 1 else None | 0.005831 |
def square_and_sum(a, s):
"""
Writes np.sum(a**2,axis=0) into s
"""
cmin, cmax = thread_partition_array(a)
map_noreturn(asqs, [(a, s, cmin[i], cmax[i]) for i in xrange(len(cmax))])
return a | 0.004695 |
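For reference, a single-threaded NumPy equivalent of what the parallel helper above is expected to leave in `s`; the array values are arbitrary.

import numpy as np

a = np.arange(12.0).reshape(3, 4)
s = np.empty(4)
s[:] = np.sum(a ** 2, axis=0)   # column-wise sum of squares
print(s)  # [ 80. 107. 140. 179.]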
def _update_indexes_for_mutated_object(collection, obj):
"""If an object is updated, this will simply remove
it and re-add it to the indexes defined on the
collection."""
for index in _db[collection].indexes.values():
_remove_from_index(index, obj)
_add_to_index(index, obj) | 0.003268 |
def do_put(endpoint, body, access_token):
'''Do an HTTP PUT request and return JSON.
Args:
endpoint (str): Azure Resource Manager management endpoint.
body (str): JSON body of information to put.
access_token (str): A valid Azure authentication token.
Returns:
HTTP response containing a JSON body.
'''
headers = {"content-type": "application/json", "Authorization": 'Bearer ' + access_token}
headers['User-Agent'] = get_user_agent()
return requests.put(endpoint, data=body, headers=headers) | 0.003697 |
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
APFSFileEntry: linked file entry or None if not available.
"""
link = self._GetLink()
if not link:
return None
# TODO: is there a way to determine the identifier here?
link_identifier = None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(
location=link, parent=parent_path_spec)
is_root = bool(
link == self._file_system.LOCATION_ROOT or
link_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root) | 0.002703 |
def get(self, name):
"""Returns the specific IP interface properties
The Ipinterface resource returns the following:
* name (str): The name of the interface
* address (str): The IP address of the interface in the form
of A.B.C.D/E
* mtu (int): The configured value for IP MTU.
Args:
name (string): The interface identifier to retrieve the
configuration for
Return:
A Python dictionary object of key/value pairs that represents
the current configuration of the node. If the specified
interface does not exist then None is returned.
"""
config = self.get_block('interface %s' % name)
# re.M is a flag, not a position; it belongs on the compiled SWITCHPORT_RE pattern
if name[0:2] in ['Et', 'Po'] and not SWITCHPORT_RE.search(config):
return None
resource = dict(name=name)
resource.update(self._parse_address(config))
resource.update(self._parse_mtu(config))
return resource | 0.00183 |
def list_tasks(collector):
"""List the available_tasks"""
print("Usage: aws_syncr <environment> <task>")
print("")
print("Available environments to choose from are")
print("-----------------------------------------")
print("")
for environment in os.listdir(collector.configuration_folder):
location = os.path.join(collector.configuration_folder, environment)
if os.path.isdir(location) and not environment.startswith("."):
print("\t{0}".format(environment))
print("")
print("Available tasks to choose from are:")
print("-----------------------------------")
print("")
keygetter = lambda item: item[1].label
tasks = sorted(available_actions.items(), key=keygetter)
sorted_tasks = sorted(list(tasks), key=lambda item: len(item[0]))
max_length = max(len(name) for name, _ in sorted_tasks)
for key, task in sorted_tasks:
desc = dedent(task.__doc__ or "").strip().split('\n')[0]
print("\t{0}{1} :-: {2}".format(" " * (max_length-len(key)), key, desc))
print("") | 0.002806 |
def encipher(self,message):
"""Encipher string using M209 cipher according to initialised key. Punctuation and whitespace
are removed from the input.
Example (continuing from the example above)::
ciphertext = m.encipher(plaintext)
:param message: The string to encipher.
:returns: The enciphered string.
"""
message = self.remove_punctuation(message)
effective_ch = [0,0,0,0,0,0,0] # these are the wheels which are effective currently, 1 for yes, 0 no
# -the zero at the beginning is extra, indicates lug was in pos 0
ret = ''
# from now we no longer need the wheel starts, we can just increment the actual key
for j in range(len(message)):
shift = 0
effective_ch[0] = 0
effective_ch[1] = self.wheel_1_settings[self.actual_key[0]]
effective_ch[2] = self.wheel_2_settings[self.actual_key[1]]
effective_ch[3] = self.wheel_3_settings[self.actual_key[2]]
effective_ch[4] = self.wheel_4_settings[self.actual_key[3]]
effective_ch[5] = self.wheel_5_settings[self.actual_key[4]]
effective_ch[6] = self.wheel_6_settings[self.actual_key[5]]
for i in range(0,27): # implements the cylindrical drum with lugs on it
if effective_ch[self.lug_positions[i][0]] or effective_ch[self.lug_positions[i][1]]: shift+=1
# shift has been found, now actually encrypt letter
ret += self.subst(message[j], key='ZYXWVUTSRQPONMLKJIHGFEDCBA', offset=-shift)  # encrypt letter
self.advance_key()  # advance the key wheels
return ret | 0.020455 |
def add_content(self, content, mime_type=None):
"""Add content to the email
:param content: Content to be added to the email
:type content: Content
:param mime_type: Override the mime type
:type mime_type: MimeType, str
"""
if isinstance(content, str):
content = Content(mime_type, content)
# Content of mime type text/plain must always come first
if content.mime_type == "text/plain":
self._contents = self._ensure_insert(content, self._contents)
else:
if self._contents:
index = len(self._contents)
else:
index = 0
self._contents = self._ensure_append(
content, self._contents, index=index) | 0.002554 |
def GetCustomerIDs(client):
"""Retrieves all CustomerIds in the account hierarchy.
Note that your configuration file must specify a client_customer_id belonging
to an AdWords manager account.
Args:
client: an AdWordsClient instance.
Raises:
Exception: if no CustomerIds could be found.
Returns:
A Queue instance containing all CustomerIds in the account hierarchy.
"""
# For this example, we will use ManagedCustomerService to get all IDs in
# hierarchy that do not belong to MCC accounts.
managed_customer_service = client.GetService('ManagedCustomerService',
version='v201809')
offset = 0
# Get the account hierarchy for this account.
selector = {
'fields': ['CustomerId'],
'predicates': [{
'field': 'CanManageClients',
'operator': 'EQUALS',
'values': [False]
}],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
# Using Queue to balance load between processes.
queue = multiprocessing.Queue()
more_pages = True
while more_pages:
page = managed_customer_service.get(selector)
if page and 'entries' in page and page['entries']:
for entry in page['entries']:
queue.put(entry['customerId'])
else:
raise Exception('Can\'t retrieve any customer ID.')
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
return queue | 0.009785 |
def _escape_str_id(id_str):
"""make a single string id SBML compliant"""
for c in ("'", '"'):
if id_str.startswith(c) and id_str.endswith(c) \
and id_str.count(c) == 2:
id_str = id_str.strip(c)
for char, escaped_char in _renames:
id_str = id_str.replace(char, escaped_char)
return id_str | 0.002882 |
def t_quotedvar_DOLLAR_OPEN_CURLY_BRACES(t):
r'\$\{'
if re.match(r'[A-Za-z_]', peek(t.lexer)):
t.lexer.begin('varname')
else:
t.lexer.begin('php')
return t | 0.005348 |
def make_python_patterns(additional_keywords=[], additional_builtins=[]):
"""Strongly inspired from idlelib.ColorDelegator.make_pat"""
kw = r"\b" + any("keyword", kwlist + additional_keywords) + r"\b"
kw_namespace = r"\b" + any("namespace", kw_namespace_list) + r"\b"
word_operators = r"\b" + any("operator_word", wordop_list) + r"\b"
builtinlist = [str(name) for name in dir(builtins)
if not name.startswith('_')] + additional_builtins
for v in ['None', 'True', 'False']:
builtinlist.remove(v)
builtin = r"([^.'\"\\#]\b|^)" + any("builtin", builtinlist) + r"\b"
builtin_fct = any("builtin_fct", [r'_{2}[a-zA-Z_]*_{2}'])
comment = any("comment", [r"#[^\n]*"])
instance = any("instance", [r"\bself\b", r"\bcls\b"])
decorator = any('decorator', [r'@\w*', r'.setter'])
number = any("number",
[r"\b[+-]?[0-9]+[lLjJ]?\b",
r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b",
r"\b[+-]?0[oO][0-7]+[lL]?\b",
r"\b[+-]?0[bB][01]+[lL]?\b",
r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?[jJ]?\b"])
sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
uf_sqstring = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*(\\)$(?!')$"
uf_dqstring = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*(\\)$(?!")$'
sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(''')?"
dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(""")?'
uf_sq3string = r"(\b[rRuU])?'''[^'\\]*((\\.|'(?!''))[^'\\]*)*(\\)?(?!''')$"
uf_dq3string = r'(\b[rRuU])?"""[^"\\]*((\\.|"(?!""))[^"\\]*)*(\\)?(?!""")$'
string = any("string", [sq3string, dq3string, sqstring, dqstring])
ufstring1 = any("uf_sqstring", [uf_sqstring])
ufstring2 = any("uf_dqstring", [uf_dqstring])
ufstring3 = any("uf_sq3string", [uf_sq3string])
ufstring4 = any("uf_dq3string", [uf_dq3string])
return "|".join([instance, decorator, kw, kw_namespace, builtin,
word_operators, builtin_fct, comment,
ufstring1, ufstring2, ufstring3, ufstring4, string,
number, any("SYNC", [r"\n"])]) | 0.000452 |
def _score_step(step):
"""Count the mapped two-qubit gates, less the number of added SWAPs."""
# Each added swap will add 3 ops to gates_mapped, so subtract 3.
return len([g for g in step['gates_mapped']
if len(g.qargs) == 2]) - 3 * step['swaps_added'] | 0.003559 |
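An illustrative sketch of the heuristic above; `FakeGate` is a made-up stand-in for the real gate objects, which only need a `.qargs` attribute here:

class FakeGate:
    def __init__(self, qargs):
        self.qargs = qargs

# Four mapped two-qubit gates, one single-qubit gate, and one added SWAP
# (worth three ops) give a score of 4 - 3 = 1.
step = {'gates_mapped': [FakeGate([0, 1])] * 4 + [FakeGate([2])],
        'swaps_added': 1}
score = (len([g for g in step['gates_mapped'] if len(g.qargs) == 2])
         - 3 * step['swaps_added'])
print(score)  # 1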
def _rr_new(self, rr_version, rr_name, rr_symlink_target, rr_relocated_child,
rr_relocated, rr_relocated_parent, file_mode):
# type: (str, bytes, bytes, bool, bool, bool, int) -> None
'''
Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing.
'''
if self.parent is None:
raise pycdlibexception.PyCdlibInternalError('Invalid call to create new Rock Ridge on root directory')
self.rock_ridge = rockridge.RockRidge()
is_first_dir_record_of_root = self.file_ident == b'\x00' and self.parent.is_root
bytes_to_skip = 0
if self.xa_record is not None:
bytes_to_skip = XARecord.length()
self.dr_len = self.rock_ridge.new(is_first_dir_record_of_root, rr_name,
file_mode, rr_symlink_target,
rr_version, rr_relocated_child,
rr_relocated, rr_relocated_parent,
bytes_to_skip, self.dr_len)
# For files, we are done
if not self.isdir:
return
# If this is a directory, we have to manipulate the file links
# appropriately.
if self.parent.is_root:
if self.file_ident == b'\x00' or self.file_ident == b'\x01':
# For the dot and dotdot children of the root, add one
# directly to their Rock Ridge links.
self.rock_ridge.add_to_file_links()
else:
# For all other children of the root, make sure to add one
# to each of the dot and dotdot entries.
if len(self.parent.children) < 2:
raise pycdlibexception.PyCdlibInvalidISO('Expected at least 2 children of the root directory record, saw %d' % (len(self.parent.children)))
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links()
if self.parent.children[1].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot-dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[1].rock_ridge.add_to_file_links()
else:
if self.parent.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have Rock Ridge, ISO is corrupt')
if self.file_ident == b'\x00':
# If we are adding the dot directory, increment the parent
# file links and our file links.
self.parent.rock_ridge.add_to_file_links()
self.rock_ridge.add_to_file_links()
elif self.file_ident == b'\x01':
# If we are adding the dotdot directory, copy the file links
# from the dot directory of the grandparent.
if self.parent.parent is None:
raise pycdlibexception.PyCdlibInternalError('Grandparent of the entry did not exist; this cannot be')
if not self.parent.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot entry of the grandparent did not have Rock Ridge; ISO is corrupt')
self.rock_ridge.copy_file_links(self.parent.parent.children[0].rock_ridge)
else:
# For all other entries, increment the parents file links
# and the parents dot file links.
self.parent.rock_ridge.add_to_file_links()
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of the parent did not have Rock Ridge; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links() | 0.003119 |
def set_body_s(self, stream):
"""Set customized body stream.
Note: the body stream can only be changed before the stream
is consumed.
:param stream: InMemStream/PipeStream for body
:except TChannelError:
Raise TChannelError if the stream is being sent when you try
to change the stream.
"""
if self.argstreams[2].state == StreamState.init:
self.argstreams[2] = stream
else:
raise TChannelError(
"Unable to change the body since the streaming has started") | 0.003413 |
def add_membership(self, email, role, **attrs):
"""
Add a Membership to the project and returns a
:class:`Membership` resource.
:param email: email for :class:`Membership`
:param role: role for :class:`Membership`
:param attrs: optional :class:`Membership` attributes
"""
return Memberships(self.requester).create(
self.id, email, role, **attrs
) | 0.004149 |
def connect(self, From, to,
protocolName, clientFactory,
chooser):
"""
Issue an INBOUND command, creating a virtual connection to the peer,
given identifying information about the endpoint to connect to, and a
protocol factory.
@param clientFactory: a *Client* ProtocolFactory instance which will
generate a protocol upon connect.
@return: a Deferred which fires with the protocol instance that was
connected, or fails with AttemptsFailed if the connection was not
possible.
"""
publicIP = self._determinePublicIP()
A = dict(From=From,
to=to,
protocol=protocolName)
if self.service.dispatcher is not None:
# Tell them exactly where they can shove it
A['udp_source'] = (publicIP,
self.service.sharedUDPPortnum)
else:
# Don't tell them because we don't know
log.msg("dispatcher unavailable when connecting")
D = self.callRemote(Inbound, **A)
def _connected(answer):
listenersD = defer.maybeDeferred(chooser, answer['listeners'])
def gotListeners(listeners):
allConnectionAttempts = []
for listener in listeners:
d = self.attemptConnectionMethods(
listener['methods'],
listener['id'],
From, to,
protocolName, clientFactory,
)
allConnectionAttempts.append(d)
return defer.DeferredList(allConnectionAttempts)
listenersD.addCallback(gotListeners)
def finishedAllAttempts(results):
succeededAny = False
failures = []
if not results:
return Failure(NoAttemptsMade(
"there was no available path for connections "
"(%r->%r/%s)" % (From, to, protocolName)))
for succeeded, result in results:
if succeeded:
succeededAny = True
randomConnection = result
break
else:
failures.append(result)
if not succeededAny:
return Failure(
AttemptsFailed(
[failure.getBriefTraceback()
for failure in failures]
)
)
# XXX TODO: this connection is really random; connectQ2Q should
# not return one of the connections it's made, put it into your
# protocol's connectionMade handler
return randomConnection
return listenersD.addCallback(finishedAllAttempts)
return D.addCallback(_connected) | 0.002319 |
def TimestampToRDFDatetime(timestamp):
"""Converts MySQL `TIMESTAMP(6)` columns to datetime objects."""
# TODO(hanuszczak): `timestamp` should be of MySQL type `Decimal`. However,
# it is unclear where this type is actually defined and how to import it in
# order to have a type assertion.
if timestamp is None:
return None
else:
micros = int(1000000 * timestamp)
return rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(micros) | 0.015556 |
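The arithmetic the conversion relies on, sketched without the GRR `rdfvalue` dependency; the sample timestamp is arbitrary.

from decimal import Decimal

timestamp = Decimal('1600000000.123456')  # MySQL TIMESTAMP(6), in seconds
micros = int(1000000 * timestamp)
print(micros)  # 1600000000123456 microseconds since the epoch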
def convert_video(fieldfile, force=False):
"""
Converts a given video file into all defined formats.
"""
instance = fieldfile.instance
field = fieldfile.field
filename = os.path.basename(fieldfile.path)
source_path = fieldfile.path
encoding_backend = get_backend()
for options in settings.VIDEO_ENCODING_FORMATS[encoding_backend.name]:
video_format, created = Format.objects.get_or_create(
object_id=instance.pk,
content_type=ContentType.objects.get_for_model(instance),
field_name=field.name, format=options['name'])
# do not reencode if not requested
if video_format.file and not force:
continue
else:
# set progress to 0
video_format.reset_progress()
# TODO do not upscale videos
_, target_path = tempfile.mkstemp(
suffix='_{name}.{extension}'.format(**options))
try:
encoding = encoding_backend.encode(
source_path, target_path, options['params'])
while encoding:
try:
progress = next(encoding)
except StopIteration:
break
video_format.update_progress(progress)
except VideoEncodingError:
# TODO handle with more care
video_format.delete()
os.remove(target_path)
continue
# save encoded file
video_format.file.save(
'{filename}_{name}.{extension}'.format(filename=filename,
**options),
File(open(target_path, mode='rb')))
video_format.update_progress(100) # now we are ready
# remove temporary file
os.remove(target_path) | 0.000552 |
def _get_relative_path(self):
"""Returns the path with the query parameters escaped and appended."""
param_string = self._get_query_string()
if self.path is None:
path = '/'
else:
path = self.path
if param_string:
return '?'.join([path, param_string])
else:
return path | 0.015773 |
def delete(self, record_id):
"""
Deletes a record by its id
>>> record = airtable.match('Employee Id', 'DD13332454')
>>> airtable.delete(record['id'])
Args:
record_id(``str``): Airtable record id
Returns:
record (``dict``): Deleted Record
"""
record_url = self.record_url(record_id)
return self._delete(record_url) | 0.004684 |
def get_assessment_part_ids_by_banks(self, bank_ids):
"""Gets the list of ``AssessmentPart Ids`` corresponding to a list of ``Banks``.
arg: bank_ids (osid.id.IdList): list of bank ``Ids``
return: (osid.id.IdList) - list of assessment part ``Ids``
raise: NullArgument - ``bank_ids`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
id_list = []
for assessment_part in self.get_assessment_parts_by_banks(bank_ids):
id_list.append(assessment_part.get_id())
return IdList(id_list) | 0.003663 |
def activate_status_output_overall_error_msg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
activate_status = ET.Element("activate_status")
config = activate_status
output = ET.SubElement(activate_status, "output")
overall_error_msg = ET.SubElement(output, "overall-error-msg")
overall_error_msg.text = kwargs.pop('overall_error_msg')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003876 |
def expect(self, f, *args):
"""Like 'accept' but throws a parse error if 'f' doesn't match."""
match = self.accept(f, *args)
if match:
return match
try:
func_name = f.func_name
except AttributeError:
func_name = "<unnamed grammar function>"
start, end = self.current_position()
raise errors.EfilterParseError(
query=self.tokenizer.source, start=start, end=end,
message="Was expecting %s here." % (func_name)) | 0.003802 |
def capitalize_unicode_name(s):
"""
Turns a string such as 'capital delta' into the shortened,
capitalized version, in this case simply 'Delta'. Used as a
transform in sanitize_identifier.
"""
index = s.find('capital')
if index == -1: return s
tail = s[index:].replace('capital', '').strip()
tail = tail[0].upper() + tail[1:]
return s[:index] + tail | 0.005141 |
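The function is self-contained, so it can be exercised directly; a runnable sketch reproducing the docstring example:

def capitalize_unicode_name(s):
    index = s.find('capital')
    if index == -1:
        return s
    tail = s[index:].replace('capital', '').strip()
    tail = tail[0].upper() + tail[1:]
    return s[:index] + tail

print(capitalize_unicode_name('capital delta'))         # Delta
print(capitalize_unicode_name('inverted capital psi'))  # inverted Psi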
async def processClaims(self, allClaims: Dict[ID, Claims]):
"""
Processes and saves received Claims.
:param allClaims: claims to be processed and saved for each claim
definition.
"""
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res | 0.006993 |
def replace_cells(self, key, sorted_row_idxs):
"""Replaces cells in current selection so that they are sorted"""
row, col, tab = key
new_keys = {}
del_keys = []
selection = self.grid.actions.get_selection()
for __row, __col, __tab in self.grid.code_array:
if __tab == tab and \
(not selection or (__row, __col) in selection):
new_row = sorted_row_idxs.index(__row)
if __row != new_row:
new_keys[(new_row, __col, __tab)] = \
self.grid.code_array((__row, __col, __tab))
del_keys.append((__row, __col, __tab))
for key in del_keys:
self.grid.code_array.pop(key)
for key in new_keys:
CellActions.set_code(self, key, new_keys[key]) | 0.002375 |
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
"""
Parses the given text and yields tokens which represent words within
the given text. Tokens are assumed to be divided by any form of
whitespace character.
"""
if ngrams is None:
ngrams = 1
text = re.sub(re.compile('\'s'), '', text) # Simple heuristic
text = re.sub(_re_punctuation, '', text)
matched_tokens = re.findall(_re_token, text.lower())
for tokens in get_ngrams(matched_tokens, ngrams):
for i in range(len(tokens)):
tokens[i] = tokens[i].strip(punctuation)
if len(tokens[i]) < min_length or tokens[i] in stopwords:
break
if ignore_numeric and isnumeric(tokens[i]):
break
else:
yield tuple(tokens) | 0.00235 |
def load_data_file(fname, directory=None, force_download=False):
"""Get a standard vispy demo data file
Parameters
----------
fname : str
The filename on the remote ``demo-data`` repository to download,
e.g. ``'molecular_viewer/micelle.npy'``. These correspond to paths
on ``https://github.com/vispy/demo-data/``.
directory : str | None
Directory to use to save the file. By default, the vispy
configuration directory is used.
force_download : bool | str
If True, the file will be downloaded even if a local copy exists
(and this copy will be overwritten). Can also be a YYYY-MM-DD date
to ensure a file is up-to-date (modified date of a file on disk,
if present, is checked).
Returns
-------
fname : str
The path to the file on the local system.
"""
_url_root = 'http://github.com/vispy/demo-data/raw/master/'
url = _url_root + fname
if directory is None:
directory = config['data_path']
if directory is None:
raise ValueError('config["data_path"] is not defined, '
'so directory must be supplied')
fname = op.join(directory, op.normcase(fname)) # convert to native
if op.isfile(fname):
if not force_download: # we're done
return fname
if isinstance(force_download, string_types):
ntime = time.strptime(force_download, '%Y-%m-%d')
ftime = time.gmtime(op.getctime(fname))
if ftime >= ntime:
return fname
else:
print('File older than %s, updating...' % force_download)
if not op.isdir(op.dirname(fname)):
os.makedirs(op.abspath(op.dirname(fname)))
# let's go get the file
_fetch_file(url, fname)
return fname | 0.000543 |
def all_complexes(network, state):
"""Return a generator for all complexes of the network.
.. note::
Includes reducible, zero-|big_phi| complexes (which are not, strictly
speaking, complexes at all).
Args:
network (Network): The |Network| of interest.
state (tuple[int]): The state of the network (a binary tuple).
Yields:
SystemIrreducibilityAnalysis: A |SIA| for each |Subsystem| of the
|Network|.
"""
engine = FindAllComplexes(subsystems(network, state))
return engine.run(config.PARALLEL_COMPLEX_EVALUATION) | 0.001695 |
def update_vnic_template(self, host_id, vlan_id, physnet,
vnic_template_path, vnic_template):
"""Updates VNIC Template with the vlan_id."""
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('UCS Manager network driver does not have UCSM IP '
'for Host_id %s', str(host_id))
return False
vlan_name = self.make_vlan_name(vlan_id)
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
# Create Vlan Profile
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error('UCS Manager network driver failed to create '
'Vlan Profile for vlan %s', vlan_id)
return False
try:
LOG.debug('VNIC Template Path: %s', vnic_template_path)
vnic_template_full_path = (vnic_template_path +
const.VNIC_TEMPLATE_PREFIX + str(vnic_template))
LOG.debug('VNIC Template Path: %s for physnet %s',
vnic_template_full_path, physnet)
handle.StartTransaction()
mo = handle.GetManagedObject(
None,
self.ucsmsdk.VnicLanConnTempl.ClassId(),
{self.ucsmsdk.VnicLanConnTempl.DN:
vnic_template_full_path}, True)
if not mo:
LOG.error('UCS Manager network driver could '
'not find VNIC template %s',
vnic_template_full_path)
return False
vlan_dn = (vnic_template_full_path + const.VLAN_PATH_PREFIX +
vlan_name)
LOG.debug('VNIC Template VLAN path: %s', vlan_dn)
eth_if = handle.AddManagedObject(mo,
self.ucsmsdk.VnicEtherIf.ClassId(),
{self.ucsmsdk.VnicEtherIf.DN: vlan_dn,
self.ucsmsdk.VnicEtherIf.NAME: vlan_name,
self.ucsmsdk.VnicEtherIf.DEFAULT_NET: "no"}, True)
if not eth_if:
LOG.error('UCS Manager network driver could '
'not add VLAN %(vlan_name)s to VNIC '
'template %(vnic_template_full_path)s',
{'vlan_name': vlan_name,
'vnic_template_full_path': vnic_template_full_path})
return False
handle.CompleteTransaction()
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'VNIC Template',
vlan_id, ucsm_ip) | 0.005523 |
def _flat_map(self, f: Callable):
''' **f** must return the same stack type as **self.value** has.
Iterates over the effects, sequences the inner instance
successively to the top and joins with the outer instance.
Example:
List(Right(Just(1))) => List(Right(Just(List(Right(Just(5))))))
=> List(List(Right(Just(Right(Just(5))))))
=> List(Right(Just(Right(Just(5)))))
=> List(Right(Right(Just(Just(5)))))
=> List(Right(Just(Just(5))))
=> List(Right(Just(5)))
Note: IO works only as outermost effect, as it cannot sequence
'''
index = List.range(self.depth + 1)
g = index.fold_left(f)(lambda z, i: lambda a: a.map(z))
nested = g(self.value)
def sequence_level(z, depth, tpe):
nesting = lambda z, i: lambda a: a.map(z).sequence(tpe)
lifter = List.range(depth).fold_left(I)(nesting)
return z // lifter
def sequence_type(z, data):
return lambda a: sequence_level(a, *data).map(z)
h = self.all_effects.reversed.with_index.fold_left(I)(sequence_type)
return h(nested) | 0.00431 |
def get_docs(r_session, url, encoder=None, headers=None, **params):
"""
Provides a helper for functions that require GET or POST requests
with a JSON, text, or raw response containing documents.
:param r_session: Authentication session from the client
:param str url: URL containing the endpoint
:param JSONEncoder encoder: Custom encoder from the client
:param dict headers: Optional HTTP Headers to send with the request
:returns: Raw response content from the specified endpoint
"""
keys_list = params.pop('keys', None)
keys = None
if keys_list is not None:
keys = json.dumps({'keys': keys_list}, cls=encoder)
f_params = python_to_couch(params)
resp = None
if keys is not None:
# If we're using POST we are sending JSON so add the header
if headers is None:
headers = {}
headers['Content-Type'] = 'application/json'
resp = r_session.post(url, headers=headers, params=f_params, data=keys)
else:
resp = r_session.get(url, headers=headers, params=f_params)
resp.raise_for_status()
return resp | 0.000888 |
def _redraw(self):
"""
Forgets the current layout and redraws with the most recent information
:return: None
"""
for row in self._rows:
for widget in row:
widget.grid_forget()
offset = 0 if not self.headers else 1
for i, row in enumerate(self._rows):
for j, widget in enumerate(row):
widget.grid(row=i+offset, column=j) | 0.004619 |
def send_message(self, recipient, subject, message, from_sr=None,
captcha=None, **kwargs):
"""Send a message to a redditor or a subreddit's moderators (mod mail).
:param recipient: A Redditor or Subreddit instance to send a message
to. A string can also be used in which case the string is treated
as a redditor unless it is prefixed with either '/r/' or '#', in
which case it will be treated as a subreddit.
:param subject: The subject of the message to send.
:param message: The actual message content.
:param from_sr: A Subreddit instance or string to send the message
from. When provided, messages are sent from the subreddit rather
than from the authenticated user. Note that the authenticated user
must be a moderator of the subreddit and have mail permissions.
:returns: The json response from the server.
This function may result in a captcha challenge. PRAW will
automatically prompt you for a response. See :ref:`handling-captchas`
if you want to manually handle captchas.
"""
if isinstance(recipient, objects.Subreddit):
recipient = '/r/{0}'.format(six.text_type(recipient))
else:
recipient = six.text_type(recipient)
data = {'text': message,
'subject': subject,
'to': recipient}
if from_sr:
data['from_sr'] = six.text_type(from_sr)
if captcha:
data.update(captcha)
response = self.request_json(self.config['compose'], data=data,
retry_on_error=False)
self.evict(self.config['sent'])
return response | 0.001696 |
def step(self, disable_interrupts=True, start=0, end=0):
"""
perform an instruction level step. This function preserves the previous
interrupt mask state
"""
# Was 'if self.get_state() != TARGET_HALTED:'
# but now value of dhcsr is saved
dhcsr = self.read_memory(CortexM.DHCSR)
if not (dhcsr & (CortexM.C_STEP | CortexM.C_HALT)):
logging.error('cannot step: target not halted')
return
self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self, data=Target.RUN_TYPE_STEP))
self.clear_debug_cause_bits()
# Save previous interrupt mask state
interrupts_masked = (CortexM.C_MASKINTS & dhcsr) != 0
# Mask interrupts - C_HALT must be set when changing to C_MASKINTS
if not interrupts_masked and disable_interrupts:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT | CortexM.C_MASKINTS)
# Single step using current C_MASKINTS setting
while True:
if disable_interrupts or interrupts_masked:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_MASKINTS | CortexM.C_STEP)
else:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_STEP)
# Wait for halt to auto set (This should be done before the first read)
while not self.read_memory(CortexM.DHCSR) & CortexM.C_HALT:
pass
# Range is empty, 'range step' will degenerate to 'step'
if start == end:
break
# Read program counter and compare to [start, end)
program_counter = self.read_core_register(CORE_REGISTER['pc'])
if program_counter < start or end <= program_counter:
break
# Check other stop reasons
if self.read_memory(CortexM.DFSR) & (CortexM.DFSR_DWTTRAP | CortexM.DFSR_BKPT):
break
# Restore interrupt mask state
if not interrupts_masked and disable_interrupts:
# Unmask interrupts - C_HALT must be set when changing to C_MASKINTS
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)
self.flush()
self._run_token += 1
self.notify(Notification(event=Target.EVENT_POST_RUN, source=self, data=Target.RUN_TYPE_STEP)) | 0.006496 |
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile) | 0.002674 |
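A hedged usage sketch, assuming the Pygments package is installed and imported in the usual way:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# Lex a small Python snippet and render it as HTML markup.
html = highlight("print('hello')", PythonLexer(), HtmlFormatter())
print(html.splitlines()[0])  # e.g. <div class="highlight"><pre>...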
def get_incidents(self, offset=0, limit=None, include_resolved=False, **kwargs):
"""Retrieve all (v2) incidents.
"""
resp = self._get(
self._u(self._INCIDENT_ENDPOINT_SUFFIX),
params={
'offset': offset,
'limit': limit,
'include_resolved': str(include_resolved).lower(),
},
**kwargs)
resp.raise_for_status()
return resp.json() | 0.006494 |
def init_logging(log_level):
"""
Initialise the logging by adding an observer to the global log publisher.
:param str log_level: The minimum log level to log messages for.
"""
log_level_filter = LogLevelFilterPredicate(
LogLevel.levelWithName(log_level))
log_level_filter.setLogLevelForNamespace(
'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)
log_observer = FilteringLogObserver(
textFileLogObserver(sys.stdout), [log_level_filter])
globalLogPublisher.addObserver(log_observer) | 0.001832 |
def boot(name, flavor_id=0, image_id=0, profile=None, timeout=300, **kwargs):
'''
Boot (create) a new instance
name
Name of the new instance (must be first)
flavor_id
Unique integer ID for the flavor
image_id
Unique integer ID for the image
timeout
How long to wait, after creating the instance, for the provider to
return information about it (default 300 seconds).
.. versionadded:: 2014.1.0
CLI Example:
.. code-block:: bash
salt '*' nova.boot myinstance flavor_id=4596 image_id=2
The flavor_id and image_id are obtained from nova.flavor_list and
nova.image_list
.. code-block:: bash
salt '*' nova.flavor_list
salt '*' nova.image_list
'''
conn = _auth(profile, **kwargs)
return conn.boot(name, flavor_id, image_id, timeout) | 0.001156 |
def cross_list_section(self, id, new_course_id):
"""
Cross-list a Section.
Move the Section to another course. The new course may be in a different account (department),
but must belong to the same root account (institution).
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - PATH - new_course_id
"""ID"""
path["new_course_id"] = new_course_id
self.logger.debug("POST /api/v1/sections/{id}/crosslist/{new_course_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/sections/{id}/crosslist/{new_course_id}".format(**path), data=data, params=params, single_item=True) | 0.005855 |
def Run(self):
"""
Renames all TV files from the constructor given file list.
It follows a number of key steps:
1) Extract a list of unique show titles from file name and lookup
actual show names from database or TV guide.
2) Update each file with showID and showName.
3) Get episode name for all remaining files in valid list.
4) Print file details and generate new file paths.
5) Rename files.
6) List skipped and incompatible files.
"""
# ------------------------------------------------------------------------
# Get list of unique fileInfo show names and find matching actual show
# names from database or TV guide
# ------------------------------------------------------------------------
showNameMatchDict = {}
uniqueFileShowList = self._GetUniqueFileShowNames(self._fileList)
if len(uniqueFileShowList) > 0:
goodlogging.Log.Seperator()
for fileShowName in uniqueFileShowList:
showNameMatchDict[fileShowName] = self._GetShowInfo(fileShowName)
goodlogging.Log.NewLine()
# ------------------------------------------------------------------------
# Update each file with showID and showName
# ------------------------------------------------------------------------
incompatibleFileList = []
validShowFileList = []
for tvFile in self._fileList:
if showNameMatchDict[tvFile.fileInfo.showName] is None:
incompatibleFileList.append(tvFile)
else:
tvFile.showInfo.showID = showNameMatchDict[tvFile.fileInfo.showName].showID
tvFile.showInfo.showName = showNameMatchDict[tvFile.fileInfo.showName].showName
validShowFileList.append(tvFile)
# ------------------------------------------------------------------------
# Get episode name for all remaining files in valid list
# ------------------------------------------------------------------------
if len(validShowFileList) > 0:
goodlogging.Log.Seperator()
validEpisodeNameFileList = []
goodlogging.Log.Info("RENAMER", "Looking up episode names:\n")
for tvFile in validShowFileList:
tvFile.showInfo.episodeName = self._guide.EpisodeNameLookUp(tvFile.showInfo.showName, tvFile.showInfo.seasonNum, tvFile.showInfo.episodeNum)
if tvFile.showInfo.episodeName is None:
incompatibleFileList.append(tvFile)
else:
validEpisodeNameFileList.append(tvFile)
goodlogging.Log.Info("RENAMER", "{0} S{1}E{2}: {3}".format(tvFile.showInfo.showName, tvFile.showInfo.seasonNum, tvFile.showInfo.episodeNum, tvFile.showInfo.episodeName))
goodlogging.Log.NewLine()
# ------------------------------------------------------------------------
# Print file details and generate new file paths
# ------------------------------------------------------------------------
goodlogging.Log.Seperator()
renameFileList = []
skippedFileList = []
goodlogging.Log.Info("RENAMER", "Generating library paths:\n")
if len(validEpisodeNameFileList) == 0:
goodlogging.Log.Info("RENAMER", "No compatible files were detected")
else:
for tvFile in validEpisodeNameFileList:
tvFile.Print()
goodlogging.Log.NewLine()
if self._inPlaceRename is False:
tvFile = self._GenerateLibraryPath(tvFile, self._tvDir)
else:
tvFile.GenerateNewFilePath()
if tvFile.fileInfo.newPath is None:
incompatibleFileList.append(tvFile)
elif tvFile.fileInfo.origPath != tvFile.fileInfo.newPath:
renameFileList.append(tvFile)
else:
skippedFileList.append(tvFile)
goodlogging.Log.NewLine()
# ------------------------------------------------------------------------
# Rename files
# ------------------------------------------------------------------------
goodlogging.Log.Seperator()
goodlogging.Log.Info("RENAMER", "Renamable files:\n")
if len(renameFileList) == 0:
goodlogging.Log.Info("RENAMER", "No renamable files were detected")
else:
showName = None
renameFileList.sort()
for tvFile in renameFileList:
if showName is None or showName != tvFile.showInfo.showName:
showName = tvFile.showInfo.showName
goodlogging.Log.Info("RENAMER", "{0}".format(showName))
goodlogging.Log.IncreaseIndent()
goodlogging.Log.Info("RENAMER", "FROM: {0}".format(tvFile.fileInfo.origPath))
goodlogging.Log.Info("RENAMER", "TO: {0}".format(tvFile.fileInfo.newPath))
goodlogging.Log.DecreaseIndent()
goodlogging.Log.NewLine()
if self._skipUserInput is False:
response = goodlogging.Log.Input('RENAMER', "***WARNING*** CONTINUE WITH RENAME PROCESS? [y/n]: ")
response = util.ValidUserResponse(response, ('y','n'))
else:
response = 'y'
if response == 'n':
goodlogging.Log.Info("RENAMER", "Renaming process skipped")
elif response == 'y':
goodlogging.Log.NewLine()
if self._inPlaceRename is False:
goodlogging.Log.Info("RENAMER", "Adding files to TV library:\n")
else:
goodlogging.Log.Info("RENAMER", "Renaming files:\n")
for tvFile in renameFileList:
self._MoveFileToLibrary(tvFile.fileInfo.origPath, tvFile.fileInfo.newPath)
goodlogging.Log.NewLine()
# ------------------------------------------------------------------------
# List skipped files
# ------------------------------------------------------------------------
if len(skippedFileList) > 0:
goodlogging.Log.Seperator()
goodlogging.Log.Info("RENAMER", "Skipped files:")
goodlogging.Log.IncreaseIndent()
for tvFile in skippedFileList:
if tvFile.fileInfo.origPath == tvFile.fileInfo.newPath:
goodlogging.Log.Info("RENAMER", "{0} (No rename required)".format(tvFile.fileInfo.origPath))
else:
goodlogging.Log.Info("RENAMER", "{0} (Unknown reason)".format(tvFile.fileInfo.origPath))
goodlogging.Log.DecreaseIndent()
# ------------------------------------------------------------------------
# List incompatible files
# ------------------------------------------------------------------------
if len(incompatibleFileList) > 0:
goodlogging.Log.Seperator()
goodlogging.Log.Info("RENAMER", "Incompatible files:")
goodlogging.Log.IncreaseIndent()
for tvFile in incompatibleFileList:
if tvFile.showInfo.showName is None:
goodlogging.Log.Info("RENAMER", "{0} (Missing show name)".format(tvFile.fileInfo.origPath))
elif tvFile.showInfo.episodeName is None:
goodlogging.Log.Info("RENAMER", "{0} (Missing episode name)".format(tvFile.fileInfo.origPath))
elif tvFile.fileInfo.newPath is None:
goodlogging.Log.Info("RENAMER", "{0} (Failed to create new file path)".format(tvFile.fileInfo.origPath))
else:
goodlogging.Log.Info("RENAMER", "{0} (Unknown reason)".format(tvFile.fileInfo.origPath))
goodlogging.Log.DecreaseIndent() | 0.009747 |
def create_customer_gateway(vpn_connection_type, ip_address, bgp_asn,
customer_gateway_name=None, tags=None,
region=None, key=None, keyid=None, profile=None):
'''
Given a valid VPN connection type, a static IP address and a customer
gateway’s Border Gateway Protocol (BGP) Autonomous System Number,
create a customer gateway.
Returns the customer gateway id if the customer gateway was created and
returns False if the customer gateway was not created.
CLI Example:
.. code-block:: bash
salt myminion boto_vpc.create_customer_gateway 'ipsec.1', '12.1.2.3', 65534
'''
return _create_resource('customer_gateway', customer_gateway_name,
type=vpn_connection_type,
ip_address=ip_address, bgp_asn=bgp_asn,
tags=tags, region=region, key=key,
keyid=keyid, profile=profile) | 0.002028 |
def step(self, batch_size, ignore_stale_grad=False):
"""Makes one step of parameter update. Should be called after
`autograd.backward()` and outside of `record()` scope.
For normal parameter updates, `step()` should be used, which internally calls
`allreduce_grads()` and then `update()`. However, if you need to get the reduced
gradients to perform certain transformation, such as in gradient clipping, then
you may want to manually call `allreduce_grads()` and `update()` separately.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
ignore_stale_grad : bool, optional, default=False
If true, ignores Parameters with stale gradient (gradient that has not
been updated by `backward` after last step) and skip update.
"""
rescale_grad = self._scale / batch_size
self._check_and_rescale_grad(rescale_grad)
if not self._kv_initialized:
self._init_kvstore()
if self._params_to_init:
self._init_params()
self._allreduce_grads()
self._update(ignore_stale_grad) | 0.006897 |
def _get_response(self, timeout=1.0, eor=('\n', '\n- ')):
""" Reads a response from the drive.
Reads the response returned by the drive with an optional
timeout. All carriage returns and linefeeds are kept.
Parameters
----------
timeout : number, optional
Optional timeout in seconds to use when reading the
response. A negative value or ``None`` indicates that the
an infinite timeout should be used.
eor : str or iterable of str, optional
``str`` or iterable of ``str`` that denote the allowed
End Of Response. For most commands, it should be
``('\\n', '\\n- ')``, but for running a program, it should
be ``'*END\\n'``. The default is ``('\\n', '\\n- ')``.
Returns
-------
response : str
The response obtained from the drive. Carriage returns and
linefeeds are preserved.
"""
# If no timeout is given or it is invalid and we are using '\n'
# as the eor, use the wrapper to read a line with an infinite
# timeout. Otherwise, the reading and timeout must be
# implemented manually.
if (timeout is None or timeout < 0) and eor == '\n':
return self._sio.readline()
else:
# A timer will be made that takes timeout to finish. Then,
# it is a matter of checking whether it is alive or not to
# know whether the timeout was exceeded or not. They need to
# be checked to make sure they are not too big, which is
# threading.TIMEOUT_MAX on Python 3.x and not specified on
# Python 2.x (lets use a week). Then, the timer is started.
if sys.hexversion >= 0x03000000:
maxtimeout = threading.TIMEOUT_MAX
else:
maxtimeout = 7*24*3600
timeout = min(timeout, maxtimeout)
tm = threading.Timer(timeout, lambda : None)
tm.start()
# eor needs to be converted to bytes. If it is just an str,
# it needs to be wrapped in a tuple.
if isinstance(eor, str):
eor = tuple([eor])
if sys.hexversion >= 0x03000000:
eor = [s.encode(encoding='ASCII') for s in eor]
# Read from the serial port into buf until the EOR is found
# or the timer has stopped. A small pause is done each time
# so that this thread doesn't hog the CPU.
buf = b''
while not any([(x in buf) for x in eor]) and tm.is_alive():
time.sleep(0.001)
buf += self._ser.read(self._ser.inWaiting())
# Just in case the timer has not stopped (EOR was found),
# stop it.
tm.cancel()
# Remove anything after the EOR if there is one. First, a
# set of matches (index, eor_str) for each string in eor
# needs to be constructed. Sorting the matches by their
# index puts all the ones that were not found (index of -1)
# at the front. Then a list of bools that are True for each
# index that isn't -1 is made, converted to a bytes (True
# goes to b'\x01' and False goes to b'\x00'), and then the
# index of the first True value found. If it is not -1, then
# there was a successful match and all the characters are
# dropped after that eor_str.
matches = [(buf.find(x), x) for x in eor]
matches.sort(key=lambda x: x[0])
index = bytes([x[0] != -1 for x in matches]).find(b'\x01')
if index != -1:
buf = buf[:(matches[index][0] + len(matches[index][1]))]
# Convert to an str before returning.
if sys.hexversion >= 0x03000000:
return buf.decode(errors='replace')
else:
return buf | 0.000752 |
def find_promulgation_date(line):
"""
>>> find_promulgation_date("Loi nº 2010-383 du 16 avril 2010 autorisant l'approbation de l'accord entre...")
'2010-04-16'
"""
line = line.split(' du ')[1]
return format_date(re.search(r"(\d\d? \w\w\w+ \d\d\d\d)", line).group(1)) | 0.006873 |
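The snippet depends on an external `format_date` helper; below is a self-contained sketch with a minimal French-month table standing in for it (the table and the group-based parsing are assumptions about what `format_date` does).

import re

_MONTHS = {'janvier': 1, 'février': 2, 'mars': 3, 'avril': 4, 'mai': 5,
           'juin': 6, 'juillet': 7, 'août': 8, 'septembre': 9,
           'octobre': 10, 'novembre': 11, 'décembre': 12}

def find_promulgation_date(line):
    # Keep the text after ' du ', then parse "<day> <month> <year>".
    line = line.split(' du ')[1]
    day, month, year = re.search(r"(\d\d?) (\w\w\w+) (\d\d\d\d)", line).groups()
    return '%s-%02d-%02d' % (year, _MONTHS[month.lower()], int(day))

print(find_promulgation_date(
    "Loi nº 2010-383 du 16 avril 2010 autorisant l'approbation de l'accord entre..."))
# 2010-04-16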
def _supplementary_files_download_worker(*args):
"""A worker to download supplementary files.
To be used with multiprocessing.
"""
gsm = args[0][0]
download_sra = args[0][1]
email = args[0][2]
dirpath = args[0][3]
sra_kwargs = args[0][4]
return (gsm.get_accession(), gsm.download_supplementary_files(
directory=dirpath,
download_sra=download_sra,
email=email, **sra_kwargs)) | 0.002299 |
def build_service(service_descriptor, did):
"""
Build a service.
:param service_descriptor: Tuples of length 2. The first item must be one of ServiceTypes
and the second item is a dict of parameters and values required by the service
:param did: DID, str
:return: Service
"""
assert isinstance(service_descriptor, tuple) and len(
service_descriptor) == 2, 'Unknown service descriptor format.'
service_type, kwargs = service_descriptor
if service_type == ServiceTypes.METADATA:
return ServiceFactory.build_metadata_service(
did,
kwargs['metadata'],
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.AUTHORIZATION:
return ServiceFactory.build_authorization_service(
kwargs['serviceEndpoint']
)
elif service_type == ServiceTypes.ASSET_ACCESS:
return ServiceFactory.build_access_service(
did, kwargs['price'],
kwargs['consumeEndpoint'], kwargs['serviceEndpoint'],
kwargs['timeout'], kwargs['templateId']
)
elif service_type == ServiceTypes.CLOUD_COMPUTE:
return ServiceFactory.build_compute_service(
did, kwargs['price'],
kwargs['consumeEndpoint'], kwargs['serviceEndpoint'], kwargs['timeout']
)
raise ValueError(f'Unknown service type {service_type}') | 0.00327 |
def initialize(self, timeouts):
""" Bind or connect the nanomsg socket to some address """
# Bind or connect to address
if self.bind is True:
self.socket.bind(self.address)
else:
self.socket.connect(self.address)
# Set send and recv timeouts
self._set_timeouts(timeouts) | 0.005814 |
def undefinedImageType(self):
"""
Returns the name of undefined image type for the invalid image.
.. versionadded:: 2.3.0
"""
if self._undefinedImageType is None:
ctx = SparkContext._active_spark_context
self._undefinedImageType = \
ctx._jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
return self._undefinedImageType | 0.007109 |
def _parse_objects_from_xml_elts(bucket_name, contents, common_prefixes):
"""Internal function that extracts objects and common prefixes from
list_objects responses.
"""
objects = [
Object(bucket_name,
content.get_child_text('Key'),
content.get_localized_time_elem('LastModified'),
content.get_etag_elem(strict=False),
content.get_int_elem('Size'),
is_dir=content.is_dir())
for content in contents
]
object_dirs = [
Object(bucket_name, dir_elt.text(), None, '',
0, is_dir=True)
for dirs_elt in common_prefixes
for dir_elt in dirs_elt.findall('Prefix')
]
return objects, object_dirs | 0.001342 |
def run(self, steps=None, resume=False, redo=None):
"""
Run a Stimela recipe.
steps : recipe steps to run
resume : resume recipe from last run
redo : Re-run an old recipe from a .last file
"""
recipe = {
"name" : self.name,
"steps" : []
}
start_at = 0
if redo:
recipe = utils.readJson(redo)
self.log.info('Rerunning recipe {0} from {1}'.format(recipe['name'], redo))
self.log.info('Recreating recipe instance..')
self.jobs = []
for step in recipe['steps']:
# add I/O folders to the json file
# add a string describing the contents of these folders
# The user has to ensure that these folders exist, and have the required content
if step['jtype'] == 'docker':
self.log.info('Adding job \'{0}\' to recipe. The container will be named \'{1}\''.format(step['cab'], step['name']))
cont = docker.Container(step['cab'], step['name'],
label=step['label'], logger=self.log,
shared_memory=step['shared_memory'])
self.log.debug('Adding volumes {0} and environmental variables {1}'.format(step['volumes'], step['environs']))
cont.volumes = step['volumes']
cont.environs = step['environs']
cont.shared_memory = step['shared_memory']
cont.input_content = step['input_content']
cont.msdir_content = step['msdir_content']
cont.logfile = step['logfile']
job = StimelaJob(step['name'], recipe=self, label=step['label'])
job.job = cont
job.jtype = 'docker'
elif step['jtype'] == 'function':
name = step['name']
func = inspect.currentframe().f_back.f_locals[step['function']]
job = StimelaJob(name, recipe=self, label=step['label'])
job.python_job(func, step['parameters'])
job.jtype = 'function'
self.jobs.append(job)
elif resume:
self.log.info("Resuming recipe from last run.")
try:
recipe = utils.readJson(self.resume_file)
except IOError:
raise StimelaRecipeExecutionError("Cannot resume pipeline, resume file '{}' not found".format(self.resume_file))
steps_ = recipe.pop('steps')
recipe['steps'] = []
_steps = []
for step in steps_:
if step['status'] == 'completed':
recipe['steps'].append(step)
continue
label = step['label']
number = step['number']
# Check if the recipe flow has changed
if label == self.jobs[number-1].label:
self.log.info('recipe step \'{0}\' is fit for re-execution. Label = {1}'.format(number, label))
_steps.append(number)
else:
raise StimelaRecipeExecutionError('Recipe flow, or task scheduling has changed. Cannot resume recipe. Label = {0}'.format(label))
# Check whether there are steps to resume
if len(_steps)==0:
self.log.info('All the steps were completed. No steps to resume')
sys.exit(0)
steps = _steps
if getattr(steps, '__iter__', False):
_steps = []
if isinstance(steps[0], str):
labels = [ job.label.split('::')[0] for job in self.jobs]
for step in steps:
try:
_steps.append(labels.index(step)+1)
except ValueError:
raise StimelaCabParameterError('Recipe label ID [{0}] doesn\'t exist'.format(step))
steps = _steps
else:
steps = range(1, len(self.jobs)+1)
jobs = [(step, self.jobs[step-1]) for step in steps]
for i, (step, job) in enumerate(jobs):
self.log.info('Running job {}'.format(job.name))
self.log.info('STEP {0} :: {1}'.format(i+1, job.label))
self.active = job
try:
if job.jtype == 'function':
job.run_python_job()
elif job.jtype in ['docker', 'singularity']:
with open(job.job.logfile, 'a') as astd:
astd.write('\n-----------------------------------\n')
astd.write('Stimela version : {}\n'.format(version.version))
astd.write('Cab name : {}\n'.format(job.job.image))
astd.write('-------------------------------------\n')
run_job = getattr(job, "run_{0:s}_job".format(job.jtype))
run_job()
self.log2recipe(job, recipe, step, 'completed')
except (utils.StimelaCabRuntimeError,
StimelaRecipeExecutionError,
StimelaCabParameterError) as e:
self.completed = [jb[1] for jb in jobs[:i]]
self.remaining = [jb[1] for jb in jobs[i+1:]]
self.failed = job
self.log.info('Recipe execution failed while running job {}'.format(job.name))
self.log.info('Completed jobs : {}'.format([c.name for c in self.completed]))
self.log.info('Remaining jobs : {}'.format([c.name for c in self.remaining]))
self.log2recipe(job, recipe, step, 'failed')
for step, jb in jobs[i+1:]:
self.log.info('Logging remaining task: {}'.format(jb.label))
self.log2recipe(jb, recipe, step, 'remaining')
self.log.info('Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
pe = PipelineException(e, self.completed, job, self.remaining)
raise_(pe, None, sys.exc_info()[2])
except:
import traceback
traceback.print_exc()
raise RuntimeError("An unhandled exception has occured. This is a bug, please report")
finally:
if job.jtype == 'docker' and job.created:
job.job.stop()
job.job.remove()
if job.jtype == 'singularity' and job.created:
job.job.stop()
self.log.info('Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
self.log.info('Recipe executed successfully')
return 0 | 0.006104 |
def _roundSlist(slist):
""" Rounds a signed list over the last element and removes it. """
slist[-1] = 60 if slist[-1] >= 30 else 0
for i in range(len(slist)-1, 1, -1):
if slist[i] == 60:
slist[i] = 0
slist[i-1] += 1
return slist[:-1] | 0.003546 |
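A runnable sketch of the rounding rule above, applied to a sign-prefixed [sign, degrees, minutes, seconds] list; the input values are made up.

def round_slist(slist):
    slist = list(slist)                       # work on a copy
    slist[-1] = 60 if slist[-1] >= 30 else 0  # round the last element
    for i in range(len(slist) - 1, 1, -1):    # propagate the carry leftwards
        if slist[i] == 60:
            slist[i] = 0
            slist[i - 1] += 1
    return slist[:-1]

print(round_slist(['+', 12, 30, 45]))  # ['+', 12, 31]: 45s rounds up
print(round_slist(['+', 12, 59, 45]))  # ['+', 13, 0]: carry propagates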
def _try_reduce_list(statements: List["HdlStatement"]):
"""
Simplify statements in the list
"""
io_change = False
new_statements = []
for stm in statements:
reduced, _io_change = stm._try_reduce()
new_statements.extend(reduced)
io_change |= _io_change
new_statements, rank_decrease = HdlStatement._merge_statements(
new_statements)
return new_statements, rank_decrease, io_change | 0.004049 |
def _package_path(name):
"""Returns the path to the package containing the named module or
None if the path could not be identified (e.g., if
``name == "__main__"``).
"""
loader = pkgutil.get_loader(name)
if loader is None or name == '__main__':
return None
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(name)
else:
# Fall back to importing the specified module.
__import__(name)
filepath = sys.modules[name].__file__
return os.path.dirname(os.path.abspath(filepath)) | 0.001767 |
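An illustrative call (paths vary by interpreter and platform; 'json' is used purely as an example package name):
import os, pkgutil, sys  # already required by _package_path itself

print(_package_path('json'))      # e.g. '/usr/lib/python3.10/json'
print(_package_path('__main__'))  # None, by design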
def _send_resolve_request(self):
"sends RESOLVE_PTR request (Tor custom)"
host = self._addr.host.encode()
self._data_to_send(
struct.pack(
'!BBBBB{}sH'.format(len(host)),
5, # version
0xF0, # command
0x00, # reserved
0x03, # DOMAINNAME
len(host),
host,
0, # self._addr.port?
)
) | 0.003802 |
def timerEvent( self, event ):
"""
When the timer finishes, hide the tooltip popup widget.
:param event | <QEvent>
"""
if self.currentMode() == XPopupWidget.Mode.ToolTip:
self.killTimer(event.timerId())
event.accept()
self.close()
else:
super(XPopupWidget, self).timerEvent(event) | 0.012469 |
def delete_activity(self, id_num):
"""Delete an activity (run).
:param id_num: The activity ID to delete
"""
url = self._build_url('my', 'activities', id_num)
r = self.session.delete(url)
r.raise_for_status()
return r | 0.007246 |
def srem(self, name, *values):
"""
        Send raw (source) values here. Correct behaviour with other values
        is not guaranteed.
"""
return self.storage.srem(name, *self.dump(values, False)) | 0.008475 |
def _register_user_models(user_models, admin=None, schema=None):
"""Register any user-defined models with the API Service.
:param list user_models: A list of user-defined models to include in the
API service
"""
if any([issubclass(cls, AutomapModel) for cls in user_models]):
AutomapModel.prepare( # pylint:disable=maybe-no-member
db.engine, reflect=True, schema=schema)
for user_model in user_models:
register_model(user_model, admin) | 0.001873 |
def _copyAllocatedStates(self):
"""If state is allocated in CPP, copy over the data into our numpy arrays."""
# Get learn states if we need to print them out
if self.verbosity > 1 or self.retrieveLearningStates:
(activeT, activeT1, predT, predT1) = self.cells4.getLearnStates()
self.lrnActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))
self.lrnActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))
self.lrnPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))
self.lrnPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn))
if self.allocateStatesInCPP:
assert False
(activeT, activeT1, predT, predT1, colConfidenceT, colConfidenceT1, confidenceT,
confidenceT1) = self.cells4.getStates()
self.cellConfidence['t'] = confidenceT.reshape((self.numberOfCols, self.cellsPerColumn))
self.cellConfidence['t-1'] = confidenceT1.reshape((self.numberOfCols, self.cellsPerColumn))
self.colConfidence['t'] = colConfidenceT.reshape(self.numberOfCols)
self.colConfidence['t-1'] = colConfidenceT1.reshape(self.numberOfCols)
self.infActiveState['t-1'] = activeT1.reshape((self.numberOfCols, self.cellsPerColumn))
self.infActiveState['t'] = activeT.reshape((self.numberOfCols, self.cellsPerColumn))
self.infPredictedState['t-1'] = predT1.reshape((self.numberOfCols, self.cellsPerColumn))
self.infPredictedState['t'] = predT.reshape((self.numberOfCols, self.cellsPerColumn)) | 0.017823 |
def get_field_by_showname(self, showname):
"""
Gets a field by its "showname"
(the name that appears in Wireshark's detailed display i.e. in 'User-Agent: Mozilla...', 'User-Agent' is the
showname)
Returns None if not found.
"""
for field in self._get_all_fields_with_alternates():
if field.showname_key == showname:
# Return it if "XXX: whatever == XXX"
return field | 0.006397 |
def plot_dry_adiabats(self, p=None, theta=None, **kwargs):
r'''Plot dry adiabats.
Adds dry adiabats (lines of constant potential temperature) to the
plot. The default style of these lines is dashed red lines with an
alpha value of 0.5. These can be overridden using keyword arguments.
Parameters
----------
p : array_like, optional
1-dimensional array of pressure values to be included in the dry
adiabats. If not specified, they will be linearly distributed
across the current plotted pressure range.
theta : array_like, optional
1-dimensional array of potential temperature values for dry
adiabats. By default these will be generated based on the current
temperature limits.
kwargs
Other keyword arguments to pass to
`matplotlib.collections.LineCollection`
        See Also
--------
plot_moist_adiabats
`matplotlib.collections.LineCollection`
`metpy.calc.dry_lapse`
'''
for artist in self._dry_adiabats:
artist.remove()
self._dry_adiabats = []
# Determine set of starting temps if necessary
if theta is None:
xmin, xmax = self.get_xlim()
theta = np.arange(xmin, xmax + 201, 10)
# Get pressure levels based on ylims if necessary
if p is None:
p = np.linspace(*self.get_ylim())
# Assemble into data for plotting
t = calculate('T', theta=theta[:, None], p=p, p_units='hPa',
T_units='degC', theta_units='degC')
linedata = [np.vstack((ti, p)).T for ti in t]
# Add to plot
kwargs.setdefault('clip_on', True)
kwargs.setdefault('colors', '#A65300')
kwargs.setdefault('linestyles', '-')
kwargs.setdefault('alpha', 1)
kwargs.setdefault('linewidth', 0.5)
kwargs.setdefault('zorder', 1.1)
collection = LineCollection(linedata, **kwargs)
self._dry_adiabats.append(collection)
self.add_collection(collection)
theta = theta.flatten()
T_label = calculate('T', p=140, p_units='hPa', theta=theta,
T_units='degC', theta_units='degC')
for i in range(len(theta)):
text = self.text(
T_label[i], 140, '{:.0f}'.format(theta[i]),
fontsize=8, ha='left', va='center', rotation=-60,
color='#A65300', bbox={
'facecolor': 'w', 'edgecolor': 'w', 'alpha': 0,
}, zorder=1.2)
text.set_clip_on(True)
self._dry_adiabats.append(text) | 0.000736 |
def copy_dir(src, dst):
""" this function will simply copy the file from the source path to the dest
path given as input
"""
try:
debug.log("copy dir from "+ src, "to "+ dst)
shutil.copytree(src, dst)
except Exception as e:
debug.log("Error: happened while copying!\n%s\n"%e) | 0.032468 |
def deserialize_bitarray(ser):
# type: (str) -> bitarray
"""Deserialize a base 64 encoded string to a bitarray (bloomfilter)
"""
ba = bitarray()
ba.frombytes(base64.b64decode(ser.encode(encoding='UTF-8', errors='strict')))
return ba | 0.011494 |
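A round-trip sketch with a matching encoder. The serializer shown here is an assumption — any counterpart that base64-encodes bitarray.tobytes() would satisfy this decoder.
import base64
from bitarray import bitarray

def serialize_bitarray(ba):
    # assumed counterpart of deserialize_bitarray
    return base64.b64encode(ba.tobytes()).decode('UTF-8')

original = bitarray('1011001110001111')  # 16 bits, a whole number of bytes
assert deserialize_bitarray(serialize_bitarray(original)) == original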
def document(self):
"""
Return :class:`~prompt_toolkit.document.Document` instance from the
current text, cursor position and selection state.
"""
return self._document_cache[
self.text, self.cursor_position, self.selection_state] | 0.007092 |
def format_file_node(import_graph, node, indent):
"""Prettyprint nodes based on their provenance."""
f = import_graph.provenance[node]
if isinstance(f, resolve.Direct):
out = '+ ' + f.short_path
elif isinstance(f, resolve.Local):
out = ' ' + f.short_path
elif isinstance(f, resolve.System):
out = ':: ' + f.short_path
elif isinstance(f, resolve.Builtin):
out = '(%s)' % f.module_name
else:
out = '%r' % node
return ' '*indent + out | 0.00198 |
def add_templatetype(templatetype,**kwargs):
"""
Add a template type with typeattrs.
"""
type_i = _update_templatetype(templatetype)
db.DBSession.flush()
return type_i | 0.010101 |
def ber(tp, tn, fp, fn):
"""Balanced Error Rate [0, 1]
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float
"""
return (fp / float(tn + fp) + fn / float(fn + tp)) / 2 | 0.009174 |
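A worked example on a small confusion matrix:
# tp=40, tn=50, fp=10, fn=20
# false positive rate: fp / (tn + fp) = 10 / 60 ≈ 0.167
# false negative rate: fn / (fn + tp) = 20 / 60 ≈ 0.333
# BER = (0.167 + 0.333) / 2 = 0.25
print(ber(tp=40, tn=50, fp=10, fn=20))   # 0.25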
def ndb_delete(self, entity_or_key):
"""Like delete(), but for NDB entities/keys."""
if ndb is not None and isinstance(entity_or_key, ndb.Model):
key = entity_or_key.key
else:
key = entity_or_key
self.ndb_deletes.append(key) | 0.011905 |
def headers(self):
"""Return the headers for the resource. Returns the AltName, if specified; if not, then the
Name, and if that is empty, a name based on the column position. These headers
        are specifically applicable to the output table, and may not apply to the resource source. For those headers,
use source_headers"""
t = self.schema_term
if t:
return [self._name_for_col_term(c, i)
for i, c in enumerate(t.children, 1) if c.term_is("Table.Column")]
else:
return None | 0.010471 |
def Y(self,value):
""" set phenotype """
assert value.shape[1]==1, 'Dimension mismatch'
self._N = value.shape[0]
self._Y = value | 0.025 |
def get_help_commands(server_prefix):
"""
Get the help commands for all modules
Args:
server_prefix: The server command prefix
Returns:
datapacks (list): A list of datapacks for the help commands for all the modules
"""
datapacks = []
_dir = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
for module_name in os.listdir("{}/../".format(_dir)):
if not module_name.startswith("_") and not module_name.startswith("!"):
help_command = "`{}help {}`".format(server_prefix, module_name)
datapacks.append((module_name, help_command, True))
return datapacks | 0.00299 |
def errorhandler_callback(cls, exc):
"""This function should be called in the global error handlers. This
        will allow cleanup tasks to be consolidated if the exception
bubbles all the way to the top of the stack.
For example, this method will automatically rollback the database
session if the exception bubbles to the top.
This is the method that :meth:`register_errorhandler` adds as an
errorhandler. See the documentation there for more info.
Args:
exc (FleakerBaseException):
The exception that was thrown that we are to handle.
"""
# @TODO (orm, exc): Implement this when the ORM/DB stuff is done
# if not exc.prevent_rollback:
# db.session.rollback()
if exc.flash_message:
flash(exc.flash_message, exc.flash_level)
if exc.redirect is not MISSING:
return redirect(url_for(exc.redirect, **exc.redirect_args))
error_result = exc.error_page()
if error_result is not None:
return error_result, exc.status_code or 500 | 0.001784 |
def get_filter_regex(self):
"""
Used by the GUI to obtain human-readable version of the filter
"""
if self.windowInfoRegex is not None:
if self.isRecursive:
return self.windowInfoRegex.pattern
else:
return self.windowInfoRegex.pattern
elif self.parent is not None:
return self.parent.get_child_filter()
else:
return "" | 0.004484 |
def fetch(self):
"""Fetch the mbox files from the remote archiver.
Stores the archives in the path given during the initialization
of this object. Those archives which a not valid extension will
be ignored.
Groups.io archives are returned as a .zip file, which contains
one file in mbox format.
        :returns: whether the mbox archive was downloaded successfully
"""
logger.info("Downloading mboxes from '%s'", self.uri)
logger.debug("Storing mboxes in '%s'", self.dirpath)
if not os.path.exists(self.dirpath):
os.makedirs(self.dirpath)
group_id = self.__find_group_id()
url = urijoin(GROUPSIO_API_URL, self.DOWNLOAD_ARCHIVES)
payload = {'group_id': group_id}
filepath = os.path.join(self.dirpath, MBOX_FILE)
success = self._download_archive(url, payload, filepath)
return success | 0.002086 |
def complete(self, return_code):
"""
Mark the process as complete with provided return_code
"""
self.return_code = return_code
self.status = 'COMPLETE' if not return_code else 'FAILED'
self.end_time = datetime.datetime.now() | 0.007353 |
def get_model_class(self):
"""Get model class"""
if getattr(self, 'model', None):
return self.model
elif getattr(self, 'object', None):
return self.object.__class__
elif 'app' in self.kwargs and 'model' in self.kwargs:
return apps.get_model(self.kwargs.get('app'), self.kwargs.get('model'))
elif hasattr(self, 'get_queryset'):
return self.get_queryset().model
else:
return None | 0.006186 |
def dihed_single(self, g_num, at_1, at_2, at_3, at_4):
""" Dihedral/out-of-plane angle among four atoms.
Returns the out-of-plane angle among four atoms from geometry
`g_num`, in degrees. The reference plane
is spanned by `at_1`, `at_2` and `at_3`. The out-of-plane angle is
defined such that a positive angle represents a counter-clockwise
rotation of the projected `at_3`\\ :math:`\\rightarrow`\\ `at_4`
vector with respect to the
reference plane when looking from `at_3` toward `at_2`.
Zero rotation corresponds to occlusion of `at_1` and `at_4`;
that is, the case where
the respective rejections of `at_1`
:math:`\\rightarrow`\\ `at_2` and
`at_3`\\ :math:`\\rightarrow`\\ `at_4` onto
`at_2`\\ :math:`\\rightarrow`\\ `at_3`
are ANTI-PARALLEL.
.. todo:: Pull the above to User Guide eventually, with figures.
All four atom indices must be distinct. Both of the atom trios 1-2-3
and 2-3-4 must be sufficiently nonlinear, as diagnosed by a bend
angle different from 0 or 180 degrees by at least
:data:`PRM.NON_PARALLEL_TOL <opan.const.PRM.NON_PARALLEL_TOL>`.
Parameters
----------
g_num
|int| -- Index of the desired geometry
at_1
|int| -- Index of the first atom
at_2
|int| -- Index of the second atom
at_3
|int| -- Index of the third atom
at_4
|int| -- Index of the fourth atom
Returns
-------
dihed
|npfloat_| --
Out-of-plane/dihedral angle in degrees for the indicated `at_#`,
drawn from geometry `g_num`
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided
~exceptions.ValueError
If any indices `at_#` are equal
~opan.error.XYZError
(typecode :data:`~opan.error.XYZError.DIHED`) If either
of the atom trios (1-2-3 or 2-3-4) is too close to linearity
"""
# library imports
import numpy as np
from scipy import linalg as spla
from .utils.vector import ortho_basis, rej, vec_angle
from .utils import safe_cast as scast
from .error import XYZError
from .const import PRM
# The below errors are explicitly checked and raised since the indices
# are multiplied by three when they are used as an index
# and thus give non-intuitive errors in later code.
# Complain if at_1 is invalid
if not(-self.num_atoms <= at_1 < self.num_atoms):
raise IndexError("Invalid index for 'at_1' ({0})".format(at_1))
# Complain if at_2 is invalid
if not(-self.num_atoms <= at_2 < self.num_atoms):
raise IndexError("Invalid index for 'at_2' ({0})".format(at_2))
# Complain if at_3 is invalid
if not(-self.num_atoms <= at_3 < self.num_atoms):
raise IndexError("Invalid index for 'at_3' ({0})".format(at_3))
# Complain if at_4 is invalid
if not(-self.num_atoms <= at_4 < self.num_atoms):
raise IndexError("Invalid index for 'at_4' ({0})".format(at_4))
# Should never be necessary (save for badly erroneous calling code),
# but coerce the at_x to their floor() values. This is again
        # needed since they are multiplied by three in the index expressions
# below, and can cause funny behavior when truncated by the indexing
at_1 = scast(np.floor(at_1), np.int_)
at_2 = scast(np.floor(at_2), np.int_)
at_3 = scast(np.floor(at_3), np.int_)
at_4 = scast(np.floor(at_4), np.int_)
# Proofread the atom numbers. Performed by double-iterative scan of
# the atom numbers, converting the index equality test results to
# ints and summing the results. Since each ats_n is not compared to
# itself, a sum of zero should diagnose the required mutually
# nonidentical indices.
#
# Pile the atom indices into a vector
ats = [at_1, at_2, at_3, at_4]
# Scan over the vector of indices pairwise without repetition, and
# without examining for at_i == at_i (which is trivially and always
# True). Store the outcomes as integers (True == 1; False == 0)
ats_test = [int(ats[x] == ats[y]) for x in range(4) \
for y in range(x+1,4)]
# For a proper set of indices, the sum over ats_test will be zero.
if sum(ats_test) > 0:
# Improper set of indices; at least one pair is duplicated.
# Collate the duplicative pairings and raise ValueError.
# np.triu_indices generates index pairs in the same sequence as
# the above double iteration over ats, but as a list of two
# np.arrays. column_stack puts them together as column vectors,
# allowing the conditional iteration over x to select only those
# index pairs that correspond to duplicated indices. The
# resulting filtered pairs are converted to tuples for concise
# formatting in the output.
ats_pairs = [tuple(np.column_stack(np.triu_indices(4,1))[x])
for x in range(6) if ats_test[x] == 1]
raise ValueError("Duplicate atom indices: {0}".format(ats_pairs))
## end if
# Check to ensure non-collinearity of the 1-2-3 and 2-3-4 atom trios
for idx in range(2):
# Store the relevant angle
ang = self.angle_single(g_num, [at_2, at_3][idx],
[at_1, at_2][idx],
[at_3, at_4][idx])
# Check for whether angle is too close to zero or 180 degrees
if np.min([ang, 180.0 - ang]) < PRM.NON_PARALLEL_TOL:
# Too close; raise error
raise XYZError(XYZError.DIHED,
"Angle {0} is insufficiently nonlinear"
.format([(at_2, at_1, at_3),
(at_3, at_2, at_4)][idx]),
"XYZ file: {0}".format(self.XYZ_path))
## end if
## next idx
# Store normalized atomic displacement vector at_2 --> at_3 as that
# defining the projection plane
plane_norm = self.displ_single(g_num, at_2, at_3)
plane_norm /= spla.norm(plane_norm)
# Retrieve the orthonormal basis in the projection plane, with the
# first vector being the normalized projection of the at_1 --> at_2
# displacement onto that plane
on1, on2 = ortho_basis(plane_norm, \
self.displ_single(g_num, at_1, at_2))
# Project the at_3 --> at_4 displacement onto the plane
#
# Retrieve the "back-side" displacement vector
back_vec = self.displ_single(g_num, at_3, at_4)
# Project onto the plane by subtracting out the plane_norm projection
# and re-normalize
back_vec = rej(back_vec, plane_norm)
back_vec /= spla.norm(back_vec)
# Calculate the absolute value of the departure of the dihedral/
# out-of-plane angle from 180 degrees as derived from the dot-product
# of on1 and back_vec. Both should be normalized at this point, so
# the calculation is straightforward
dihed = vec_angle(back_vec, on1)
# Given the handedness of the spanning vectors provided by ortho_basis,
# the sign of the dihed departure is that of the dot product
# of back_vec and on2.
dihed *= np.sign(np.dot(back_vec, on2))
# Conversion to the stated typical definition of a dihedral now
# requires addition of 180 degrees.
dihed += 180.0
# Should be set to return the value
return dihed | 0.002107 |
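The projection step above leans on a vector-rejection helper imported from .utils.vector; the following is a self-contained sketch of that standard operation (not the library's own implementation), which may make the geometry clearer:
import numpy as np

def rej_sketch(vec, onto):
    # component of `vec` perpendicular to `onto` (vector rejection)
    onto_hat = onto / np.linalg.norm(onto)
    return vec - np.dot(vec, onto_hat) * onto_hat

v = np.array([1.0, 2.0, 3.0])
n = np.array([0.0, 0.0, 1.0])
print(rej_sketch(v, n))   # [1. 2. 0.] -- the z-component along n is removed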
def _read_image_slice(self, arg):
"""
workhorse to read a slice
"""
if 'ndims' not in self._info:
raise ValueError("Attempt to slice empty extension")
if isinstance(arg, slice):
# one-dimensional, e.g. 2:20
return self._read_image_slice((arg,))
if not isinstance(arg, tuple):
raise ValueError("arguments must be slices, one for each "
"dimension, e.g. [2:5] or [2:5,8:25] etc.")
# should be a tuple of slices, one for each dimension
# e.g. [2:3, 8:100]
nd = len(arg)
if nd != self._info['ndims']:
raise ValueError("Got slice dimensions %d, "
"expected %d" % (nd, self._info['ndims']))
targ = arg
arg = []
for a in targ:
if isinstance(a, slice):
arg.append(a)
elif isinstance(a, int):
arg.append(slice(a, a+1, 1))
else:
raise ValueError("arguments must be slices, e.g. 2:12")
dims = self._info['dims']
arrdims = []
first = []
last = []
steps = []
# check the args and reverse dimensions since
# fits is backwards from numpy
dim = 0
for slc in arg:
start = slc.start
stop = slc.stop
step = slc.step
if start is None:
start = 0
if stop is None:
stop = dims[dim]
if step is None:
step = 1
if step < 1:
raise ValueError("slice steps must be >= 1")
if start < 0:
start = dims[dim] + start
if start < 0:
raise IndexError("Index out of bounds")
if stop < 0:
stop = dims[dim] + start + 1
# move to 1-offset
start = start + 1
if stop < start:
raise ValueError("python slices but include at least one "
"element, got %s" % slc)
if stop > dims[dim]:
stop = dims[dim]
first.append(start)
last.append(stop)
steps.append(step)
arrdims.append(stop-start+1)
dim += 1
first.reverse()
last.reverse()
steps.reverse()
first = numpy.array(first, dtype='i8')
last = numpy.array(last, dtype='i8')
steps = numpy.array(steps, dtype='i8')
npy_dtype = self._get_image_numpy_dtype()
array = numpy.zeros(arrdims, dtype=npy_dtype)
self._FITS.read_image_slice(self._ext+1, first, last, steps, array)
return array | 0.000723 |
def context(self):
""" Provides request context """
type = "client_associate" if self.key is None else "client_update"
data = {
"type": type,
"application_type": self.type,
}
# is this an update?
if self.key:
data["client_id"] = self.key
data["client_secret"] = self.secret
# Add optional params
if self.name:
data["application_name"] = self.name
if self.logo:
data["logo_url"] = self.logo
if self.contacts:
            # space separated list
data["contacts"] = " ".join(self.contacts)
if self.redirect:
data["redirect_uri"] = " ".join(self.redirect)
# Convert to JSON and send
return json.dumps(data) | 0.002475 |
def set_row_min_height(self, y: int, min_height: int):
"""Sets a minimum height for blocks in the row with coordinate y."""
if y < 0:
raise IndexError('y < 0')
self._min_heights[y] = min_height | 0.008734 |
def _izip(*iterables):
""" Iterate through multiple lists or arrays of equal size """
# This izip routine is from itertools
# izip('ABCD', 'xy') --> Ax By
iterators = map(iter, iterables)
while iterators:
yield tuple(map(next, iterators)) | 0.003745 |
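Note that this follows the classic Python 2 izip recipe: there, map() returns a list (so `while iterators` is truthy) and the StopIteration raised by next() terminates the generator. Under Python 3, map() is lazy and PEP 479 changed StopIteration handling, so the built-in zip() is the safer equivalent:
# Python 3 equivalent of the recipe above
for pair in zip('ABCD', 'xy'):
    print(pair)        # ('A', 'x') then ('B', 'y')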
def state(name):
'''
Returns the state of the container
name
Container name or ID
**RETURN DATA**
A string representing the current state of the container (either
``running``, ``paused``, or ``stopped``)
CLI Example:
.. code-block:: bash
salt myminion docker.state mycontainer
'''
contextkey = 'docker.state.{0}'.format(name)
if contextkey in __context__:
return __context__[contextkey]
__context__[contextkey] = _get_state(inspect_container(name))
return __context__[contextkey] | 0.001776 |
def dist(x1, x2=None, metric='sqeuclidean', to_numpy=True):
"""Compute distance between samples in x1 and x2 on gpu
Parameters
----------
x1 : np.array (n1,d)
matrix with n1 samples of size d
x2 : np.array (n2,d), optional
matrix with n2 samples of size d (if None then x2=x1)
metric : str
        Metric, one of 'sqeuclidean' or 'euclidean'
Returns
-------
M : np.array (n1,n2)
distance matrix computed with given metric
"""
if x2 is None:
x2 = x1
if metric == "sqeuclidean":
return euclidean_distances(x1, x2, squared=True, to_numpy=to_numpy)
elif metric == "euclidean":
return euclidean_distances(x1, x2, squared=False, to_numpy=to_numpy)
else:
raise NotImplementedError | 0.001271 |
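An illustrative call (this assumes euclidean_distances accepts NumPy arrays and handles host/device transfer internally, as the to_numpy flag suggests):
import numpy as np

x1 = np.random.rand(5, 3)                 # 5 samples of dimension 3
x2 = np.random.rand(7, 3)                 # 7 samples of dimension 3
M = dist(x1, x2, metric='sqeuclidean')    # (5, 7) pairwise squared distances
D = dist(x1)                              # x2 defaults to x1 -> (5, 5) self-distances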
def classFactory(iface):
"""Load Plugin class from file Plugin."""
    # Try to import submodule to check if they are present.
try:
from parameters.generic_parameter import GenericParameter
except ImportError:
# Don't use safe.utilities.i18n.tr as we need to be outside of `safe`.
# Some safe functions will import safe_extras.parameters
QMessageBox.warning(
None,
QCoreApplication.translate(
'@default', 'InaSAFE submodule not found'),
QCoreApplication.translate(
'@default',
'InaSAFE could not find the submodule "parameters". '
'You should do "git submodule update" or if you need a new '
'clone, do "git clone --recursive [email protected]:inasafe/'
'inasafe.git". If this is already a new clone, you should '
'do "git submodule init" before "git submodule update".'
'Finally, restart QGIS.'))
from .safe.plugin import Plugin
return Plugin(iface) | 0.000939 |
def tasks_by_tag(self, registry_tag):
""" Get tasks from registry by its tag
:param registry_tag: any hash-able object
:return: Return task (if :attr:`.WTaskRegistryStorage.__multiple_tasks_per_tag__` is not True) or \
list of tasks
"""
if registry_tag not in self.__registry.keys():
return None
tasks = self.__registry[registry_tag]
return tasks if self.__multiple_tasks_per_tag__ is True else tasks[0] | 0.028369 |
def requirements(collector):
"""Just print out the requirements"""
out = sys.stdout
artifact = collector.configuration['dashmat'].artifact
if artifact not in (None, "", NotSpecified):
if isinstance(artifact, six.string_types):
out = open(artifact, 'w')
else:
out = artifact
for active in collector.configuration['__imported__'].values():
for requirement in active.requirements():
out.write("{0}\n".format(requirement)) | 0.002004 |
def _sending_task(self, backend):
"""
Used internally to safely increment `backend`s task count. Returns the
overall count of tasks for `backend`.
"""
with self.backend_mutex:
self.backends[backend] += 1
self.task_counter[backend] += 1
this_task = self.task_counter[backend]
return this_task | 0.007979 |
def standardize(self, axis=1):
"""
Divide by standard deviation either within or across records.
Parameters
----------
        axis : int, optional, default = 1
Which axis to standardize along, within (1) or across (0) records
"""
if axis == 1:
return self.map(lambda x: x / std(x))
elif axis == 0:
stdval = self.std().toarray()
return self.map(lambda x: x / stdval)
else:
raise Exception('Axis must be 0 or 1') | 0.003731 |
def run_hive_script(script):
"""
Runs the contents of the given script in hive and returns stdout.
"""
if not os.path.isfile(script):
raise RuntimeError("Hive script: {0} does not exist.".format(script))
return run_hive(['-f', script]) | 0.003802 |
def _init_multicast_socket(self):
"""
Init multicast socket
:rtype: None
"""
self.debug("()")
# Create a UDP socket
self._multicast_socket = socket.socket(
socket.AF_INET,
socket.SOCK_DGRAM
)
# Allow reuse of addresses
self._multicast_socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1
)
# Set multicast interface to local_ip
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_MULTICAST_IF,
socket.inet_aton(self._multicast_ip)
)
# Set multicast time-to-live
# Should keep our multicast packets from escaping the local network
self._multicast_socket.setsockopt(
socket.IPPROTO_IP,
socket.IP_MULTICAST_TTL,
self._multicast_ttl
)
self._add_membership_multicast_socket()
# Bind socket
if platform.system().lower() == "darwin":
self._multicast_socket.bind(("0.0.0.0", self._multicast_bind_port))
else:
self._multicast_socket.bind(
(self._multicast_ip, self._multicast_bind_port)
)
self._listening.append(self._multicast_socket) | 0.001516 |
def get_default_config_parsers() -> List[AnyParser]:
"""
Utility method to return the default parsers able to parse a dictionary from a file.
:return:
"""
return [SingleFileParserFunction(parser_function=read_config,
streaming_mode=True,
supported_exts={'.cfg', '.ini'},
supported_types={ConfigParser}),
] | 0.004464 |
def calls(self, truncate=False):
"""
Show 10 most frequently called queries. Requires the pg_stat_statements
Postgres module to be installed.
Record(
query='BEGIN;',
exec_time=datetime.timedelta(0, 0, 288174),
prop_exec_time='0.0%',
ncalls='845590',
sync_io_time=datetime.timedelta(0)
)
:param truncate: trim the Record.query output if greater than 40 chars
:returns: list of Records
"""
if self.pg_stat_statement():
if truncate:
select = """
SELECT CASE
WHEN length(query) < 40
THEN query
ELSE substr(query, 0, 38) || '..'
END AS qry,
"""
else:
select = 'SELECT query,'
return self.execute(sql.CALLS.format(select=select))
else:
return [self.get_missing_pg_stat_statement_error()] | 0.001936 |
def setup_schema(command, conf, vars):
"""Place any commands to setup depotexample here"""
# Load the models
# <websetup.websetup.schema.before.model.import>
from depotexample import model
# <websetup.websetup.schema.after.model.import>
# <websetup.websetup.schema.before.metadata.create_all>
print("Creating tables")
model.metadata.create_all(bind=config['tg.app_globals'].sa_engine)
# <websetup.websetup.schema.after.metadata.create_all>
transaction.commit()
print('Initializing Migrations')
import alembic.config, alembic.command
alembic_cfg = alembic.config.Config()
alembic_cfg.set_main_option("script_location", "migration")
alembic_cfg.set_main_option("sqlalchemy.url", config['sqlalchemy.url'])
alembic.command.stamp(alembic_cfg, "head") | 0.004902 |