text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---
def plot_compare(self, lD, key=None,
cmap=None, ms=4, vmin=None, vmax=None,
vmin_map=None, vmax_map=None, cmap_map=None, normt_map=False,
ntMax=None, nchMax=None, nlbdMax=3,
lls=None, lct=None, lcch=None, lclbd=None, cbck=None,
inct=[1,10], incX=[1,5], inclbd=[1,10],
fmt_t='06.3f', fmt_X='01.0f', fmt_l='07.3f',
invert=True, Lplot='In', dmarker=None,
sharey=True, sharelamb=True,
Bck=True, fs=None, dmargin=None, wintit=None, tit=None,
fontsize=None, labelpad=None, draw=True, connect=True):
""" Plot several Data instances of the same diag
Useful to compare :
- the diag data for 2 different shots
- experimental vs synthetic data for the same shot
"""
C0 = isinstance(lD,list)
C0 = C0 and all([issubclass(dd.__class__,DataAbstract) for dd in lD])
C1 = issubclass(lD.__class__,DataAbstract)
assert C0 or C1, 'Provided first arg. must be a tf.data.DataAbstract or list !'
lD = [lD] if C1 else lD
kh = _plot.Data_plot([self]+lD, key=key, indref=0,
cmap=cmap, ms=ms, vmin=vmin, vmax=vmax,
vmin_map=vmin_map, vmax_map=vmax_map,
cmap_map=cmap_map, normt_map=normt_map,
ntMax=ntMax, nchMax=nchMax, nlbdMax=nlbdMax,
lls=lls, lct=lct, lcch=lcch, lclbd=lclbd, cbck=cbck,
inct=inct, incX=incX, inclbd=inclbd,
fmt_t=fmt_t, fmt_X=fmt_X, fmt_l=fmt_l, Lplot=Lplot,
invert=invert, dmarker=dmarker, Bck=Bck,
sharey=sharey, sharelamb=sharelamb,
fs=fs, dmargin=dmargin, wintit=wintit, tit=tit,
fontsize=fontsize, labelpad=labelpad,
draw=draw, connect=connect)
return kh | 0.010353 |
def genstis(outname):
""" Generate TestCases from cmdfile according to the pattern in patternfile"""
pattern="""class stisS%d(countrateCase):
def setUp(self):
self.obsmode="%s"
self.spectrum="%s"
self.setglobal(__file__)
self.runpy()\n"""
speclist=['/grp/hst/cdbs/calspec/gd71_mod_005.fits',
'/grp/hst/cdbs/calspec/gd153_mod_004.fits',
'/grp/hst/cdbs/calspec/g191b2b_mod_004.fits']
glist={'g140l':'fuvmama','g230l':'nuvmama','g430l':'ccd','g750l':'ccd',
'g230lb':'ccd'}
out=open(outname,'a')
out.write("""from pytools import testutil
import sys
from basecase import calcphotCase, calcspecCase, countrateCase,SpecSourcerateSpecCase\n
""")
count=0
for g in glist:
for sp in speclist:
            obsmode='stis,%s,%s,s52x2'%(g,glist[g])
defn=pattern%(count,obsmode,sp)
out.write(defn)
count+=1
out.write("""\n\n
if __name__ == '__main__':
if 'debug' in sys.argv:
testutil.debug(__name__)
else:
testutil.testall(__name__,2)
""")
out.close() | 0.022502 |
def find_config_files(
path=['~/.vcspull'], match=['*'], filetype=['json', 'yaml'], include_home=False
):
"""Return repos from a directory and match. Not recursive.
:param path: list of paths to search
:type path: list
:param match: list of globs to search against
:type match: list
:param filetype: list of filetypes to search against
:type filetype: list
:param include_home: Include home configuration files
:type include_home: bool
:raises:
- LoadConfigRepoConflict: There are two configs that have same path
and name with different repo urls.
:returns: list of absolute paths to config files.
:rtype: list
"""
configs = []
if include_home is True:
configs.extend(find_home_config_files())
if isinstance(path, list):
for p in path:
configs.extend(find_config_files(p, match, filetype))
return configs
else:
path = os.path.expanduser(path)
if isinstance(match, list):
for m in match:
configs.extend(find_config_files(path, m, filetype))
else:
if isinstance(filetype, list):
for f in filetype:
configs.extend(find_config_files(path, match, f))
else:
match = os.path.join(path, match)
match += ".{filetype}".format(filetype=filetype)
configs = glob.glob(match)
return configs | 0.001355 |
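# A minimal, self-contained usage sketch of find_config_files above: create a
# throwaway directory with one config file and locate it. The temporary path
# is illustrative; `glob` and `os` are assumed to be imported at module level
# by the original code.
import os
import tempfile

tmpdir = tempfile.mkdtemp()
open(os.path.join(tmpdir, 'repos.yaml'), 'w').close()
found = find_config_files(path=[tmpdir], match=['repos*'], filetype=['yaml'])
print(found)  # e.g. ['/tmp/tmpXXXX/repos.yaml']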
def root(self, parts):
"""
Find the path root.
@param parts: A list of path parts.
@type parts: [str,..]
@return: The root.
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = None
name = parts[0]
log.debug('searching schema for (%s)', name)
qref = self.qualify(parts[0])
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
log.debug('found (%s) as (%s)', name, Repr(result))
return result | 0.003165 |
def print_languages_and_exit(lst, status=1, header=True):
"""print a list of languages and exit"""
if header:
print("Available languages:")
for lg in lst:
print("- %s" % lg)
sys.exit(status) | 0.004505 |
def multiplicity(keys, axis=semantics.axis_default):
"""return the multiplicity of each key, or how often it occurs in the set
Parameters
----------
    keys : indexable object
        the keys whose multiplicities are computed
    axis : int, optional
        axis along which to interpret keys as a sequence of items
Returns
-------
ndarray, [keys.size], int
the number of times each input item occurs in the set
"""
index = as_index(keys, axis)
return index.count[index.inverse] | 0.002597 |
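# A short usage sketch, assuming this function comes from the numpy_indexed
# package (imported here as npi); the key array is illustrative.
import numpy as np
import numpy_indexed as npi

keys = np.array([1, 1, 2, 3, 3, 3])
print(npi.multiplicity(keys))  # -> [2 2 1 3 3 3]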
def get_specs(data):
"""
Takes a magic format file and returns a list of unique specimen names
"""
# sort the specimen names
speclist = []
for rec in data:
try:
spec = rec["er_specimen_name"]
except KeyError as e:
spec = rec["specimen"]
if spec not in speclist:
speclist.append(spec)
speclist.sort()
return speclist | 0.002457 |
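# A tiny illustration of get_specs (assuming the function above is in scope)
# with a hand-made record list mixing the 2.5-style "er_specimen_name" key
# and the 3.0-style "specimen" key.
records = [
    {"er_specimen_name": "sp02"},
    {"specimen": "sp01"},
    {"er_specimen_name": "sp02"},  # duplicate, appears once in the output
]
print(get_specs(records))  # -> ['sp01', 'sp02']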
def to_struct(cls, name=None):
"""
Convert the TreeModel into a compiled C struct
"""
if name is None:
name = cls.__name__
basic_attrs = dict([(attr_name, value)
for attr_name, value in cls.get_attrs()
if isinstance(value, Column)])
if not basic_attrs:
return None
src = 'struct {0} {{'.format(name)
for attr_name, value in basic_attrs.items():
src += '{0} {1};'.format(value.type.typename, attr_name)
src += '};'
if ROOT.gROOT.ProcessLine(src) != 0:
return None
return getattr(ROOT, name, None) | 0.002911 |
def _split_along_width(x_left_right_blocks):
"""Helper function for local 2d attention.
Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks,
height, width, depth] and returns two tensors which contain every alternate
position along the width
Args:
x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks,
height, width, depth] tensor
Returns:
x_left_blocks, x_right_blocks: two [batch, num_h_blocks,
(num_w_blocks-2)/2, height, width,
depth] tensors
"""
(_, x_num_h_blocks, x_num_outer_w_blocks, x_memory_flange_h,
x_memory_flange_w, depth) = common_layers.shape_list(x_left_right_blocks)
x_num_w_blocks = (x_num_outer_w_blocks-1)//2
# get it ready for splitting the left and right memory blocks
x_left_right_blocks = tf.reshape(x_left_right_blocks,
[-1,
x_num_h_blocks,
x_num_outer_w_blocks//2, 2,
x_memory_flange_h,
x_memory_flange_w, depth])
x_left_blocks, x_right_blocks = tf.split(x_left_right_blocks,
num_or_size_splits=2, axis=3)
x_left_blocks = tf.squeeze(x_left_blocks, axis=3)
x_right_blocks = tf.squeeze(x_right_blocks, axis=3)
x_left_blocks = tf.slice(x_left_blocks, [0, 0, 0, 0, 0, 0],
[-1, -1, x_num_w_blocks, -1, -1, -1])
x_right_blocks = tf.slice(x_right_blocks, [0, 0, 1, 0, 0, 0],
[-1, -1, x_num_w_blocks, -1, -1, -1])
return x_left_blocks, x_right_blocks | 0.006981 |
def get_v_total_stress_at_depth(self, z):
"""
Determine the vertical total stress at depth z, where z can be a number or an array of numbers.
"""
if not hasattr(z, "__len__"):
return self.one_vertical_total_stress(z)
else:
sigma_v_effs = []
for value in z:
sigma_v_effs.append(self.one_vertical_total_stress(value))
return np.array(sigma_v_effs) | 0.006667 |
def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir):
"""
Invoked by ActionModuleMixin to indicate a new task is about to start
executing. We use the opportunity to grab relevant bits from the
task-specific data.
:param dict task_vars:
Task variable dictionary.
:param str delegate_to_hostname:
:data:`None`, or the template-expanded inventory hostname this task
is being delegated to. A similar variable exists on PlayContext
when ``delegate_to:`` is active, however it is unexpanded.
:param str loader_basedir:
Loader base directory; see :attr:`loader_basedir`.
"""
self.inventory_hostname = task_vars['inventory_hostname']
self._task_vars = task_vars
self.host_vars = task_vars['hostvars']
self.delegate_to_hostname = delegate_to_hostname
self.loader_basedir = loader_basedir
self._mitogen_reset(mode='put') | 0.001994 |
async def send_rpc(self, client_id, conn_string, address, rpc_id, payload, timeout):
"""Send an RPC on behalf of a client.
See :meth:`AbstractDeviceAdapter.send_rpc`.
Args:
client_id (str): The client we are working for.
conn_string (str): A connection string that will be
passed to the underlying device adapter to connect.
address (int): The RPC address.
rpc_id (int): The ID number of the RPC
payload (bytes): The RPC argument payload
timeout (float): The RPC's expected timeout to hand to the underlying
device adapter.
Returns:
bytes: The RPC response.
Raises:
DeviceServerError: There is an issue with your client_id such
as not being connected to the device.
TileNotFoundError: The destination tile address does not exist
RPCNotFoundError: The rpc_id does not exist on the given tile
RPCErrorCode: The RPC was invoked successfully and wishes to fail
with a non-zero status code.
RPCInvalidIDError: The rpc_id is too large to fit in 16-bits.
TileBusSerror: The tile was busy and could not respond to the RPC.
Exception: The rpc raised an exception during processing.
DeviceAdapterError: If there is a hardware or communication issue
invoking the RPC.
"""
conn_id = self._client_connection(client_id, conn_string)
return await self.adapter.send_rpc(conn_id, address, rpc_id, payload, timeout) | 0.003075 |
def del_location(self, location, sync=True):
"""
delete location from this routing area
:param location: the location to be deleted from this routing area
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the location object on list to be removed on next save().
:return:
"""
LOGGER.debug("RoutingArea.del_location")
if not sync:
self.loc_2_rm.append(location)
else:
if location.id is None:
location.sync()
if self.id is not None and location.id is not None:
params = {
'id': self.id,
'locationID': location.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/locations/delete', 'parameters': params}
response = RoutingAreaService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'RoutingArea.del_location - Problem while updating routing area ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.loc_ids.remove(location.id)
location.routing_area_ids.remove(self.id)
else:
LOGGER.warning(
'RoutingArea.del_location - Problem while updating routing area ' +
self.name + '. Reason: location ' +
location.name + ' id is None or self.id is None'
) | 0.004084 |
def call(self, name, request=None, **params):
""" Call resource by ``Api`` name.
:param name: The resource's name (short form)
:param request: django.http.Request instance
:param **params: Params for a resource's call
:return object: Result of resource's execution
"""
if name not in self.resources:
raise exceptions.HttpError(
'Unknown method \'%s\'' % name,
status=status.HTTP_501_NOT_IMPLEMENTED)
request = request or HttpRequest()
resource = self.resources[name]
view = resource.as_view(api=self)
return view(request, **params) | 0.003003 |
def set(self, point):
"""Set pixel at (x, y) point."""
if not isinstance(point, Point):
point = Point(point)
rx = self.round(point.x)
ry = self.round(point.y)
item = Point((rx >> 1, min(ry >> 2, self.size.y)))
self.screen[item] |= self.pixels[ry & 3][rx & 1] | 0.00625 |
def invoke_rest_method(**kwargs):
"""
Invokes a rest api test
:param kwargs:
REQUIRED:
method = 'GET', 'POST', 'PUT', 'DELETE'
url = "http://localhost/api/controller"
or
baseurl = "http://localhost/"
endpoint = "api/controller"
OPTIONAL:
headers = {'header_name':'header_value', 'header2_name':'header2_value'}
json = {'key1':'value1', 'key2':'value2'} #Will be sent as json encoded data
or
form = {'key1':'value1', 'key2':'value2'} #Will be sent as form encoded data
api_key = "ey123asdk93e378hsdfsfdf"
silent = True,False (to not log request/response. Default False)
:return:
"""
logger = kwargs.get('logger', get_default_logger())
args = {}
headers = kwargs.get('headers', {})
api_key = kwargs.get('api_key', None)
if api_key is not None:
headers['Authorization'] = api_key
    if headers:
args['headers'] = headers
json = kwargs.get('json', None)
if json is not None:
args['json'] = json
form = kwargs.get('form', None)
if form is not None:
args['data'] = form
if json is not None and form is not None:
raise ValueError('Cannot specify both json and form parameters')
url = kwargs.get('url', None)
if url is None:
baseurl = kwargs.get('baseurl', None)
endpoint = kwargs.get('endpoint', None)
if baseurl is None or endpoint is None:
raise ValueError('Invalid test arguments. Must specify {url} or {baseurl, endpoint}')
url = urljoin(baseurl, endpoint)
method = kwargs.get('method', None)
if method is None:
raise ValueError('Invalid test arguments. Must specify {method}')
silent = kwargs.get('silent', False)
if not silent:
logger.debug("Sending {} to url: {}".format(method, url))
req = requests.request(method, url, **args)
if not silent:
logger.debug("Received response code: {}".format(req.status_code))
return req | 0.002651 |
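# A hedged usage sketch of invoke_rest_method (commented out because it needs
# network access); the URL and header values are purely illustrative.
# response = invoke_rest_method(method='GET',
#                               baseurl='https://httpbin.org/',
#                               endpoint='get',
#                               headers={'Accept': 'application/json'},
#                               silent=True)
# print(response.status_code)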
def get_band_structure_from_vasp_multiple_branches(dir_name, efermi=None,
projections=False):
"""
This method is used to get band structure info from a VASP directory. It
takes into account that the run can be divided in several branches named
"branch_x". If the run has not been divided in branches the method will
turn to parsing vasprun.xml directly.
    The method returns None if there's a parsing error.
Args:
dir_name: Directory containing all bandstructure runs.
efermi: Efermi for bandstructure.
projections: True if you want to get the data on site projections if
any. Note that this is sometimes very large
Returns:
A BandStructure Object
"""
# TODO: Add better error handling!!!
if os.path.exists(os.path.join(dir_name, "branch_0")):
# get all branch dir names
branch_dir_names = [os.path.abspath(d)
for d in glob.glob("{i}/branch_*"
.format(i=dir_name))
if os.path.isdir(d)]
# sort by the directory name (e.g, branch_10)
sort_by = lambda x: int(x.split("_")[-1])
sorted_branch_dir_names = sorted(branch_dir_names, key=sort_by)
# populate branches with Bandstructure instances
branches = []
for dir_name in sorted_branch_dir_names:
xml_file = os.path.join(dir_name, "vasprun.xml")
if os.path.exists(xml_file):
run = Vasprun(xml_file, parse_projected_eigen=projections)
branches.append(run.get_band_structure(efermi=efermi))
else:
# It might be better to throw an exception
warnings.warn("Skipping {}. Unable to find {}"
.format(d=dir_name, f=xml_file))
return get_reconstructed_band_structure(branches, efermi)
else:
xml_file = os.path.join(dir_name, "vasprun.xml")
# Better handling of Errors
if os.path.exists(xml_file):
return Vasprun(xml_file, parse_projected_eigen=projections) \
.get_band_structure(kpoints_filename=None, efermi=efermi)
else:
return None | 0.000872 |
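# A hedged usage sketch (commented out; the directory layout and Fermi energy
# are illustrative, and a pymatgen BandStructure is returned on success).
# bs = get_band_structure_from_vasp_multiple_branches("./bs_run", efermi=5.3)
# if bs is not None:
#     print(bs.get_band_gap())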
def set_prop(self, prop, value, ef=None):
"""
        set attribute values
        :param prop: attribute name ('ensemble', 'auc') or, when ef is given, the number of decoys for a given FPF
        :param value: the value to store (tuple of strings for 'ensemble', float for 'auc' or an enrichment factor)
        :param ef: if truthy, store value in the enrichment-factor dict keyed by prop
        :return:
"""
if ef:
# prop should be restricted to n_decoys, an int, the no. of decoys corresponding to a given FPF.
# value is restricted to the corresponding enrichment factor and should be a float
self.ef[prop] = value
else:
if prop == 'ensemble':
# value is a tuple of strings that gives the ensemble composition
self.ensemble = value
elif prop == 'auc':
# value is a float that gives the auc value
self.auc = value | 0.006935 |
def match_one(self, models, results, relation):
"""
        Match the eagerly loaded results to their single parents.
:param models: The parents
:type models: list
:param results: The results collection
:type results: Collection
:param relation: The relation
:type relation: str
:rtype: list
"""
return self._match_one_or_many(models, results, relation, 'one') | 0.004505 |
def clip(obj, lower=0, upper=127):
"""
Return a copy of the object with piano-roll(s) clipped by a lower bound
and an upper bound specified by `lower` and `upper`, respectively.
Parameters
----------
lower : int or float
The lower bound to clip the piano-roll. Default to 0.
upper : int or float
The upper bound to clip the piano-roll. Default to 127.
"""
_check_supported(obj)
copied = deepcopy(obj)
copied.clip(lower, upper)
return copied | 0.001976 |
def build_pyfile_path_from_docname(self, docfile):
"""Build the expected Python file name based on the given documentation file name.
:param str docfile: The documentation file name from which to build the Python file name.
:rtype: str
"""
name, ext = os.path.splitext(docfile)
expected_py_name = name.replace('.', '/') + '.py'
return expected_py_name | 0.009804 |
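# Expected mapping, sketched as comments (the method only relies on os.path,
# so any instance exposing it behaves the same way; names are illustrative):
# build_pyfile_path_from_docname('pkg.module.rst')  -> 'pkg/module.py'
# build_pyfile_path_from_docname('utils.txt')       -> 'utils.py'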
def convert_bool(key, val, attr_type, attr={}, cdata=False):
"""Converts a boolean into an XML element"""
LOG.info('Inside convert_bool(): key="%s", val="%s", type(val) is: "%s"' % (
unicode_me(key), unicode_me(val), type(val).__name__)
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr['type'] = get_xml_type(val)
attrstring = make_attrstring(attr)
return '<%s%s>%s</%s>' % (key, attrstring, unicode(val).lower(), key) | 0.004175 |
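# A sketch of the expected output, assuming the helpers behave as in the
# dicttoxml-style module this comes from (empty attribute string here):
# convert_bool('active', True, attr_type=False)
#   -> '<active>true</active>'
# convert_bool('active', False, attr_type=True)
#   -> something like '<active type="bool">false</active>'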
def has_adjacent_fragments_only(self, min_index=None, max_index=None):
"""
Return ``True`` if the list contains only adjacent fragments,
that is, if it does not have gaps.
:param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
:param int max_index: examine fragments with index smaller than this index (i.e., excluded)
:raises ValueError: if ``min_index`` is negative or ``max_index``
is bigger than the current number of fragments
:rtype: bool
"""
min_index, max_index = self._check_min_max_indices(min_index, max_index)
for i in range(min_index, max_index - 1):
current_interval = self[i].interval
next_interval = self[i + 1].interval
if not current_interval.is_adjacent_before(next_interval):
self.log(u"Found non adjacent fragments")
self.log([u" Index %d => %s", i, current_interval])
self.log([u" Index %d => %s", i + 1, next_interval])
return False
return True | 0.004405 |
def __update_central_neurons(self, t, next_cn_membrane, next_cn_active_sodium, next_cn_inactive_sodium, next_cn_active_potassium):
"""!
@brief Update of central neurons in line with new values of current in channels.
        @param[in] t (double): Current time of simulation.
        @param[in] next_cn_membrane (list): New values of membrane potentials for central neurons.
        @param[in] next_cn_active_sodium (list): New values of activation conductances of the sodium channels for central neurons.
        @param[in] next_cn_inactive_sodium (list): New values of inactivation conductances of the sodium channels for central neurons.
        @param[in] next_cn_active_potassium (list): New values of activation conductances of the potassium channel for central neurons.
"""
for index in range(0, len(self._central_element)):
self._central_element[index].membrane_potential = next_cn_membrane[index];
self._central_element[index].active_cond_sodium = next_cn_active_sodium[index];
self._central_element[index].inactive_cond_sodium = next_cn_inactive_sodium[index];
self._central_element[index].active_cond_potassium = next_cn_active_potassium[index];
if (self._central_element[index].pulse_generation is False):
if (self._central_element[index].membrane_potential >= 0.0):
self._central_element[index].pulse_generation = True;
self._central_element[index].pulse_generation_time.append(t);
elif (self._central_element[index].membrane_potential < 0.0):
self._central_element[index].pulse_generation = False; | 0.013809 |
def meta_enter_message(python_input):
"""
Create the `Layout` for the 'Meta+Enter` message.
"""
def get_text_fragments():
return [('class:accept-message', ' [Meta+Enter] Execute ')]
def extra_condition():
" Only show when... "
b = python_input.default_buffer
return (
python_input.show_meta_enter_message and
(not b.document.is_cursor_at_the_end or
python_input.accept_input_on_enter is None) and
'\n' in b.text)
visible = ~is_done & has_focus(DEFAULT_BUFFER) & Condition(extra_condition)
return ConditionalContainer(
content=Window(FormattedTextControl(get_text_fragments)),
filter=visible) | 0.001383 |
def _get_object(objname, objtype):
'''
Helper function to retrieve objtype from pillars if objname
is string_types, used for SupportedLoginProviders and
OpenIdConnectProviderARNs.
'''
ret = None
if objname is None:
return ret
if isinstance(objname, string_types):
if objname in __opts__:
ret = __opts__[objname]
master_opts = __pillar__.get('master', {})
if objname in master_opts:
ret = master_opts[objname]
if objname in __pillar__:
ret = __pillar__[objname]
elif isinstance(objname, objtype):
ret = objname
if not isinstance(ret, objtype):
ret = None
return ret | 0.001422 |
def normalize(value, unit):
"""Converts the value so that it belongs to some expected range.
Returns the new value and new unit.
E.g:
>>> normalize(1024, 'KB')
(1, 'MB')
>>> normalize(90, 'min')
(1.5, 'hr')
>>> normalize(1.0, 'object')
(1, 'object')
"""
if value < 0:
raise ValueError('Negative value: %s %s.' % (value, unit))
if unit in functions.get_keys(INFORMATION_UNITS):
return _normalize_information(value, unit)
elif unit in TIME_UNITS:
return _normalize_time(value, unit)
else:
# Unknown unit, just return it
return functions.format_value(value), unit | 0.001515 |
def delete(key,
host=DEFAULT_HOST,
port=DEFAULT_PORT,
time=DEFAULT_TIME):
'''
Delete a key from memcache server
CLI Example:
.. code-block:: bash
salt '*' memcached.delete <key>
'''
if not isinstance(time, six.integer_types):
raise SaltInvocationError('\'time\' must be an integer')
conn = _connect(host, port)
_check_stats(conn)
return bool(conn.delete(key, time)) | 0.002208 |
def rewrite_elife_datasets_json(json_content, doi):
""" this does the work of rewriting elife datasets json """
# Add dates in bulk
elife_dataset_dates = []
elife_dataset_dates.append(("10.7554/eLife.00348", "used", "dataro17", u"2010"))
elife_dataset_dates.append(("10.7554/eLife.01179", "used", "dataro4", u"2016"))
elife_dataset_dates.append(("10.7554/eLife.01603", "used", "dataro2", u"2012"))
elife_dataset_dates.append(("10.7554/eLife.02304", "used", "dataro15", u"2005"))
elife_dataset_dates.append(("10.7554/eLife.02935", "used", "dataro2", u"2014"))
elife_dataset_dates.append(("10.7554/eLife.03583", "used", "dataro5", u"2013"))
if doi in map(lambda dataset: dataset[0], elife_dataset_dates):
for (match_doi, used_or_generated, id, dataset_date) in elife_dataset_dates:
if doi == match_doi:
if json_content.get(used_or_generated):
for dataset in json_content[used_or_generated]:
if dataset.get("id") and dataset["id"] == id:
if not dataset.get("date"):
dataset["date"] = dataset_date
# Continue with individual article JSON rewriting
if doi == "10.7554/eLife.01311":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] in ["dataro3", "dataro4", "dataro5"]:
if not dataset.get("date"):
dataset["date"] = u"2012"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Duke"}]
if dataset.get("id") and dataset["id"] == "dataro6":
if not dataset.get("date"):
dataset["date"] = u"2011"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "FlyBase"}]
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("date"):
dataset["date"] = u"2011"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Baylor College of Medicine (BCM)"}]
if dataset.get("id") and dataset["id"] in ["dataro8", "dataro9"]:
if not dataset.get("date"):
dataset["date"] = u"2012"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "University of California, Berkeley"}]
if doi == "10.7554/eLife.01440":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "EnsemblMetazoa"}]
if doi == "10.7554/eLife.01535":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "2000, 2005":
dataset["date"] = u"2000"
if doi == "10.7554/eLife.02304":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro11":
if not dataset.get("title"):
dataset["title"] = u"T.gondii LDH1 ternary complex with APAD+ and oxalate"
if doi == "10.7554/eLife.03574":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("date"):
dataset["date"] = u"2006"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Riley,M."}, {"type": "group", "name": "Abe,T."}, {"type": "group", "name": "Arnaud,M.B."}, {"type": "group", "name": "Berlyn,M.K."}, {"type": "group", "name": "Blattner,F.R."}, {"type": "group", "name": "Chaudhuri,R.R."}, {"type": "group", "name": "Glasner,J.D."}, {"type": "group", "name": "Horiuchi,T."}, {"type": "group", "name": "Keseler,I.M."}, {"type": "group", "name": "Kosuge,T."}, {"type": "group", "name": "Mori,H."}, {"type": "group", "name": "Perna,N.T."}, {"type": "group", "name": "Plunkett,G. III"}, {"type": "group", "name": "Rudd,K.E."}, {"type": "group", "name": "Serres,M.H."}, {"type": "group", "name": "Thomas,G.H."}, {"type": "group", "name": "Thomson,N.R."}, {"type": "group", "name": "Wishart,D."}, {"type": "group", "name": "Wanner,B.L."}]
if doi == "10.7554/eLife.03676":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro4":
if not dataset.get("date"):
dataset["date"] = u"2013"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Human Gene Sequencing Center"}]
if doi == "10.7554/eLife.03971":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Vanderperre B."}]
if doi == "10.7554/eLife.04660":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "2014-2015":
dataset["date"] = u"2014"
if doi == "10.7554/eLife.06421":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if dataset.get("date") and dataset.get("date") == "NA":
dataset["date"] = u"2006"
if doi == "10.7554/eLife.08445":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("date"):
dataset["date"] = u"2006"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "BDTNP SELEX"}]
if doi == "10.7554/eLife.08916":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if dataset.get("date") and dataset.get("date") == "2008, updated 2014":
dataset["date"] = u"2008"
if dataset.get("id") and dataset["id"] == "dataro3":
if dataset.get("date") and dataset.get("date") == "2013, updated 2014":
dataset["date"] = u"2013"
if doi == "10.7554/eLife.08955":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Kurdistani S"}, {"type": "group", "name": "Marrban C"}, {"type": "group", "name": "Su T"}]
if doi == "10.7554/eLife.09207":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Prostate Cancer Genome Sequencing Project"}]
if doi == "10.7554/eLife.10607":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "data-ro4":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Authors"}]
if doi == "10.7554/eLife.10670":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "HIVdb"}]
# Add dates, authors, other details
if doi == "10.7554/eLife.10856":
if json_content.get("generated"):
datasets_authors_for_10856 = [{"type": "group", "name": "Dagdas YF"}, {"type": "group", "name": "Belhaj K"}, {"type": "group", "name": "Maqbool A"}, {"type": "group", "name": "Chaparro-Garcia A"}, {"type": "group", "name": "Pandey P"}, {"type": "group", "name": "Petre B"}, {"type": "group", "name": "Tabassum N"}, {"type": "group", "name": "Cruz-Mireles N"}, {"type": "group", "name": "Hughes RK"}, {"type": "group", "name": "Sklenar J"}, {"type": "group", "name": "Win J"}, {"type": "group", "name": "Menke F"}, {"type": "group", "name": "Findlay K"}, {"type": "group", "name": "Banfield MJ"}, {"type": "group", "name": "Kamoun S"}, {"type": "group", "name": "Bozkurt TO"}]
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("date"):
dataset["date"] = u"2016"
if not dataset.get("title"):
dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151098/"
if dataset.get("id") and dataset["id"] == "dataro8":
if not dataset.get("date"):
dataset["date"] = u"2015"
if not dataset.get("title"):
dataset["title"] = u"An effector of the Irish potato famine pathogen antagonizes a host autophagy cargo receptor"
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if dataset.get("uri") and dataset["uri"] == "http://www.ncbi.nlm.nih.":
dataset["uri"] = "https://www.ncbi.nlm.nih.gov/nuccore/976151096/"
if dataset.get("id") and dataset["id"] == "dataro9":
if not dataset.get("authors"):
dataset["authors"] = datasets_authors_for_10856
if doi == "10.7554/eLife.10877":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("title"):
dataset["title"] = u"Oct4 ChIP-Seq at G1 and G2/M phase of cell cycle in mouse embryonic stem cells"
if doi == "10.7554/eLife.10921":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Floor SN"}, {"type": "group", "name": "Doudna JA"}]
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Sidrauski C"}, {"type": "group", "name": "McGeachy A"}, {"type": "group", "name": "Ingolia N"}, {"type": "group", "name": "Walter P"}]
if doi == "10.7554/eLife.11117":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro14":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Authors"}]
if doi == "10.7554/eLife.12204":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Rhodes DR"}, {"type": "group", "name": "Kalyana-Sundaram S"}, {"type": "group", "name": "Mahavisno V"}, {"type": "group", "name": "Varambally R"}, {"type": "group", "name": "Yu J"}, {"type": "group", "name": "Briggs BB"}, {"type": "group", "name": "Barrette TR"}, {"type": "group", "name": "Anstet MJ"}, {"type": "group", "name": "Kincead-Beal C"}, {"type": "group", "name": "Kulkarni P"}, {"type": "group", "name": "Varambally S"}, {"type": "group", "name": "Ghosh D"}, {"type": "group", "name": "Chinnaiyan AM."}]
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Gaspar C"}, {"type": "group", "name": "Cardoso J"}, {"type": "group", "name": "Franken P"}, {"type": "group", "name": "Molenaar L"}, {"type": "group", "name": "Morreau H"}, {"type": "group", "name": "Möslein G"}, {"type": "group", "name": "Sampson J"}, {"type": "group", "name": "Boer JM"}, {"type": "group", "name": "de Menezes RX"}, {"type": "group", "name": "Fodde R."}]
if dataset.get("id") and dataset["id"] == "dataro3":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Graudens E"}, {"type": "group", "name": "Boulanger V"}, {"type": "group", "name": "Mollard C"}, {"type": "group", "name": "Mariage-Samson R"}, {"type": "group", "name": "Barlet X"}, {"type": "group", "name": "Grémy G"}, {"type": "group", "name": "Couillault C"}, {"type": "group", "name": "Lajémi M"}, {"type": "group", "name": "Piatier-Tonneau D"}, {"type": "group", "name": "Zaborski P"}, {"type": "group", "name": "Eveno E"}, {"type": "group", "name": "Auffray C"}, {"type": "group", "name": "Imbeaud S."}]
if dataset.get("id") and dataset["id"] == "dataro4":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Hong Y"}, {"type": "group", "name": "Downey T"}, {"type": "group", "name": "Eu KW"}, {"type": "group", "name": "Koh PK"},{"type": "group", "name": "Cheah PY"}]
if dataset.get("id") and dataset["id"] == "dataro5":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Kaiser S"}, {"type": "group", "name": "Park YK"}, {"type": "group", "name": "Franklin JL"}, {"type": "group", "name": "Halberg RB"}, {"type": "group", "name": "Yu M"}, {"type": "group", "name": "Jessen WJ"}, {"type": "group", "name": "Freudenberg J"}, {"type": "group", "name": "Chen X"}, {"type": "group", "name": "Haigis K"}, {"type": "group", "name": "Jegga AG"}, {"type": "group", "name": "Kong S"}, {"type": "group", "name": "Sakthivel B"}, {"type": "group", "name": "Xu H"}, {"type": "group", "name": "Reichling T"}, {"type": "group", "name": "Azhar M"}, {"type": "group", "name": "Boivin GP"}, {"type": "group", "name": "Roberts RB"}, {"type": "group", "name": "Bissahoyo AC"}, {"type": "group", "name": "Gonzales F"}, {"type": "group", "name": "Bloom GC"}, {"type": "group", "name": "Eschrich S"}, {"type": "group", "name": "Carter SL"}, {"type": "group", "name": "Aronow JE"}, {"type": "group", "name": "Kleimeyer J"}, {"type": "group", "name": "Kleimeyer M"}, {"type": "group", "name": "Ramaswamy V"}, {"type": "group", "name": "Settle SH"}, {"type": "group", "name": "Boone B"}, {"type": "group", "name": "Levy S"}, {"type": "group", "name": "Graff JM"}, {"type": "group", "name": "Doetschman T"}, {"type": "group", "name": "Groden J"}, {"type": "group", "name": "Dove WF"}, {"type": "group", "name": "Threadgill DW"}, {"type": "group", "name": "Yeatman TJ"}, {"type": "group", "name": "Coffey RJ Jr"}, {"type": "group", "name": "Aronow BJ."}]
if dataset.get("id") and dataset["id"] == "dataro6":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Muzny DM et al"}]
if dataset.get("id") and dataset["id"] == "dataro7":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Skrzypczak M"}, {"type": "group", "name": "Goryca K"}, {"type": "group", "name": "Rubel T"}, {"type": "group", "name": "Paziewska A"}, {"type": "group", "name": "Mikula M"}, {"type": "group", "name": "Jarosz D"}, {"type": "group", "name": "Pachlewski J"}, {"type": "group", "name": "Oledzki J"}, {"type": "group", "name": "Ostrowski J."}]
if dataset.get("id") and dataset["id"] == "dataro8":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Cancer Genome Atlas Network"}]
if doi == "10.7554/eLife.12876":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Department of Human Genetics, University of Utah"}]
if doi == "10.7554/eLife.13195":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Microbial Ecology Group, Colorado State University"}]
if doi == "10.7554/eLife.14158":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "data-ro1":
if not dataset.get("title"):
dataset["title"] = u"Bacterial initiation protein"
if dataset.get("id") and dataset["id"] == "data-ro2":
if not dataset.get("title"):
dataset["title"] = u"Bacterial initiation protein in complex with Phage inhibitor protein"
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "dataro3":
if not dataset.get("date"):
dataset["date"] = u"2007"
if doi == "10.7554/eLife.14243":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro2":
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "Tramantano M"}, {"type": "group", "name": "Sun L"}, {"type": "group", "name": "Au C"}, {"type": "group", "name": "Labuz D"}, {"type": "group", "name": "Liu Z"}, {"type": "group", "name": "Chou M"}, {"type": "group", "name": "Shen C"}, {"type": "group", "name": "Luk E"}]
if doi == "10.7554/eLife.16078":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date") == "current manuscript":
dataset["date"] = u"2016"
if doi == "10.7554/eLife.17082":
if json_content.get("used"):
for dataset in json_content["used"]:
if dataset.get("id") and dataset["id"] == "data-ro4":
if not dataset.get("date"):
dataset["date"] = u"2012"
if dataset.get("id") and dataset["id"] == "data-ro5":
if not dataset.get("date"):
dataset["date"] = u"2014"
if dataset.get("id") and dataset["id"] == "data-ro6":
if not dataset.get("date"):
dataset["date"] = u"2014"
if not dataset.get("authors"):
dataset["authors"] = [{"type": "group", "name": "The Cancer Genome Atlas (TCGA)"}]
if doi == "10.7554/eLife.17473":
if json_content.get("generated"):
for dataset in json_content["generated"]:
if dataset.get("id") and dataset["id"] == "dataro1":
if dataset.get("date") and dataset.get("date").startswith("Release date"):
dataset["date"] = u"2016"
return json_content | 0.002816 |
def delete_alarms(deployment_id, alert_id=None, metric_name=None, api_key=None, profile='telemetry'):
'''delete an alert specified by alert_id or if not specified blows away all the alerts
in the current deployment.
Returns (bool success, str message) tuple.
CLI Example:
salt myminion telemetry.delete_alarms rs-ds033197 profile=telemetry
'''
auth = _auth(profile=profile)
if alert_id is None:
# Delete all the alarms associated with this deployment
alert_ids = get_alert_config(deployment_id, api_key=api_key, profile=profile)
else:
alert_ids = [alert_id]
if not alert_ids:
return False, "failed to find alert associated with deployment: {0}".format(deployment_id)
failed_to_delete = []
for id in alert_ids:
delete_url = _get_telemetry_base(profile) + "/alerts/{0}".format(id)
try:
response = requests.delete(delete_url, headers=auth)
if metric_name:
log.debug("updating cache and delete %s key from %s",
metric_name, deployment_id)
_update_cache(deployment_id, metric_name, None)
        except requests.exceptions.RequestException as e:
            log.error('Delete failed: %s', e)
            failed_to_delete.append(id)
            continue
        if response.status_code != 200:
failed_to_delete.append(id)
if failed_to_delete:
return False, "Failed to delete {0} alarms in deployment: {1}" .format(', '.join(failed_to_delete), deployment_id)
return True, "Successfully deleted {0} alerts in deployment: {1}".format(', '.join(alert_ids), deployment_id) | 0.004302 |
def drag(self, point):
"""Update the tracball during a drag.
Parameters
----------
point : (2,) int
The current x and y pixel coordinates of the mouse during a drag.
This will compute a movement for the trackball with the relative
motion between this point and the one marked by down().
"""
point = np.array(point, dtype=np.float32)
dx, dy = point - self._pdown
mindim = 0.3 * np.min(self._size)
target = self._target
x_axis = self._pose[:3, 0].flatten()
y_axis = self._pose[:3, 1].flatten()
z_axis = self._pose[:3, 2].flatten()
eye = self._pose[:3, 3].flatten()
# Interpret drag as a rotation
if self._state == Trackball.STATE_ROTATE:
x_angle = -dx / mindim
x_rot_mat = transformations.rotation_matrix(
x_angle, y_axis, target
)
y_angle = dy / mindim
y_rot_mat = transformations.rotation_matrix(
y_angle, x_axis, target
)
self._n_pose = y_rot_mat.dot(x_rot_mat.dot(self._pose))
# Interpret drag as a roll about the camera axis
elif self._state == Trackball.STATE_ROLL:
center = self._size / 2.0
v_init = self._pdown - center
v_curr = point - center
v_init = v_init / np.linalg.norm(v_init)
v_curr = v_curr / np.linalg.norm(v_curr)
theta = (-np.arctan2(v_curr[1], v_curr[0]) +
np.arctan2(v_init[1], v_init[0]))
rot_mat = transformations.rotation_matrix(theta, z_axis, target)
self._n_pose = rot_mat.dot(self._pose)
# Interpret drag as a camera pan in view plane
elif self._state == Trackball.STATE_PAN:
dx = -dx / (5.0 * mindim) * self._scale
dy = -dy / (5.0 * mindim) * self._scale
translation = dx * x_axis + dy * y_axis
self._n_target = self._target + translation
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._n_pose = t_tf.dot(self._pose)
# Interpret drag as a zoom motion
elif self._state == Trackball.STATE_ZOOM:
radius = np.linalg.norm(eye - target)
ratio = 0.0
if dy > 0:
ratio = np.exp(abs(dy) / (0.5 * self._size[1])) - 1.0
elif dy < 0:
ratio = 1.0 - np.exp(dy / (0.5 * (self._size[1])))
translation = -np.sign(dy) * ratio * radius * z_axis
t_tf = np.eye(4)
t_tf[:3, 3] = translation
self._n_pose = t_tf.dot(self._pose) | 0.000743 |
def handle_scroll(self, *args):
"""When my ``scroll`` changes, tell my deckbuilder how it's scrolled
now.
"""
if 'bar' not in self.ids:
Clock.schedule_once(self.handle_scroll, 0)
return
att = 'deck_{}_hint_offsets'.format(
'x' if self.orientation == 'horizontal' else 'y'
)
offs = list(getattr(self.deckbuilder, att))
if len(offs) <= self.deckidx:
            Clock.schedule_once(self.handle_scroll, 0)
return
offs[self.deckidx] = self._scroll
setattr(self.deckbuilder, att, offs)
self.deckbuilder._trigger_layout() | 0.003086 |
def _get_definitions(source):
# type: (str) -> Tuple[Dict[str, str], int]
"""Extract a dictionary of arguments and definitions.
Args:
source: The source for a section of a usage string that contains
definitions.
Returns:
A two-tuple containing a dictionary of all arguments and definitions as
well as the length of the longest argument.
"""
max_len = 0
descs = collections.OrderedDict() # type: Dict[str, str]
lines = (s.strip() for s in source.splitlines())
non_empty_lines = (s for s in lines if s)
for line in non_empty_lines:
if line:
arg, desc = re.split(r'\s\s+', line.strip())
arg_len = len(arg)
if arg_len > max_len:
max_len = arg_len
descs[arg] = desc
return descs, max_len | 0.001195 |
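# A small runnable sketch of the parsing, assuming _get_definitions above is
# in scope; the usage text below is illustrative.
usage = """
-h, --help     Show this help message.
-v, --version  Print the version and exit.
"""
descs, width = _get_definitions(usage)
print(dict(descs))  # {'-h, --help': 'Show this ...', '-v, --version': 'Print ...'}
print(width)        # 13, the length of '-v, --version'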
def expect_column_values_to_be_in_type_list(
self,
column,
type_list,
mostly=None,
result_format=None, include_config=False, catch_exceptions=None, meta=None
):
"""Expect each column entry to match a list of specified data types.
expect_column_values_to_be_in_type_list is a :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`.
Args:
column (str): \
The column name.
type_list (list of str): \
A list of strings representing the data type that each column should have as entries.
For example, "double integer" refers to an integer with double precision.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly percent of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification. \
For more detail, see :ref:`meta`.
Returns:
A JSON-serializable expectation result object.
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
Warning:
expect_column_values_to_be_in_type_list is slated for major changes in future versions of great_expectations.
As of v0.3, great_expectations is exclusively based on pandas, which handles typing in its own peculiar way.
Future versions of great_expectations will allow for Datasets in SQL, spark, etc.
When we make that change, we expect some breaking changes in parts of the codebase that are based strongly on pandas notions of typing.
See also:
expect_column_values_to_be_of_type
"""
raise NotImplementedError | 0.006287 |
def __cloudflare_request(self, *, account, path, args=None):
"""Helper function to interact with the CloudFlare API.
Args:
account (:obj:`CloudFlareAccount`): CloudFlare Account object
path (`str`): URL endpoint to communicate with
args (:obj:`dict` of `str`: `str`): A dictionary of arguments for the endpoint to consume
Returns:
`dict`
"""
if not args:
args = {}
if not self.cloudflare_initialized[account.account_id]:
self.cloudflare_session[account.account_id] = requests.Session()
self.cloudflare_session[account.account_id].headers.update({
'X-Auth-Email': account.email,
'X-Auth-Key': account.api_key,
'Content-Type': 'application/json'
})
self.cloudflare_initialized[account.account_id] = True
if 'per_page' not in args:
args['per_page'] = 100
response = self.cloudflare_session[account.account_id].get(account.endpoint + path, params=args)
if response.status_code != 200:
raise CloudFlareError('Request failed: {}'.format(response.text))
return response.json() | 0.003239 |
def AddFrequency(self, start_time, end_time, headway_secs, exact_times=0,
problem_reporter=problems_module.default_problem_reporter):
"""Adds a period to this trip during which the vehicle travels
at regular intervals (rather than specifying exact times for each stop).
Args:
start_time: The time at which this headway period starts, either in
numerical seconds since midnight or as "HH:MM:SS" since midnight.
end_time: The time at which this headway period ends, either in
numerical seconds since midnight or as "HH:MM:SS" since midnight.
This value should be larger than start_time.
headway_secs: The amount of time, in seconds, between occurences of
this trip.
exact_times: If 1, indicates that frequency trips should be scheduled
exactly as determined by their start time and headway. Default is 0.
problem_reporter: Optional parameter that can be used to select
how any errors in the other input parameters will be reported.
Returns:
None
"""
if start_time == None or start_time == '': # 0 is OK
problem_reporter.MissingValue('start_time')
return
if isinstance(start_time, basestring):
try:
start_time = util.TimeToSecondsSinceMidnight(start_time)
except problems_module.Error:
problem_reporter.InvalidValue('start_time', start_time)
return
elif start_time < 0:
problem_reporter.InvalidValue('start_time', start_time)
if end_time == None or end_time == '':
problem_reporter.MissingValue('end_time')
return
if isinstance(end_time, basestring):
try:
end_time = util.TimeToSecondsSinceMidnight(end_time)
except problems_module.Error:
problem_reporter.InvalidValue('end_time', end_time)
return
elif end_time < 0:
problem_reporter.InvalidValue('end_time', end_time)
return
if not headway_secs:
problem_reporter.MissingValue('headway_secs')
return
try:
headway_secs = int(headway_secs)
except ValueError:
problem_reporter.InvalidValue('headway_secs', headway_secs)
return
if headway_secs <= 0:
problem_reporter.InvalidValue('headway_secs', headway_secs)
return
if end_time <= start_time:
problem_reporter.InvalidValue('end_time', end_time,
'should be greater than start_time')
if not exact_times:
exact_times = 0
if exact_times not in (0, 1):
problem_reporter.InvalidValue('exact_times', exact_times,
'Should be 0 (no fixed schedule) or 1 (fixed and regular schedule)')
self._headways.append((start_time, end_time, headway_secs, exact_times)) | 0.009804 |
def order_by(self, order_by: Union[set, str]):
"""Update order_by setting for filter set"""
clone = self._clone()
if isinstance(order_by, str):
order_by = {order_by}
clone._order_by = clone._order_by.union(order_by)
return clone | 0.007092 |
def netgetdata(url, maxtry=3, timeout=10):
"""
Get content of a file via a URL.
Parameters
----------
url : string
URL of the file to be downloaded
maxtry : int, optional (default 3)
Maximum number of download retries
timeout : int, optional (default 10)
Timeout in seconds for blocking operations
Returns
-------
str : io.BytesIO
Buffered I/O stream
Raises
------
urlerror.URLError (urllib2.URLError in Python 2,
urllib.error.URLError in Python 3)
If the file cannot be downloaded
"""
err = ValueError('maxtry parameter should be greater than zero')
for ntry in range(maxtry):
try:
rspns = urlrequest.urlopen(url, timeout=timeout)
cntnt = rspns.read()
break
except urlerror.URLError as e:
err = e
if not isinstance(e.reason, socket.timeout):
raise
else:
raise err
return io.BytesIO(cntnt) | 0.001 |
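# Hedged usage sketch (commented out since it performs a real download;
# the URL is a placeholder):
# buf = netgetdata('https://example.com/data.bin', maxtry=2, timeout=5)
# payload = buf.read()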
def get_my_data(self, session=None):
"""
Returns a list of data descriptions for data which has been purchased by the signed in user.
:param requests.session session: Requests session object
:rtype: dict
"""
params = clean_locals(locals())
method = 'GetMyData'
(response, elapsed_time) = self.request(method, params, session)
return response | 0.007229 |
def url_to_destination_params(url):
"""Convert a legacy runner URL to a job destination
>>> params_simple = url_to_destination_params("http://localhost:8913/")
>>> params_simple["url"]
'http://localhost:8913/'
>>> params_simple["private_token"] is None
True
>>> advanced_url = "https://[email protected]:8914/managers/longqueue"
>>> params_advanced = url_to_destination_params(advanced_url)
>>> params_advanced["url"]
'https://example.com:8914/managers/longqueue/'
>>> params_advanced["private_token"]
'1234x'
>>> runner_url = "pulsar://http://localhost:8913/"
>>> runner_params = url_to_destination_params(runner_url)
>>> runner_params['url']
'http://localhost:8913/'
"""
if url.startswith("pulsar://"):
url = url[len("pulsar://"):]
if not url.endswith("/"):
url += "/"
# Check for private token embedded in the URL. A URL of the form
# https://moo@cow:8913 will try to contact https://cow:8913
# with a private key of moo
private_token_format = "https?://(.*)@.*/?"
private_token_match = match(private_token_format, url)
private_token = None
if private_token_match:
private_token = private_token_match.group(1)
url = url.replace("%s@" % private_token, '', 1)
destination_args = {"url": url,
"private_token": private_token}
return destination_args | 0.000703 |
def extend_reservation(request, user_id, days=7):
''' Allows staff to extend the reservation on a given user's cart.
'''
user = User.objects.get(id=int(user_id))
cart = CartController.for_user(user)
cart.extend_reservation(datetime.timedelta(days=days))
return redirect(request.META["HTTP_REFERER"]) | 0.003077 |
def plot_lnp(fignum, s, datablock, fpars, direction_type_key):
"""
plots lines and planes on a great circle with alpha 95 and mean
Parameters
_________
    fignum : number of plt.figure() object
    s : name (e.g., specimen or site) used for the plot title
datablock : nested list of dictionaries with keys in 3.0 or 2.5 format
3.0 keys: dir_dec, dir_inc, dir_tilt_correction = [-1,0,100], direction_type_key =['p','l']
2.5 keys: dec, inc, tilt_correction = [-1,0,100],direction_type_key =['p','l']
fpars : Fisher parameters calculated by, e.g., pmag.dolnp() or pmag.dolnp3_0()
direction_type_key : key for dictionary direction_type ('specimen_direction_type')
Effects
_______
plots the site level figure
"""
# make the stereonet
plot_net(fignum)
#
# plot on the data
#
dec_key, inc_key, tilt_key = 'dec', 'inc', 'tilt_correction'
if 'dir_dec' in datablock[0].keys(): # this is data model 3.0
dec_key, inc_key, tilt_key = 'dir_dec', 'dir_inc', 'dir_tilt_correction'
coord = datablock[0][tilt_key]
title = s
if coord == '-1':
title = title + ": specimen coordinates"
if coord == '0':
title = title + ": geographic coordinates"
if coord == '100':
title = title + ": tilt corrected coordinates"
DIblock, GCblock = [], []
for plotrec in datablock:
if plotrec[direction_type_key] == 'p': # direction is pole to plane
GCblock.append((float(plotrec[dec_key]), float(plotrec[inc_key])))
else: # assume direction is a directed line
DIblock.append((float(plotrec[dec_key]), float(plotrec[inc_key])))
if len(DIblock) > 0:
plot_di(fignum, DIblock) # plot directed lines
if len(GCblock) > 0:
for pole in GCblock:
plot_circ(fignum, pole, 90., 'g') # plot directed lines
#
# put on the mean direction
#
x, y = [], []
XY = pmag.dimap(float(fpars["dec"]), float(fpars["inc"]))
x.append(XY[0])
y.append(XY[1])
plt.figure(num=fignum)
plt.scatter(x, y, marker='d', s=80, c='g')
plt.title(title)
#
# get the alpha95
#
Xcirc, Ycirc = [], []
Da95, Ia95 = pmag.circ(float(fpars["dec"]), float(
fpars["inc"]), float(fpars["alpha95"]))
for k in range(len(Da95)):
XY = pmag.dimap(Da95[k], Ia95[k])
Xcirc.append(XY[0])
Ycirc.append(XY[1])
plt.plot(Xcirc, Ycirc, 'g') | 0.002533 |
def check_attr_dimension(attr_id, **kwargs):
"""
Check that the dimension of the resource attribute data is consistent
with the definition of the attribute.
If the attribute says 'volume', make sure every dataset connected
with this attribute via a resource attribute also has a dimension
of 'volume'.
"""
attr_i = _get_attr(attr_id)
datasets = db.DBSession.query(Dataset).filter(Dataset.id == ResourceScenario.dataset_id,
ResourceScenario.resource_attr_id == ResourceAttr.id,
ResourceAttr.attr_id == attr_id).all()
bad_datasets = []
for d in datasets:
if attr_i.dimension_id is None and d.unit is not None or \
attr_i.dimension_id is not None and d.unit is None or \
units.get_dimension_by_unit_id(d.unit_id) != attr_i.dimension_id:
# If there is an inconsistency
bad_datasets.append(d.id)
if len(bad_datasets) > 0:
raise HydraError("Datasets %s have a different dimension_id to attribute %s"%(bad_datasets, attr_id))
return 'OK' | 0.008881 |
def stretch_linear(self, ch_nb, cutoffs=(0.005, 0.005)):
"""Stretch linearly the contrast of the current image on channel
*ch_nb*, using *cutoffs* for left and right trimming.
"""
logger.debug("Perform a linear contrast stretch.")
if((self.channels[ch_nb].size ==
np.ma.count_masked(self.channels[ch_nb])) or
self.channels[ch_nb].min() == self.channels[ch_nb].max()):
logger.warning("Nothing to stretch !")
return
arr = self.channels[ch_nb]
carr = arr.compressed()
logger.debug("Calculate the histogram percentiles: ")
logger.debug("Left and right percentiles: " +
str(cutoffs[0] * 100) + " " + str(cutoffs[1] * 100))
left, right = np.percentile(
carr, [cutoffs[0] * 100, 100. - cutoffs[1] * 100])
delta_x = (right - left)
logger.debug("Interval: left=%f, right=%f width=%f",
left, right, delta_x)
if delta_x > 0.0:
self.channels[ch_nb] = np.ma.array((arr - left) / delta_x,
mask=arr.mask)
else:
logger.warning("Unable to make a contrast stretch!") | 0.001616 |
def asset_url_for(self, asset):
"""
Lookup the hashed asset path of a file name unless it starts with
something that resembles a web address, then take it as is.
:param asset: A logical path to an asset
:type asset: str
:return: Asset path or None if not found
"""
if '//' in asset:
return asset
if asset not in self.assets:
return None
return '{0}{1}'.format(self.assets_url, self.assets[asset]) | 0.003976 |
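# Behaviour sketch as comments, assuming `self.assets` maps logical names to
# hashed file names and `self.assets_url` is the static prefix (values are
# illustrative):
#   self.assets = {'app.js': 'app.5f2d91.js'}; self.assets_url = '/static/'
#   asset_url_for(self, 'app.js')                    -> '/static/app.5f2d91.js'
#   asset_url_for(self, '//cdn.example.com/lib.js')  -> returned unchanged
#   asset_url_for(self, 'missing.js')                -> None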
def get(self, key, default=None, type=None):
"""Returns the first value for a key.
If `type` is not None, the value will be converted by calling
`type` with the value as argument. If type() raises `ValueError`, it
will be treated as if the value didn't exist, and `default` will be
returned instead.
"""
try:
value = self[key]
if type is not None:
return type(value)
return value
except (KeyError, ValueError):
return default | 0.00361 |
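# Behaviour sketch as comments for the type-coercing get(), assuming a
# MultiDict-style container `d` exposing this method:
#   d.get('page', default=1, type=int)  -> 3  when d['page'] == '3'
#   d.get('page', default=1, type=int)  -> 1  when d['page'] == 'abc' (ValueError)
#   d.get('page', default=1, type=int)  -> 1  when 'page' is missing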
def _should_retry(resp):
"""Given a urlfetch response, decide whether to retry that request."""
return (resp.status_code == httplib.REQUEST_TIMEOUT or
(resp.status_code >= 500 and
resp.status_code < 600)) | 0.0131 |
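# A tiny runnable illustration of _should_retry with a stand-in response
# object; only the status_code attribute is needed.
class _FakeResp(object):
    def __init__(self, code):
        self.status_code = code

print(_should_retry(_FakeResp(503)))  # True  (any 5xx)
print(_should_retry(_FakeResp(408)))  # True  (request timeout)
print(_should_retry(_FakeResp(404)))  # False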
def set_env(envName, envValue):
"""
    Set an environment variable by appending a value (colon-separated, PATH-style).
    :param envName: name of the environment variable
    :param envValue: value to append
"""
os.environ[envName] = os.environ[envName] + ':' + envValue | 0.005814 |
def plot_resp_diff(signal, rect_signal, sample_rate):
"""
    Function designed to generate a Bokeh figure containing the evolution of the RIP signal when
    respiration was suspended for a long period, the rectangular signal that defines the
    stages of inhalation and exhalation, and the first derivative of the RIP signal.
    Applied in the Notebook "Particularities of Inductive Respiration (RIP) Sensor".
----------
Parameters
----------
signal : list
List with the acquired RIP signal.
rect_signal : list
Data samples of the rectangular signal that identifies inhalation and exhalation
segments.
sample_rate : int
Sampling rate of acquisition.
"""
signal = numpy.array(signal) - numpy.average(signal)
rect_signal = numpy.array(rect_signal)
time = numpy.linspace(0, len(signal) / sample_rate, len(signal))
signal_diff = numpy.diff(signal)
# Inhalation and Exhalation time segments.
# [Signal Binarization]
rect_signal_rev = rect_signal - numpy.average(rect_signal)
inhal_segments = numpy.where(rect_signal_rev >= 0)[0]
exhal_segments = numpy.where(rect_signal_rev < 0)[0]
rect_signal_rev[inhal_segments] = numpy.max(rect_signal_rev)
rect_signal_rev[exhal_segments] = numpy.min(rect_signal_rev)
# Normalized Data.
norm_signal = signal / numpy.max(signal)
norm_rect_signal = rect_signal_rev / numpy.max(rect_signal_rev)
norm_signal_diff = signal_diff / numpy.max(signal_diff)
# Smoothed Data.
smooth_diff = smooth(signal_diff, int(sample_rate / 10))
smooth_norm_diff = smooth(norm_signal_diff, int(sample_rate / 10))
# Scaled Rectangular Signal.
scaled_rect_signal = (rect_signal_rev * numpy.max(smooth_diff)) / numpy.max(rect_signal_rev)
# [Signal Differentiation]
diff_rect_signal = numpy.diff(rect_signal_rev)
inhal_begin = numpy.where(diff_rect_signal > 0)[0]
inhal_end = numpy.where(diff_rect_signal < 0)[0]
exhal_begin = inhal_end
exhal_end = inhal_begin[1:]
# Generation of a Bokeh figure where data will be plotted.
figure_list = plot([list([0]), list([0]), list([0])],
[list([0]), list([0]), list([0])], gridPlot=True, gridLines=3,
gridColumns=1, showPlot=False)
# Edition of Bokeh figure (title, axes labels...)
# [Top Figure]
title = Title()
title.text = "RIP Signal and Respiration Cycles"
figure_list[0].title = title
figure_list[0].line(time, signal, **opensignals_kwargs("line"))
# [Plot of inhalation and exhalation segments]
_inhal_exhal_segments(figure_list[0], list(time), list(rect_signal_rev), inhal_begin, inhal_end,
exhal_begin, exhal_end)
figure_list[0].yaxis.axis_label = "Raw Data (without DC component)"
# [Middle Figure]
title = Title()
title.text = "1st Derivative of RIP Signal and Respiration Cycles"
figure_list[1].title = title
figure_list[1].line(time[1:], smooth_diff, **opensignals_kwargs("line"))
# [Plot of inhalation and exhalation segments]
_inhal_exhal_segments(figure_list[1], list(time), list(scaled_rect_signal), inhal_begin,
inhal_end, exhal_begin, exhal_end)
figure_list[1].yaxis.axis_label = "Raw Differential Data"
# [Bottom Figure]
title = Title()
title.text = "RIP Signal and 1st Derivative (Normalized)"
figure_list[2].title = title
figure_list[2].line(time, norm_signal, **opensignals_kwargs("line"))
figure_list[2].line(time[1:], smooth_norm_diff, legend="RIP 1st Derivative", **opensignals_kwargs("line"))
# [Plot of inhalation and exhalation segments]
_inhal_exhal_segments(figure_list[2], list(time), list(norm_rect_signal), inhal_begin,
inhal_end, exhal_begin, exhal_end)
figure_list[2].yaxis.axis_label = "Normalized Data"
figure_list[2].xaxis.axis_label = "Time (s)"
grid_plot_ref = gridplot([[figure_list[0]], [figure_list[1]], [figure_list[2]]],
**opensignals_kwargs("gridplot"))
show(grid_plot_ref) | 0.00361 |
def get_properties(obj):
"""
Get values of all properties in specified object and its subobjects and returns them as a map.
The object can be a user defined object, map or array.
    Returned properties are, correspondingly, object properties, map key-value pairs or array elements with their indexes.
:param obj: an object to get properties from.
:return: a map, containing the names of the object's properties and their values.
"""
properties = {}
if obj != None:
cycle_detect = []
RecursiveObjectReader._perform_get_properties(obj, None, properties, cycle_detect)
return properties | 0.011611 |
def rotate_shift_mask_simplifier(a, b):
"""
Handles the following case:
((A << a) | (A >> (_N - a))) & mask, where
A being a BVS,
        a being an integer that is less than _N,
_N is either 32 or 64, and
mask can be evaluated to 0xffffffff (64-bit) or 0xffff (32-bit) after reversing the rotate-shift
operation.
It will be simplified to:
(A & (mask >>> a)) <<< a
"""
# is the second argument a BVV?
if b.op != 'BVV':
return None
# is it a rotate-shift?
if a.op != '__or__' or len(a.args) != 2:
return None
a_0, a_1 = a.args
if a_0.op != '__lshift__':
return None
if a_1.op != 'LShR':
return None
a_00, a_01 = a_0.args
a_10, a_11 = a_1.args
    if a_00 is not a_10:
return None
if a_01.op != 'BVV' or a_11.op != 'BVV':
return None
lshift_ = a_01.args[0]
rshift_ = a_11.args[0]
bitwidth = lshift_ + rshift_
if bitwidth not in (32, 64):
return None
# is the second argument a mask?
    # Note: the following check can be loosened further if we want to support more masks.
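    # Rotate the mask back by the same amount and check that it collapses to
    # the expected contiguous low mask for this bit width.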
if bitwidth == 32:
m = ((b.args[0] << rshift_) & 0xffffffff) | (b.args[0] >> lshift_)
if m != 0xffff:
return None
else: # bitwidth == 64
m = ((b.args[0] << rshift_) & 0xffffffffffffffff) | (b.args[0] >> lshift_)
if m != 0xffffffff:
return None
# Show our power!
masked_a = (a_00 & m)
expr = (masked_a << lshift_) | (masked_a >> rshift_)
return expr | 0.003911 |
def _preprocess_scan_params(self, xml_params):
""" Processes the scan parameters. """
params = {}
for param in xml_params:
params[param.tag] = param.text or ''
# Set default values.
for key in self.scanner_params:
if key not in params:
params[key] = self.get_scanner_param_default(key)
if self.get_scanner_param_type(key) == 'selection':
params[key] = params[key].split('|')[0]
# Validate values.
for key in params:
param_type = self.get_scanner_param_type(key)
if not param_type:
continue
if param_type in ['integer', 'boolean']:
try:
params[key] = int(params[key])
except ValueError:
raise OSPDError('Invalid %s value' % key, 'start_scan')
if param_type == 'boolean':
if params[key] not in [0, 1]:
raise OSPDError('Invalid %s value' % key, 'start_scan')
elif param_type == 'selection':
selection = self.get_scanner_param_default(key).split('|')
if params[key] not in selection:
raise OSPDError('Invalid %s value' % key, 'start_scan')
if self.get_scanner_param_mandatory(key) and params[key] == '':
raise OSPDError('Mandatory %s value is missing' % key,
'start_scan')
return params | 0.001966 |
def wait(self, *args, **kwargs):
"""Wait for the completion event to be set."""
if _debug: IOCB._debug("wait(%d) %r %r", self.ioID, args, kwargs)
# waiting from a non-daemon thread could be trouble
return self.ioComplete.wait(*args, **kwargs) | 0.010909 |
def fw_update(self, data, fw_name=None):
"""Top level FW update function. """
LOG.debug("FW Update %s", data)
self._fw_update(fw_name, data) | 0.012195 |
def decr(self, key, value, default=0, time=100):
"""
        Decrement a key; if it exists, return its actual value, otherwise return 0.
        The minimum value a decrement can return is 0.
:param key: Key's name
:type key: six.string_types
:param value: Number to be decremented
:type value: int
:param default: Default value if key does not exist.
:type default: int
:param time: Time in seconds to expire key.
:type time: int
:return: Actual value of the key on server
:rtype: int
"""
return self._incr_decr('decr', key, value, default, time) | 0.004608 |
def get_all_units(self, params=None):
"""
Get all units
This will iterate over all pages until it gets all elements.
        So if the rate limit is exceeded it will throw an exception and you will get nothing.
:param params: search params
:return: list
"""
if not params:
params = {}
return self._iterate_through_pages(self.get_units_per_page, resource=UNITS, **{'params': params}) | 0.008791 |
def set_l2cap_mtu (sock, mtu):
"""set_l2cap_mtu (sock, mtu)
Adjusts the MTU for the specified L2CAP socket. This method needs to be
invoked on both sides of the connection for it to work! The default mtu
that all L2CAP connections start with is 672 bytes.
mtu must be between 48 and 65535, inclusive.
"""
options = get_l2cap_options (sock)
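    # The first two option fields are the outgoing and incoming MTU; set both
    # so the change applies in each direction.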
options[0] = options[1] = mtu
set_l2cap_options (sock, options) | 0.009029 |
def get_document(
self,
collection_id, ref=None, mimetype="application/tei+xml, application/xml"):
""" Make a navigation request on the DTS API
:param collection_id: Id of the collection
:param ref: If ref is a tuple, it is treated as a range. String or int are treated as single ref
:param mimetype: Media type to request
:return: Response
:rtype: requests.Response
"""
parameters = {
"id": collection_id
}
_parse_ref_parameters(parameters, ref)
return self.call(
"documents",
parameters,
mimetype=mimetype
) | 0.005891 |
def rm(ctx, dataset, kwargs):
"removes the dataset's folder if it exists"
kwargs = parse_kwargs(kwargs)
data(dataset, **ctx.obj).rm(**kwargs) | 0.006494 |
def _create_deserializer(self) -> JsonObjectDeserializer:
"""
Creates a deserializer that is to be used by this decoder.
:return: the deserializer
"""
if self._deserializer_cache is None:
deserializer_cls = type(
"%sInternalDeserializer" % type(self),
(JsonObjectDeserializer,),
{
"_JSON_ENCODER_ARGS": self._args,
"_JSON_ENCODER_KWARGS": self._kwargs
}
)
self._deserializer_cache = deserializer_cls(self._get_property_mappings(), self._get_deserializable_cls())
return self._deserializer_cache | 0.004399 |
def local_subset(self, *args, **kwargs):
'''
Run :ref:`execution modules <all-salt.modules>` against subsets of minions
.. versionadded:: 2016.3.0
Wraps :py:meth:`salt.client.LocalClient.cmd_subset`
'''
local = salt.client.get_local_client(mopts=self.opts)
return local.cmd_subset(*args, **kwargs) | 0.008451 |
def _model_unique(ins):
""" Get unique constraints info
:type ins: sqlalchemy.orm.mapper.Mapper
:rtype: list[tuple[str]]
"""
unique = []
for t in ins.tables:
for c in t.constraints:
if isinstance(c, UniqueConstraint):
unique.append(tuple(col.key for col in c.columns))
return unique | 0.002882 |
def get_json(self):
"""Create JSON data for iSCSI target.
:returns: JSON data for iSCSI target as follows:
{
"DHCPUsage":{
},
"Name":{
},
"IPv4Address":{
},
"PortNumber":{
},
"BootLUN":{
},
"AuthenticationMethod":{
},
"ChapUserName":{
},
"ChapSecret":{
},
"MutualChapSecret":{
}
}
"""
json = {
'DHCPUsage': self.dhcp_usage,
'AuthenticationMethod': self.auth_method,
}
if not self.dhcp_usage:
json['Name'] = self.iqn
json['IPv4Address'] = self.ip
json['PortNumber'] = self.port
json['BootLUN'] = self.lun
if self.chap_user:
json['ChapUserName'] = self.chap_user
if self.chap_secret:
json['ChapSecret'] = self.chap_secret
if self.mutual_chap_secret:
json['MutualChapSecret'] = self.mutual_chap_secret
return json | 0.00165 |
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False, \
plot=False):
"""
Return a discrete colormap and the set of colors.
modified from
<http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
>>> x = resize(arange(100), (5,100))
>>> djet = cmap_discretize(cm.jet, 5)
>>> imshow(x, cmap=djet)
See available matplotlib colormaps at:
<http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>
If N>20 the sampled colors might not be very distinctive.
    If you want to try anyway (at the risk of indistinct colors), set usepreset=False
"""
import random
from scipy import interpolate
if usepreset:
if 0 < N <= 5:
cmap = cm.gist_rainbow
elif N <= 20:
cmap = cm.Set1
else:
sys.exit(discrete_rainbow.__doc__)
cdict = cmap._segmentdata.copy()
# N colors
colors_i = np.linspace(0,1.,N)
# N+1 indices
indices = np.linspace(0,1.,N+1)
rgbs = []
for key in ('red','green','blue'):
# Find the N colors
D = np.array(cdict[key])
I = interpolate.interp1d(D[:,0], D[:,1])
colors = I(colors_i)
rgbs.append(colors)
# Place these colors at the correct indices.
A = np.zeros((N+1,3), float)
A[:,0] = indices
A[1:,1] = colors
A[:-1,2] = colors
# Create a tuple for the dictionary.
L = []
for l in A:
L.append(tuple(l))
cdict[key] = tuple(L)
palette = zip(*rgbs)
if shuffle:
random.shuffle(palette)
if plot:
print_colors(palette)
# Return (colormap object, RGB tuples)
return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024), palette | 0.017993 |
async def modify(self, **kwargs):
'''
Corresponds to PATCH request with a resource identifier, modifying a single document in the database
'''
try:
pk = self.pk_type(kwargs['pk'])
# modify is a class method on MongoCollectionMixin
result = await self._meta.object_class.modify(self.db, key=pk, data=self.data)
if result is None:
raise NotFound('Object matching the given {} was not found'.format(self.pk))
return await result.serialize()
except Exception as ex:
logger.exception(ex)
raise BadRequest(ex) | 0.007776 |
def stream_subsegments(self):
"""
Stream all closed subsegments to the daemon
and remove reference to the parent segment.
No-op for a not sampled segment.
"""
segment = self.current_segment()
if self.streaming.is_eligible(segment):
self.streaming.stream(segment, self._stream_subsegment_out) | 0.005556 |
def read_mnist_labels(filename):
"""Read MNIST labels from the original ubyte file format.
Parameters
----------
filename : str
Filename/path from which to read labels.
Returns
-------
labels : :class:`~numpy.ndarray`, shape (nlabels, 1)
A one-dimensional unsigned byte array containing the
labels as integers.
"""
with gzip.open(filename, 'rb') as f:
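        # Header: two big-endian int32 values (magic number, number of labels).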
magic, _ = struct.unpack('>ii', f.read(8))
if magic != MNIST_LABEL_MAGIC:
raise ValueError("Wrong magic number reading MNIST label file")
array = numpy.frombuffer(f.read(), dtype='uint8')
array = array.reshape(array.size, 1)
return array | 0.001437 |
def cli(ctx, packages, all, list, force, platform):
"""Install packages."""
if packages:
for package in packages:
Installer(package, platform, force).install()
elif all: # pragma: no cover
packages = Resources(platform).packages
for package in packages:
Installer(package, platform, force).install()
elif list:
Resources(platform).list_packages(installed=True, notinstalled=True)
else:
click.secho(ctx.get_help()) | 0.002004 |
def mainloop(self):
""" The main loop.
"""
# Print usage if not enough args or bad options
if len(self.args) < 2:
self.parser.error("No event type and info hash given!")
if sys.stdin.isatty():
self.options.no_fork = True | 0.007018 |
def format_date(date, gmt_offset=0, relative=True, shorter=False, full_format=False):
"""Formats the given date (which should be GMT).
By default, we return a relative time (e.g., "2 minutes ago"). You
can return an absolute date string with ``relative=False``.
You can force a full format date ("July 10, 1980") with
``full_format=True``.
This method is primarily intended for dates in the past.
For dates in the future, we fall back to full format.
From tornado
"""
if not date:
return '-'
if isinstance(date, float) or isinstance(date, int):
date = datetime.datetime.utcfromtimestamp(date)
now = datetime.datetime.utcnow()
if date > now:
if relative and (date - now).seconds < 60:
            # Due to clock skew, some things are slightly
# in the future. Round timestamps in the immediate
# future down to now in relative mode.
date = now
else:
# Otherwise, future dates always use the full format.
full_format = True
local_date = date - datetime.timedelta(minutes=gmt_offset)
local_now = now - datetime.timedelta(minutes=gmt_offset)
local_yesterday = local_now - datetime.timedelta(hours=24)
difference = now - date
seconds = difference.seconds
days = difference.days
format = None
if not full_format:
ret_, fff_format = fix_full_format(days, seconds, relative, shorter, local_date, local_yesterday)
format = fff_format
if ret_:
return format
else:
format = format
if format is None:
format = "%(month_name)s %(day)s, %(year)s" if shorter else \
"%(month_name)s %(day)s, %(year)s at %(time)s"
str_time = "%d:%02d" % (local_date.hour, local_date.minute)
return format % {
"month_name": local_date.strftime('%b'),
"weekday": local_date.strftime('%A'),
"day": str(local_date.day),
"year": str(local_date.year),
"month": local_date.month,
"time": str_time
} | 0.001435 |
def has_all_nonzero_segment_lengths(neuron, threshold=0.0):
'''Check presence of neuron segments with length not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a segment length is considered to
be non-zero
Returns:
CheckResult with result including list of (section_id, segment_id)
of zero length segments
'''
bad_ids = []
for sec in _nf.iter_sections(neuron):
p = sec.points
for i, s in enumerate(zip(p[:-1], p[1:])):
if segment_length(s) <= threshold:
bad_ids.append((sec.id, i))
return CheckResult(len(bad_ids) == 0, bad_ids) | 0.001427 |
def generate_items(self):
"""
Means array is valid only when all items are valid by this definition.
.. code-block:: python
{
'items': [
{'type': 'integer'},
{'type': 'string'},
],
}
Valid arrays are those with integers or strings, nothing else.
Since draft 06 definition can be also boolean. True means nothing, False
means everything is invalid.
"""
items_definition = self._definition['items']
if items_definition is True:
return
self.create_variable_is_list()
with self.l('if {variable}_is_list:'):
self.create_variable_with_length()
if items_definition is False:
with self.l('if {variable}:'):
self.l('raise JsonSchemaException("{name} must not be there")')
elif isinstance(items_definition, list):
for idx, item_definition in enumerate(items_definition):
with self.l('if {variable}_len > {}:', idx):
self.l('{variable}__{0} = {variable}[{0}]', idx)
self.generate_func_code_block(
item_definition,
'{}__{}'.format(self._variable, idx),
'{}[{}]'.format(self._variable_name, idx),
)
if isinstance(item_definition, dict) and 'default' in item_definition:
self.l('else: {variable}.append({})', repr(item_definition['default']))
if 'additionalItems' in self._definition:
if self._definition['additionalItems'] is False:
self.l('if {variable}_len > {}: raise JsonSchemaException("{name} must contain only specified items")', len(items_definition))
else:
with self.l('for {variable}_x, {variable}_item in enumerate({variable}[{0}:], {0}):', len(items_definition)):
self.generate_func_code_block(
self._definition['additionalItems'],
'{}_item'.format(self._variable),
'{}[{{{}_x}}]'.format(self._variable_name, self._variable),
)
else:
if items_definition:
with self.l('for {variable}_x, {variable}_item in enumerate({variable}):'):
self.generate_func_code_block(
items_definition,
'{}_item'.format(self._variable),
'{}[{{{}_x}}]'.format(self._variable_name, self._variable),
) | 0.003901 |
def syzygyJD(jd):
""" Finds the latest new or full moon and
returns the julian date of that event.
"""
sun = swe.sweObjectLon(const.SUN, jd)
moon = swe.sweObjectLon(const.MOON, jd)
dist = angle.distance(sun, moon)
# Offset represents the Syzygy type.
# Zero is conjunction and 180 is opposition.
offset = 180 if (dist >= 180) else 0
while abs(dist) > MAX_ERROR:
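        # Step jd back by the remaining angular distance converted to days via
        # the Moon's mean daily motion, then recompute the separation.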
jd = jd - dist / 13.1833 # Moon mean daily motion
sun = swe.sweObjectLon(const.SUN, jd)
moon = swe.sweObjectLon(const.MOON, jd)
dist = angle.closestdistance(sun - offset, moon)
return jd | 0.007849 |
def vad_collector(self, padding_ms=300, ratio=0.75, frames=None):
"""Generator that yields series of consecutive audio frames comprising each utterence, separated by yielding a single None.
Determines voice activity by ratio of frames in padding_ms. Uses a buffer to include padding_ms prior to being triggered.
Example: (frame, ..., frame, None, frame, ..., frame, None, ...)
                  |---utterance---|        |---utterance---|
"""
if frames is None: frames = self.frame_generator()
num_padding_frames = padding_ms // self.frame_duration_ms
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
for frame in frames:
is_speech = self.vad.is_speech(frame, self.sample_rate)
if not triggered:
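                # Not yet inside an utterance: buffer frames and start
                # yielding once enough of the padding window is voiced.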
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
if num_voiced > ratio * ring_buffer.maxlen:
triggered = True
for f, s in ring_buffer:
yield f
ring_buffer.clear()
else:
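                # Inside an utterance: keep yielding frames until enough of
                # the window is unvoiced, then emit None to mark the end.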
yield frame
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
if num_unvoiced > ratio * ring_buffer.maxlen:
triggered = False
yield None
ring_buffer.clear() | 0.003911 |
def __get_percpu(self):
"""Update and/or return the per CPU list using the psutil library."""
# Never update more than 1 time per cached_time
if self.timer_percpu.finished():
self.percpu_percent = []
for cpu_number, cputimes in enumerate(psutil.cpu_times_percent(interval=0.0, percpu=True)):
cpu = {'key': self.get_key(),
'cpu_number': cpu_number,
'total': round(100 - cputimes.idle, 1),
'user': cputimes.user,
'system': cputimes.system,
'idle': cputimes.idle}
# The following stats are for API purposes only
if hasattr(cputimes, 'nice'):
cpu['nice'] = cputimes.nice
if hasattr(cputimes, 'iowait'):
cpu['iowait'] = cputimes.iowait
if hasattr(cputimes, 'irq'):
cpu['irq'] = cputimes.irq
if hasattr(cputimes, 'softirq'):
cpu['softirq'] = cputimes.softirq
if hasattr(cputimes, 'steal'):
cpu['steal'] = cputimes.steal
if hasattr(cputimes, 'guest'):
cpu['guest'] = cputimes.guest
if hasattr(cputimes, 'guest_nice'):
cpu['guest_nice'] = cputimes.guest_nice
# Append new CPU to the list
self.percpu_percent.append(cpu)
# Reset timer for cache
self.timer_percpu = Timer(self.cached_time)
return self.percpu_percent | 0.001846 |
def _fill(self, direction, limit=None):
"""
Shared function for `pad` and `backfill` to call Cython method.
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result('group_fillna_indexer',
self.grouper, needs_mask=True,
cython_dtype=np.int64,
result_is_index=True,
direction=direction, limit=limit) | 0.00172 |
def _recursively_replace_dict_for_pretty_dict(x):
"""Recursively replace `dict`s with `_PrettyDict`."""
# We use "PrettyDict" because collections.OrderedDict repr/str has the word
# "OrderedDict" in it. We only want to print "OrderedDict" if in fact the
# input really is an OrderedDict.
if isinstance(x, dict):
return _PrettyDict({
k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
if (isinstance(x, collections.Sequence) and
not isinstance(x, six.string_types)):
args = (_recursively_replace_dict_for_pretty_dict(x_) for x_ in x)
is_named_tuple = (isinstance(x, tuple) and
hasattr(x, "_asdict") and
hasattr(x, "_fields"))
return type(x)(*args) if is_named_tuple else type(x)(args)
if isinstance(x, collections.Mapping):
return type(x)(**{k: _recursively_replace_dict_for_pretty_dict(v)
for k, v in x.items()})
return x | 0.010352 |
def save_ids(f, self, *args, **kwargs):
"""Keep our history and outstanding attributes up to date after a method call."""
n_previous = len(self.client.history)
try:
ret = f(self, *args, **kwargs)
finally:
nmsgs = len(self.client.history) - n_previous
msg_ids = self.client.history[-nmsgs:]
self.history.extend(msg_ids)
map(self.outstanding.add, msg_ids)
return ret | 0.004717 |
def set_data_and_metadata(self, data_and_metadata, data_modified=None):
"""Sets the underlying data and data-metadata to the data_and_metadata.
Note: this does not make a copy of the data.
"""
self.increment_data_ref_count()
try:
if data_and_metadata:
data = data_and_metadata.data
data_shape_and_dtype = data_and_metadata.data_shape_and_dtype
intensity_calibration = data_and_metadata.intensity_calibration
dimensional_calibrations = data_and_metadata.dimensional_calibrations
metadata = data_and_metadata.metadata
timestamp = data_and_metadata.timestamp
data_descriptor = data_and_metadata.data_descriptor
timezone = data_and_metadata.timezone or Utility.get_local_timezone()
timezone_offset = data_and_metadata.timezone_offset or Utility.TimezoneMinutesToStringConverter().convert(Utility.local_utcoffset_minutes())
new_data_and_metadata = DataAndMetadata.DataAndMetadata(self.__load_data, data_shape_and_dtype, intensity_calibration, dimensional_calibrations, metadata, timestamp, data, data_descriptor, timezone, timezone_offset)
else:
new_data_and_metadata = None
self.__set_data_metadata_direct(new_data_and_metadata, data_modified)
if self.__data_and_metadata is not None:
if self.persistent_object_context and not self.persistent_object_context.is_write_delayed(self):
self.persistent_object_context.write_external_data(self, "data", self.__data_and_metadata.data)
self.__data_and_metadata.unloadable = True
finally:
self.decrement_data_ref_count() | 0.005006 |
def _create_serializer_of_type_with_cache(self, serializer_type: Type) -> "Serializer":
"""
        Creates a serializer of the given type, exploiting a cache.
        :param serializer_type: the type of serializer to create
        :return: the created serializer
"""
if serializer_type not in self._serializers_cache:
self._serializers_cache[serializer_type] = self._create_serializer_of_type(serializer_type)
return self._serializers_cache[serializer_type] | 0.00789 |
def main(*args):
"""Launch the main routine."""
parser = argparse.ArgumentParser()
parser.add_argument("action",
help="create, check, run, make-nb, or run-nb")
parser.add_argument("--directory", "-dir", default=os.getcwd(),
help="path to directory with a .sciunit file")
parser.add_argument("--stop", "-s", default=True,
help="stop and raise errors, halting the program")
parser.add_argument("--tests", "-t", default=False,
help="runs tests instead of suites")
if args:
args = parser.parse_args(args)
else:
args = parser.parse_args()
file_path = os.path.join(args.directory, '.sciunit')
config = None
if args.action == 'create':
create(file_path)
elif args.action == 'check':
config = parse(file_path, show=True)
print("\nNo configuration errors reported.")
elif args.action == 'run':
config = parse(file_path)
run(config, path=args.directory,
stop_on_error=args.stop, just_tests=args.tests)
elif args.action == 'make-nb':
config = parse(file_path)
make_nb(config, path=args.directory,
stop_on_error=args.stop, just_tests=args.tests)
elif args.action == 'run-nb':
config = parse(file_path)
run_nb(config, path=args.directory)
else:
raise NameError('No such action %s' % args.action)
if config:
cleanup(config, path=args.directory) | 0.000655 |
def parquet_to_df(filename, use_threads=1):
"""parquet_to_df: Reads a Parquet file into a Pandas DataFrame
Args:
filename (string): The full path to the filename for the Parquet file
        use_threads (int): The number of threads to use (defaults to 1)
"""
try:
return pq.read_table(filename, use_threads=use_threads).to_pandas()
except pa.lib.ArrowIOError:
print('Could not read parquet file {:s}'.format(filename))
return None | 0.004082 |
def did_you_mean(message: str, user_input: str, choices: Sequence[str]) -> str:
""" Given a list of choices and an invalid user input, display the closest
items in the list that match the input.
"""
if not choices:
return message
else:
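        # Score every choice by its similarity to the input; the suggestion is
        # the choice with the highest ratio.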
result = {
difflib.SequenceMatcher(a=user_input, b=choice).ratio(): choice
for choice in choices
}
message += "\nDid you mean: %s?" % result[max(result)]
return message | 0.002045 |
def select_each(conn, query: str, parameter_groups, name=None):
"""Run select query for each parameter set in single transaction."""
with conn:
with conn.cursor(name=name) as cursor:
for parameters in parameter_groups:
cursor.execute(query, parameters)
yield cursor.fetchone() | 0.002967 |
def branch_out(self, limb=None):
''' Set the individual section branches
This adds the various sections of the config file into the
        tree environment for access later. Optionally, a specific branch
        can be specified. This does not yet load them into the os environment.
Parameters:
limb (str/list):
The name of the section of the config to add into the environ
or a list of strings
'''
# Filter on sections
if not limb:
limbs = self._cfg.sections()
else:
# we must have the general always + secton
limb = limb if isinstance(limb, list) else [limb]
limbs = ['general']
limbs.extend(limb)
# add all limbs into the tree environ
for leaf in limbs:
leaf = leaf if leaf in self._cfg.sections() else leaf.upper()
self.environ[leaf] = OrderedDict()
options = self._cfg.options(leaf)
for opt in options:
if opt in self.environ['default']:
continue
val = self._cfg.get(leaf, opt)
if val.find(self._file_replace) == 0:
val = val.replace(self._file_replace, self.sasbasedir)
self.environ[leaf][opt] = val | 0.001499 |
def inject():
"""Injects pout into the builtins module so it can be called from anywhere without
having to be explicitely imported, this is really just for convenience when
debugging
https://stackoverflow.com/questions/142545/python-how-to-make-a-cross-module-variable
"""
try:
from .compat import builtins
module = sys.modules[__name__]
setattr(builtins, __name__, module)
#builtins.pout = pout
except ImportError:
pass | 0.00611 |
def snapshot_peek_sigb64( fd, off, bytelen ):
"""
Read the last :bytelen bytes of
fd and interpret it as a base64-encoded
string
"""
fd.seek( off - bytelen, os.SEEK_SET )
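    # The read window ends exactly at offset `off`; a short read means the
    # signature is not fully present.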
sigb64 = fd.read(bytelen)
if len(sigb64) != bytelen:
return None
try:
base64.b64decode(sigb64)
except:
return None
return sigb64 | 0.01626 |
def get_key(raw=False):
""" Gets a single key from stdin
"""
while True:
try:
if kbhit():
char = getch()
ordinal = ord(char)
if ordinal in (0, 224):
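                    # A 0 or 224 prefix byte means an extended key; combine it
                    # with the next byte to look up the named key.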
extention = ord(getch())
scan_code = ordinal + extention * 256
result = scan_codes.get(scan_code)
break
else:
result = char.decode()
break
except KeyboardInterrupt:
return "ctrl-c"
return result if raw else keys_flipped.get(result, result) | 0.001592 |
def find_txt(xml_tree, path, default=''):
"""
Extracts the text value from an XML tree, using XPath.
In case of error, will return a default value.
:param xml_tree: the XML Tree object. Assumed is <type 'lxml.etree._Element'>.
:param path: XPath to be applied, in order to extract the desired data.
:param default: Value to be returned in case of error.
:return: a str value.
"""
value = ''
try:
xpath_applied = xml_tree.xpath(path) # will consider the first match only
if len(xpath_applied) and xpath_applied[0] is not None:
xpath_result = xpath_applied[0]
if isinstance(xpath_result, type(xml_tree)):
value = xpath_result.text.strip()
else:
value = xpath_result
except Exception: # in case of any exception, returns default
value = default
return py23_compat.text_type(value) | 0.003243 |
def update_pidfile(pidfile):
"""Update pidfile.
Notice:
We should call this function only after we have successfully acquired
a lock and never before. It exits main program if it fails to parse
and/or write pidfile.
Arguments:
pidfile (str): pidfile to update
"""
try:
with open(pidfile, mode='r') as _file:
pid = _file.read(1024).rstrip()
try:
pid = int(pid)
except ValueError:
print("cleaning stale pidfile with invalid data:'{}'".format(pid))
write_pid(pidfile)
else:
if running(pid):
# This is to catch migration issues from 0.7.x to 0.8.x
# version, where old process is still around as it failed to
# be stopped. Since newer version has a different locking
# mechanism, we can end up with both versions running.
# In order to avoid this situation we refuse to startup.
sys.exit("process {} is already running".format(pid))
else:
# pidfile exists with a PID for a process that is not running.
# Let's update PID.
print("updating stale processID({}) in pidfile".format(pid))
write_pid(pidfile)
except FileNotFoundError:
# Either it's 1st time we run or previous run was terminated
# successfully.
print("creating pidfile {f}".format(f=pidfile))
write_pid(pidfile)
except OSError as exc:
sys.exit("failed to update pidfile:{e}".format(e=exc)) | 0.00062 |
def _log_error(self, request, error):
'''Log exceptions during a fetch.'''
_logger.error(
_('Fetching ‘{url}’ encountered an error: {error}'),
url=request.url, error=error
) | 0.00905 |
def size(self, units="MiB"):
"""
Returns the physical volume size in the given units. Default units are MiB.
*Args:*
* units (str): Unit label ('MiB', 'GiB', etc...). Default is MiB.
"""
self.open()
size = lvm_pv_get_size(self.handle)
self.close()
return size_convert(size, units) | 0.010989 |
def list_opts():
"""List all conf modules opts.
Goes through all conf modules and yields their opts.
"""
for mod in load_conf_modules():
mod_opts = mod.list_opts()
if type(mod_opts) is list:
for single_mod_opts in mod_opts:
yield single_mod_opts[0], single_mod_opts[1]
else:
yield mod_opts[0], mod_opts[1] | 0.002584 |
def get_psf_pix(self, ra, dec):
"""
Determine the local psf (a,b,pa) at a given sky location.
The psf is in pixel coordinates.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
a, b, pa : float
The psf semi-major axis (pixels), semi-minor axis (pixels), and rotation angle (degrees).
If a psf is defined then it is the psf that is returned, otherwise the image
restoring beam is returned.
"""
psf_sky = self.get_psf_sky(ra, dec)
psf_pix = self.wcshelper.sky2pix_ellipse([ra, dec], psf_sky[0], psf_sky[1], psf_sky[2])[2:]
return psf_pix | 0.006916 |
def load_permissions_on_identity_loaded(sender, identity):
"""Add system roles "Needs" to users' identities.
Every user gets the **any_user** Need.
Authenticated users get in addition the **authenticated_user** Need.
"""
identity.provides.add(
any_user
)
# if the user is not anonymous
if current_user.is_authenticated:
# Add the need provided to authenticated users
identity.provides.add(
authenticated_user
) | 0.002053 |
def batch_snapshot(self, read_timestamp=None, exact_staleness=None):
"""Return an object which wraps a batch read / query.
:type read_timestamp: :class:`datetime.datetime`
:param read_timestamp: Execute all reads at the given timestamp.
:type exact_staleness: :class:`datetime.timedelta`
:param exact_staleness: Execute all reads at a timestamp that is
``exact_staleness`` old.
:rtype: :class:`~google.cloud.spanner_v1.database.BatchSnapshot`
:returns: new wrapper
"""
return BatchSnapshot(
self, read_timestamp=read_timestamp, exact_staleness=exact_staleness
) | 0.00436 |
def download_from_host(self, source, output_directory, filename):
"""Download a file from a given host.
This method renames the file to the given string.
:param source: Dictionary containing information about host.
:type source: dict
:param output_directory: Directory to place output in.
:type output_directory: str
:param filename: The filename to rename to.
:type filename: str
:returns: Dictionary with information about downloaded file.
:rtype: dict
"""
result = self._run_command(
["plowdown", source["url"], "-o",
output_directory, "--temp-rename"],
stderr=open("/dev/null", "w")
)
result['host_name'] = source['host_name']
if 'error' in result:
return result
temporary_filename = self.parse_output(
result['host_name'], result['output'])
result['filename'] = os.path.join(output_directory, filename)
result.pop('output')
os.rename(temporary_filename, result['filename'])
return result | 0.001781 |
def init_widget(self):
""" Initialize the underlying widget.
"""
# Create and init the client
c = self.client = BridgedWebViewClient()
c.setWebView(self.widget, c.getId())
c.onLoadResource.connect(self.on_load_resource)
c.onPageFinished.connect(self.on_page_finished)
c.onPageStarted.connect(self.on_page_started)
c.onReceivedError.connect(self.on_received_error)
c.onScaleChanged.connect(self.on_scale_changed)
c.onProgressChanged.connect(self.on_progress_changed)
c.onReceivedTitle.connect(self.on_page_title_changed)
super(AndroidWebView, self).init_widget() | 0.002999 |
def p_let_arr_substr_in_args(p):
""" statement : LET ARRAY_ID LP arguments TO RP EQ expr
| ARRAY_ID LP arguments TO RP EQ expr
"""
i = 2 if p[1].upper() == 'LET' else 1
id_ = p[i]
arg_list = p[i + 2]
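    # The last parsed argument becomes the slice start; the end defaults to the
    # maximum string-slice index, i.e. an open-ended "TO" slice.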
substr = (arg_list.children.pop().value,
make_number(gl.MAX_STRSLICE_IDX, lineno=p.lineno(i + 3)))
expr_ = p[i + 6]
p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, substr, expr_) | 0.002198 |
def _safe_match_argument(expected_type, argument_value):
"""Return a MATCH (SQL) string representing the given argument value."""
if GraphQLString.is_same_type(expected_type):
return _safe_match_string(argument_value)
elif GraphQLID.is_same_type(expected_type):
# IDs can be strings or numbers, but the GraphQL library coerces them to strings.
# We will follow suit and treat them as strings.
if not isinstance(argument_value, six.string_types):
if isinstance(argument_value, bytes): # should only happen in py3
argument_value = argument_value.decode('utf-8')
else:
argument_value = six.text_type(argument_value)
return _safe_match_string(argument_value)
elif GraphQLFloat.is_same_type(expected_type):
return represent_float_as_str(argument_value)
elif GraphQLInt.is_same_type(expected_type):
# Special case: in Python, isinstance(True, int) returns True.
# Safeguard against this with an explicit check against bool type.
if isinstance(argument_value, bool):
raise GraphQLInvalidArgumentError(u'Attempting to represent a non-int as an int: '
u'{}'.format(argument_value))
return type_check_and_str(int, argument_value)
elif GraphQLBoolean.is_same_type(expected_type):
return type_check_and_str(bool, argument_value)
elif GraphQLDecimal.is_same_type(expected_type):
return _safe_match_decimal(argument_value)
elif GraphQLDate.is_same_type(expected_type):
return _safe_match_date_and_datetime(expected_type, (datetime.date,), argument_value)
elif GraphQLDateTime.is_same_type(expected_type):
return _safe_match_date_and_datetime(expected_type,
(datetime.datetime, arrow.Arrow), argument_value)
elif isinstance(expected_type, GraphQLList):
return _safe_match_list(expected_type.of_type, argument_value)
else:
raise AssertionError(u'Could not safely represent the requested GraphQL type: '
u'{} {}'.format(expected_type, argument_value)) | 0.002733 |